| repo_name (string, length 5–100) | path (string, length 4–294) | copies (string, 990 classes) | size (string, length 4–7) | content (string, length 666–1M) | license (string, 15 classes) |
|---|---|---|---|---|---|
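Each row below stores one source file together with its repository metadata. As a quick orientation, here is a minimal sketch of how a dump with this schema could be loaded and filtered with pandas; the shard name `data.parquet` is an assumption, since the page does not name the underlying file:

```python
import pandas as pd

# Hypothetical shard name -- the viewer does not say how the data is stored.
df = pd.read_parquet("data.parquet")

# `size` is a string column per the schema above, so cast it before comparing.
small_permissive = df[
    df["path"].str.endswith(".py")
    & (df["size"].astype(int) < 10_000)
    & df["license"].isin(["mit", "apache-2.0", "bsd-3-clause"])
]
print(small_permissive[["repo_name", "path", "license"]].head())
```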
CS-SI/QGIS | tests/src/python/test_qgspointclusterrenderer.py | 23 | 7934 | # -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgspointclusterrenderer.py
-----------------------------
Date : September 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'September 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt.QtCore import QSize
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtXml import QDomDocument
from qgis.core import (QgsVectorLayer,
                       QgsProject,
                       QgsRectangle,
                       QgsMultiRenderChecker,
                       QgsPointClusterRenderer,
                       QgsUnitTypes,
                       QgsMapUnitScale,
                       QgsMarkerSymbol,
                       QgsSingleSymbolRenderer,
                       QgsReadWriteContext,
                       QgsPointDisplacementRenderer,
                       QgsMapSettings,
                       QgsProperty,
                       QgsSymbolLayer
                       )
from qgis.testing import start_app, unittest
from utilities import (unitTestDataPath)
# Convenience instances in case you may need them
# not used in this test
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsPointClusterRenderer(unittest.TestCase):

    def setUp(self):
        myShpFile = os.path.join(TEST_DATA_DIR, 'points.shp')
        self.layer = QgsVectorLayer(myShpFile, 'Points', 'ogr')
        QgsProject.instance().addMapLayer(self.layer)

        self.renderer = QgsPointClusterRenderer()
        sym1 = QgsMarkerSymbol.createSimple({'color': '#ff00ff', 'size': '3', 'outline_style': 'no'})
        renderer = QgsSingleSymbolRenderer(sym1)
        self.renderer.setEmbeddedRenderer(renderer)
        self.renderer.setClusterSymbol(QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'}))
        self.layer.setRenderer(self.renderer)

        rendered_layers = [self.layer]
        self.mapsettings = QgsMapSettings()
        self.mapsettings.setOutputSize(QSize(400, 400))
        self.mapsettings.setOutputDpi(96)
        self.mapsettings.setExtent(QgsRectangle(-123, 18, -70, 52))
        self.mapsettings.setLayers(rendered_layers)

    def tearDown(self):
        QgsProject.instance().removeAllMapLayers()

    def _setProperties(self, r):
        """ set properties for a renderer for testing with _checkProperties"""
        r.setTolerance(5)
        r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
        r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
        m = QgsMarkerSymbol()
        m.setColor(QColor(0, 255, 0))
        r.setClusterSymbol(m)
        sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
        renderer = QgsSingleSymbolRenderer(sym1)
        r.setEmbeddedRenderer(renderer)

    def _checkProperties(self, r):
        """ test properties of renderer against expected"""
        self.assertEqual(r.tolerance(), 5)
        self.assertEqual(r.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
        self.assertEqual(r.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
        self.assertEqual(r.clusterSymbol().color(), QColor(0, 255, 0))
        self.assertEqual(r.embeddedRenderer().symbol().color().name(), '#fdbf6f')

    def testGettersSetters(self):
        """ test getters and setters """
        r = QgsPointClusterRenderer()
        self._setProperties(r)
        self._checkProperties(r)

    def testClone(self):
        """ test cloning renderer """
        r = QgsPointClusterRenderer()
        self._setProperties(r)
        c = r.clone()
        self._checkProperties(c)

    def testSaveCreate(self):
        """ test saving and recreating from XML """
        r = QgsPointClusterRenderer()
        self._setProperties(r)
        doc = QDomDocument("testdoc")
        elem = r.save(doc, QgsReadWriteContext())
        c = QgsPointClusterRenderer.create(elem, QgsReadWriteContext())
        self._checkProperties(c)

    def testConvert(self):
        """ test renderer conversion """
        # same type, should clone
        r = QgsPointClusterRenderer()
        self._setProperties(r)
        c = QgsPointClusterRenderer.convertFromRenderer(r)
        self._checkProperties(c)

        # test conversion from displacement renderer
        r = QgsPointDisplacementRenderer()
        r.setTolerance(5)
        r.setToleranceUnit(QgsUnitTypes.RenderMapUnits)
        r.setToleranceMapUnitScale(QgsMapUnitScale(5, 15))
        m = QgsMarkerSymbol()
        m.setColor(QColor(0, 255, 0))
        r.setCenterSymbol(m)
        sym1 = QgsMarkerSymbol.createSimple({'color': '#fdbf6f'})
        renderer = QgsSingleSymbolRenderer(sym1)
        r.setEmbeddedRenderer(renderer)

        # want to keep as many settings as possible when converting between cluster and displacement renderer
        d = QgsPointClusterRenderer.convertFromRenderer(r)
        self.assertEqual(d.tolerance(), 5)
        self.assertEqual(d.toleranceUnit(), QgsUnitTypes.RenderMapUnits)
        self.assertEqual(d.toleranceMapUnitScale(), QgsMapUnitScale(5, 15))
        self.assertEqual(d.clusterSymbol().color(), QColor(0, 255, 0))
        self.assertEqual(d.embeddedRenderer().symbol().color().name(), '#fdbf6f')

    def testRenderNoCluster(self):
        self.layer.renderer().setTolerance(1)
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(self.mapsettings)
        renderchecker.setControlPathPrefix('cluster_renderer')
        renderchecker.setControlName('expected_cluster_no_cluster')
        self.assertTrue(renderchecker.runTest('cluster_no_cluster'))

    def testRenderWithin(self):
        self.layer.renderer().setTolerance(10)
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(self.mapsettings)
        renderchecker.setControlPathPrefix('cluster_renderer')
        renderchecker.setControlName('expected_cluster_cluster')
        self.assertTrue(renderchecker.runTest('expected_cluster_cluster'))

    def testRenderVariables(self):
        """ test rendering with expression variables in marker """
        self.layer.renderer().setTolerance(10)

        old_marker = self.layer.renderer().clusterSymbol().clone()

        new_marker = QgsMarkerSymbol.createSimple({'color': '#ffff00', 'size': '3', 'outline_style': 'no'})
        new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertyFillColor, QgsProperty.fromExpression('@cluster_color'))
        new_marker.symbolLayer(0).setDataDefinedProperty(QgsSymbolLayer.PropertySize, QgsProperty.fromExpression('@cluster_size*2'))
        self.layer.renderer().setClusterSymbol(new_marker)
        renderchecker = QgsMultiRenderChecker()
        renderchecker.setMapSettings(self.mapsettings)
        renderchecker.setControlPathPrefix('cluster_renderer')
        renderchecker.setControlName('expected_cluster_variables')
        result = renderchecker.runTest('expected_cluster_variables')
        self.layer.renderer().setClusterSymbol(old_marker)
        self.assertTrue(result)


if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
adamgreenhall/SqlBeautifier | sqlparse2/engine/filter.py | 2 | 3368 | # -*- coding: utf-8 -*-
from sqlparse2.sql import Statement, Token
from sqlparse2 import tokens as T
class StatementFilter:
"Filter that split stream at individual statements"
def __init__(self):
self._in_declare = False
self._in_dbldollar = False
self._is_create = False
self._begin_depth = 0
def _reset(self):
"Set the filter attributes to its default values"
self._in_declare = False
self._in_dbldollar = False
self._is_create = False
self._begin_depth = 0
def _change_splitlevel(self, ttype, value):
"Get the new split level (increase, decrease or remain equal)"
# PostgreSQL
if (ttype == T.Name.Builtin
and value.startswith('$') and value.endswith('$')):
if self._in_dbldollar:
self._in_dbldollar = False
return -1
else:
self._in_dbldollar = True
return 1
elif self._in_dbldollar:
return 0
# ANSI
if ttype not in T.Keyword:
return 0
unified = value.upper()
if unified == 'DECLARE' and self._is_create:
self._in_declare = True
return 1
if unified == 'BEGIN':
self._begin_depth += 1
if self._in_declare or self._is_create:
# FIXME(andi): This makes no sense.
return 1
return 0
if unified == 'END':
# Should this respect a preceeding BEGIN?
# In CASE ... WHEN ... END this results in a split level -1.
self._begin_depth = max(0, self._begin_depth - 1)
return -1
if ttype is T.Keyword.DDL and unified.startswith('CREATE'):
self._is_create = True
return 0
if (unified in ('IF', 'FOR')
and self._is_create and self._begin_depth > 0):
return 1
# Default
return 0
def process(self, stack, stream):
"Process the stream"
consume_ws = False
splitlevel = 0
stmt = None
stmt_tokens = []
# Run over all stream tokens
for ttype, value in stream:
# Yield token if we finished a statement and there's no whitespaces
if consume_ws and ttype not in (T.Whitespace, T.Comment.Single):
stmt.tokens = stmt_tokens
yield stmt
# Reset filter and prepare to process next statement
self._reset()
consume_ws = False
splitlevel = 0
stmt = None
# Create a new statement if we are not currently in one of them
if stmt is None:
stmt = Statement()
stmt_tokens = []
# Change current split level (increase, decrease or remain equal)
splitlevel += self._change_splitlevel(ttype, value)
# Append the token to the current statement
stmt_tokens.append(Token(ttype, value))
# Check if we get the end of a statement
if splitlevel <= 0 and ttype is T.Punctuation and value == ';':
consume_ws = True
# Yield pending statement (if any)
if stmt is not None:
stmt.tokens = stmt_tokens
yield stmt
| mit |
sleekmason/cyanogenmod12 | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
    cpus = perf.cpu_map()
    threads = perf.thread_map()
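    # one counter is opened per (cpu, thread) pair below; the task/comm flags
    # ask the kernel to also emit fork/exit and comm (process name) events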
    evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
                       wakeup_events = 1, watermark = 1,
                       sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    evsel.open(cpus = cpus, threads = threads)
    evlist = perf.evlist(cpus, threads)
    evlist.add(evsel)
    evlist.mmap()
    while True:
        evlist.poll(timeout = -1)
        for cpu in cpus:
            event = evlist.read_on_cpu(cpu)
            if not event:
                continue
            print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
                                                    event.sample_pid,
                                                    event.sample_tid),
            print event


if __name__ == '__main__':
    main()
| gpl-2.0 |
mne-tools/mne-tools.github.io | dev/_downloads/a4921072acc135828760714b86be20cc/eeglab_head_sphere.py | 10 | 4762 | """
.. _ex-topomap-eeglab-style:
========================================
How to plot topomaps the way EEGLAB does
========================================
If you have previous EEGLAB experience you may have noticed that topomaps
(topoplots) generated using MNE-Python look a little different from those
created in EEGLAB. If you prefer the EEGLAB style this example will show you
how to calculate head sphere origin and radius to obtain EEGLAB-like channel
layout in MNE.
"""
# Authors: Mikołaj Magnuski <mmagnuski@swps.edu.pl>
#
# License: BSD (3-clause)
import numpy as np
from matplotlib import pyplot as plt
import mne
print(__doc__)
###############################################################################
# Create fake data
# ----------------
#
# First we will create a simple evoked object with a single timepoint using
# biosemi 10-20 channel layout.
biosemi_montage = mne.channels.make_standard_montage('biosemi64')
n_channels = len(biosemi_montage.ch_names)
fake_info = mne.create_info(ch_names=biosemi_montage.ch_names, sfreq=250.,
                            ch_types='eeg')
rng = np.random.RandomState(0)
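# unit-variance noise scaled by 1e-6, i.e. channel amplitudes on the order of 1 µV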
data = rng.normal(size=(n_channels, 1)) * 1e-6
fake_evoked = mne.EvokedArray(data, fake_info)
fake_evoked.set_montage(biosemi_montage)
###############################################################################
# Calculate sphere origin and radius
# ----------------------------------
#
# EEGLAB plots head outline at the level where the head circumference is
# measured in the 10-20 system (a line going through Fpz, T8/T4, Oz and T7/T3 channels).
# MNE-Python places the head outline lower on the z dimension, at the level of
# the anatomical landmarks :term:`LPA, RPA, and NAS <fiducial>`.
# Therefore to use the EEGLAB layout we
# have to move the origin of the reference sphere (a sphere that is used as a
# reference when projecting channel locations to a 2d plane) a few centimeters
# up.
#
# Instead of approximating this position by eye, as we did in :ref:`the sensor
# locations tutorial <tut-sensor-locations>`, here we will calculate it using
# the position of Fpz, T8, Oz and T7 channels available in our montage.
# first we obtain the 3d positions of selected channels
chs = ['Oz', 'Fpz', 'T7', 'T8']
pos = np.stack([biosemi_montage.get_positions()['ch_pos'][ch] for ch in chs])
# now we calculate the radius from T7 and T8 x position
# (we could use Oz and Fpz y positions as well)
radius = np.abs(pos[[2, 3], 0]).mean()
# then we obtain the x, y, z sphere center this way:
# x: x position of the Oz channel (should be very close to 0)
# y: y position of the T8 channel (should be very close to 0 too)
# z: average z position of Oz, Fpz, T7 and T8 (their z position should be
#    the same, so we could also use just one of these channels), it should be
# positive and somewhere around `0.03` (3 cm)
x = pos[0, 0]
y = pos[-1, 1]
z = pos[:, -1].mean()
# lets print the values we got:
print([f'{v:0.5f}' for v in [x, y, z, radius]])
###############################################################################
# Compare MNE and EEGLAB channel layout
# -------------------------------------
#
# We already have the required x, y, z sphere center and its radius — we can
# use these values passing them to the ``sphere`` argument of many
# topo-plotting functions (by passing ``sphere=(x, y, z, radius)``).
# create a two-panel figure with some space for the titles at the top
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), gridspec_kw=dict(top=0.9),
                       sharex=True, sharey=True)
# we plot the channel positions with default sphere - the mne way
fake_evoked.plot_sensors(axes=ax[0], show=False)
# in the second panel we plot the positions using the EEGLAB reference sphere
fake_evoked.plot_sensors(sphere=(x, y, z, radius), axes=ax[1], show=False)
# add titles
ax[0].set_title('MNE channel projection', fontweight='bold')
ax[1].set_title('EEGLAB channel projection', fontweight='bold')
###############################################################################
# Topomaps (topoplots)
# --------------------
#
# As the last step we do the same, but plotting the topomaps. These will not
# be particularly interesting as they will show random data but hopefully you
# will see the difference.
fig, ax = plt.subplots(ncols=2, figsize=(8, 4), gridspec_kw=dict(top=0.9),
                       sharex=True, sharey=True)
mne.viz.plot_topomap(fake_evoked.data[:, 0], fake_evoked.info, axes=ax[0],
                     show=False)
mne.viz.plot_topomap(fake_evoked.data[:, 0], fake_evoked.info, axes=ax[1],
                     show=False, sphere=(x, y, z, radius))
# add titles
ax[0].set_title('MNE', fontweight='bold')
ax[1].set_title('EEGLAB', fontweight='bold')
| bsd-3-clause |
makelove/OpenCV-Python-Tutorial | 官方samples/opt_flow.py | 5 | 2532 | #!/usr/bin/env python
'''
example to show optical flow
USAGE: opt_flow.py [<video_source>]
Keys:
 1   - toggle HSV flow visualization
 2   - toggle glitch
 ESC - exit
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2
import video
def draw_flow(img, flow, step=16):
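    # sample the flow on a sparse grid (every `step` px) and draw each vector
    # as a green line with a dot at its base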
    h, w = img.shape[:2]
    y, x = np.mgrid[step/2:h:step, step/2:w:step].reshape(2, -1).astype(int)
    fx, fy = flow[y, x].T
    lines = np.vstack([x, y, x+fx, y+fy]).T.reshape(-1, 2, 2)
    lines = np.int32(lines + 0.5)
    vis = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    cv2.polylines(vis, lines, 0, (0, 255, 0))
    for (x1, y1), (x2, y2) in lines:
        cv2.circle(vis, (x1, y1), 1, (0, 255, 0), -1)
    return vis
def draw_hsv(flow):
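    # encode flow direction as hue and flow magnitude as value (saturation
    # fixed at 255), then convert the HSV image to BGR for display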
    h, w = flow.shape[:2]
    fx, fy = flow[:, :, 0], flow[:, :, 1]
    ang = np.arctan2(fy, fx) + np.pi
    v = np.sqrt(fx*fx + fy*fy)
    hsv = np.zeros((h, w, 3), np.uint8)
    hsv[..., 0] = ang*(180/np.pi/2)
    hsv[..., 1] = 255
    hsv[..., 2] = np.minimum(v*4, 255)
    bgr = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)
    return bgr
def warp_flow(img, flow):
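    # cv2.remap expects absolute source coordinates, so negate the flow and
    # add the x/y pixel grid to turn relative displacements into a lookup map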
    h, w = flow.shape[:2]
    flow = -flow
    flow[:, :, 0] += np.arange(w)
    flow[:, :, 1] += np.arange(h)[:, np.newaxis]
    res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
    return res
if __name__ == '__main__':
    import sys
    print(__doc__)
    try:
        fn = sys.argv[1]
    except IndexError:
        fn = 0

    cam = video.create_capture(fn)
    ret, prev = cam.read()
    prevgray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    show_hsv = False
    show_glitch = False
    cur_glitch = prev.copy()

    while True:
        ret, img = cam.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
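        # dense Farneback flow; positional args map to pyr_scale=0.5, levels=3,
        # winsize=15, iterations=3, poly_n=5, poly_sigma=1.2, flags=0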
        flow = cv2.calcOpticalFlowFarneback(prevgray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        prevgray = gray

        cv2.imshow('flow', draw_flow(gray, flow))
        if show_hsv:
            cv2.imshow('flow HSV', draw_hsv(flow))
        if show_glitch:
            cur_glitch = warp_flow(cur_glitch, flow)
            cv2.imshow('glitch', cur_glitch)

        ch = cv2.waitKey(5)
        if ch == 27:
            break
        if ch == ord('1'):
            show_hsv = not show_hsv
            print('HSV flow visualization is', ['off', 'on'][show_hsv])
        if ch == ord('2'):
            show_glitch = not show_glitch
            if show_glitch:
                cur_glitch = img.copy()
            print('glitch is', ['off', 'on'][show_glitch])
    cv2.destroyAllWindows()
| mit |
vadimtk/chrome4sdp | tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/s3/test_versioning.py | 114 | 6218 | # Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the S3 Versioning.
"""
import unittest
import time
from boto.s3.connection import S3Connection
from boto.exception import S3ResponseError
from boto.s3.deletemarker import DeleteMarker
from boto.compat import six
class S3VersionTest(unittest.TestCase):

    def setUp(self):
        self.conn = S3Connection()
        self.bucket_name = 'version-%d' % int(time.time())
        self.bucket = self.conn.create_bucket(self.bucket_name)

    def tearDown(self):
        for k in self.bucket.list_versions():
            self.bucket.delete_key(k.name, version_id=k.version_id)
        self.bucket.delete()

    def test_1_versions(self):
        # check versioning off
        d = self.bucket.get_versioning_status()
        self.assertFalse('Versioning' in d)

        # enable versioning
        self.bucket.configure_versioning(versioning=True)
        d = self.bucket.get_versioning_status()
        self.assertEqual('Enabled', d['Versioning'])

        # create a new key in the versioned bucket
        k = self.bucket.new_key("foobar")
        s1 = 'This is v1'
        k.set_contents_from_string(s1)

        # remember the version id of this object
        v1 = k.version_id

        # now get the contents from s3
        o1 = k.get_contents_as_string().decode('utf-8')

        # check to make sure content read from k is identical to original
        self.assertEqual(s1, o1)

        # now overwrite that same key with new data
        s2 = 'This is v2'
        k.set_contents_from_string(s2)
        v2 = k.version_id

        # now retrieve latest contents as a string and compare
        k2 = self.bucket.new_key("foobar")
        o2 = k2.get_contents_as_string().decode('utf-8')
        self.assertEqual(s2, o2)

        # next retrieve explicit versions and compare
        o1 = k.get_contents_as_string(version_id=v1).decode('utf-8')
        o2 = k.get_contents_as_string(version_id=v2).decode('utf-8')
        self.assertEqual(s1, o1)
        self.assertEqual(s2, o2)

        # Now list all versions and compare to what we have
        rs = self.bucket.get_all_versions()
        self.assertEqual(v2, rs[0].version_id)
        self.assertEqual(v1, rs[1].version_id)

        # Now do a regular list command and make sure only the new key shows up
        rs = self.bucket.get_all_keys()
        self.assertEqual(1, len(rs))

        # Now do regular delete
        self.bucket.delete_key('foobar')

        # Now list versions and make sure old versions are there
        # plus the DeleteMarker which is latest.
        rs = self.bucket.get_all_versions()
        self.assertEqual(3, len(rs))
        self.assertTrue(isinstance(rs[0], DeleteMarker))

        # Now delete v1 of the key
        self.bucket.delete_key('foobar', version_id=v1)

        # Now list versions again and make sure v1 is not there
        rs = self.bucket.get_all_versions()
        versions = [k.version_id for k in rs]
        self.assertTrue(v1 not in versions)
        self.assertTrue(v2 in versions)

        # Now suspend Versioning on the bucket
        self.bucket.configure_versioning(False)
        # Allow time for the change to fully propagate.
        time.sleep(3)
        d = self.bucket.get_versioning_status()
        self.assertEqual('Suspended', d['Versioning'])

    def test_latest_version(self):
        self.bucket.configure_versioning(versioning=True)

        # add v1 of an object
        key_name = "key"
        kv1 = self.bucket.new_key(key_name)
        kv1.set_contents_from_string("v1")

        # read list which should contain latest v1
        listed_kv1 = next(iter(self.bucket.get_all_versions()))
        self.assertEqual(listed_kv1.name, key_name)
        self.assertEqual(listed_kv1.version_id, kv1.version_id)
        self.assertEqual(listed_kv1.is_latest, True)

        # add v2 of the object
        kv2 = self.bucket.new_key(key_name)
        kv2.set_contents_from_string("v2")

        # read 2 versions, confirm v2 is latest
        i = iter(self.bucket.get_all_versions())
        listed_kv2 = next(i)
        listed_kv1 = next(i)
        self.assertEqual(listed_kv2.version_id, kv2.version_id)
        self.assertEqual(listed_kv1.version_id, kv1.version_id)
        self.assertEqual(listed_kv2.is_latest, True)
        self.assertEqual(listed_kv1.is_latest, False)

        # delete key, which creates a delete marker as latest
        self.bucket.delete_key(key_name)
        i = iter(self.bucket.get_all_versions())
        listed_kv3 = next(i)
        listed_kv2 = next(i)
        listed_kv1 = next(i)
        self.assertNotEqual(listed_kv3.version_id, None)
        self.assertEqual(listed_kv2.version_id, kv2.version_id)
        self.assertEqual(listed_kv1.version_id, kv1.version_id)
        self.assertEqual(listed_kv3.is_latest, True)
        self.assertEqual(listed_kv2.is_latest, False)
        self.assertEqual(listed_kv1.is_latest, False)
| bsd-3-clause |
Epirex/android_external_chromium_org | tools/mac/dump-static-initializers.py | 121 | 2107 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Dumps a list of files with static initializers. Use with release builds.
Usage:
tools/mac/dump-static-initializers.py out/Release/Chromium\ Framework.framework.dSYM/Contents/Resources/DWARF/Chromium\ Framework
Do NOT use mac_strip_release=0 or component=shared_library if you want to use
this script.
"""
import optparse
import re
import subprocess
import sys
# Matches for example:
# [ 1] 000001ca 64 (N_SO ) 00 0000 0000000000000000 'test.cc'
dsymutil_file_re = re.compile("N_SO.*'([^']*)'")
# Matches for example:
# [ 2] 000001d2 66 (N_OSO ) 00 0001 000000004ed856a0 '/Volumes/MacintoshHD2/src/chrome-git/src/test.o'
dsymutil_o_file_re = re.compile("N_OSO.*'([^']*)'")
# Matches for example:
# [ 8] 00000233 24 (N_FUN ) 01 0000 0000000000001b40 '__GLOBAL__I_s'
# [185989] 00dc69ef 26 (N_STSYM ) 02 0000 00000000022e2290 '__GLOBAL__I_a'
dsymutil_re = re.compile(r"(?:N_FUN|N_STSYM).*\s[0-9a-f]*\s'__GLOBAL__I_")
def ParseDsymutil(binary):
"""Given a binary, prints source and object filenames for files with
static initializers.
"""
child = subprocess.Popen(['dsymutil', '-s', binary], stdout=subprocess.PIPE)
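  # dsymutil -s dumps the symbol table; track the current source (N_SO) and
  # object (N_OSO) file while scanning for __GLOBAL__I_ initializer entries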
  for line in child.stdout:
    file_match = dsymutil_file_re.search(line)
    if file_match:
      current_filename = file_match.group(1)
    else:
      o_file_match = dsymutil_o_file_re.search(line)
      if o_file_match:
        current_o_filename = o_file_match.group(1)
      else:
        match = dsymutil_re.search(line)
        if match:
          print current_filename
          print current_o_filename
          print
def main():
  parser = optparse.OptionParser(usage='%prog filename')
  opts, args = parser.parse_args()
  if len(args) != 1:
    parser.error('missing filename argument')
    return 1
  binary = args[0]

  ParseDsymutil(binary)
  return 0


if '__main__' == __name__:
  sys.exit(main())
| bsd-3-clause |
umbraclet16/ardupilot | Tools/LogAnalyzer/tests/TestEvents.py | 73 | 1788 | from LogAnalyzer import Test,TestResult
import DataflashLog
class TestEvents(Test):
    '''test for erroneous events and failsafes'''

    # TODO: need to check for vehicle-specific codes
    def __init__(self):
        Test.__init__(self)
        self.name = "Event/Failsafe"

    def run(self, logdata, verbose):
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD
        errors = set()
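        # translate (Subsys, ECode) pairs from the dataflash ERR channel into
        # readable failure names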
if "ERR" in logdata.channels:
assert(len(logdata.channels["ERR"]["Subsys"].listData) == len(logdata.channels["ERR"]["ECode"].listData))
for i in range(len(logdata.channels["ERR"]["Subsys"].listData)):
subSys = logdata.channels["ERR"]["Subsys"].listData[i][1]
eCode = logdata.channels["ERR"]["ECode"].listData[i][1]
if subSys == 2 and (eCode == 1):
errors.add("PPM")
elif subSys == 3 and (eCode == 1 or eCode == 2):
errors.add("COMPASS")
elif subSys == 5 and (eCode == 1):
errors.add("FS_THR")
elif subSys == 6 and (eCode == 1):
errors.add("FS_BATT")
elif subSys == 7 and (eCode == 1):
errors.add("GPS")
elif subSys == 8 and (eCode == 1):
errors.add("GCS")
elif subSys == 9 and (eCode == 1 or eCode == 2):
errors.add("FENCE")
elif subSys == 10:
errors.add("FLT_MODE")
elif subSys == 11 and (eCode == 2):
errors.add("GPS_GLITCH")
elif subSys == 12 and (eCode == 1):
errors.add("CRASH")
if errors:
if len(errors) == 1 and "FENCE" in errors:
self.result.status = TestResult.StatusType.WARN
else:
self.result.status = TestResult.StatusType.FAIL
if len(errors) == 1:
self.result.statusMessage = "ERR found: "
else:
self.result.statusMessage = "ERRs found: "
for err in errors:
self.result.statusMessage = self.result.statusMessage + err + " "
| gpl-3.0 |
mantidproject/mantid | qt/applications/workbench/workbench/widgets/about/test/test_about_presenter.py | 3 | 9817 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
# This file is part of the mantid workbench
import unittest
from unittest import TestCase
from unittest.mock import call, Mock, patch
from mantidqt.utils.qt.testing import start_qapplication
from mantidqt.utils.testing.strict_mock import StrictMock
from workbench.widgets.about.presenter import AboutPresenter
class MockInstrument(object):
    def __init__(self, idx):
        self.name = StrictMock(return_value="instr{}".format(idx))


class MockFacility(object):
    def __init__(self, name):
        self.name = StrictMock(return_value=name)
        self.all_instruments = [MockInstrument(0), MockInstrument(1)]
        self.instruments = StrictMock(return_value=self.all_instruments)


class MockConfigService(object):
    all_facilities = ["facility1", "facility2"]

    def __init__(self):
        self.mock_facility = MockFacility(self.all_facilities[0])
        self.mock_instrument = self.mock_facility.all_instruments[0]
        self.getFacilityNames = StrictMock(return_value=self.all_facilities)
        self.getFacility = StrictMock(return_value=self.mock_facility)
        self.getInstrument = StrictMock(return_value=self.mock_instrument)
        self.getString = StrictMock(return_value="FACILITY1")
        self.setFacility = StrictMock()
        self.setString = StrictMock()


class FakeQSettings(object):
    def __init__(self, string_value):
        self.string_value = string_value
        self.beginGroup = StrictMock()
        self.value = StrictMock()
        self.value.side_effect = self.value_depending_on_str
        self.endGroup = StrictMock()
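
    # fake QSettings.value(): DO_NOT_SHOW always reads "2" (checked state),
    # while LAST_VERSION returns the string this fake was constructed with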
    def value_depending_on_str(self, p_str, defaultValue=None, type=None):
        if p_str == AboutPresenter.DO_NOT_SHOW:
            return "2"
        elif p_str == AboutPresenter.LAST_VERSION:
            return self.string_value
        else:
            return "unknown p_str"
@start_qapplication
class AboutPresenterTest(TestCase):
    CONFIG_SERVICE_CLASSPATH = "workbench.widgets.about.presenter.ConfigService"
    QSETTINGS_CLASSPATH = "workbench.widgets.about.presenter.QSettings"
    RELEASE_NOTES_URL_CLASSPATH = "workbench.widgets.about.presenter.release_notes_url"

    @patch(CONFIG_SERVICE_CLASSPATH, new_callable=MockConfigService)
    def test_should_show_on_startup_no_facility(self, MockConfigService):
        MockConfigService.getString.return_value = ""
        self.assertTrue(AboutPresenter.should_show_on_startup(),
                        "If the facility is not set then should_show_on_startup should always be true")
        MockConfigService.getString.assert_has_calls([call(AboutPresenter.FACILITY),
                                                      call(AboutPresenter.INSTRUMENT)])

    @patch(CONFIG_SERVICE_CLASSPATH, new_callable=MockConfigService)
    def test_should_show_on_startup_invalid_facility(self, MockConfigService):
        MockConfigService.getFacility.side_effect = RuntimeError("Invalid Facility name")
        self.assertTrue(AboutPresenter.should_show_on_startup(),
                        "If the facility is invalid then should_show_on_startup should always be true")
        MockConfigService.getString.assert_has_calls([call(AboutPresenter.FACILITY),
                                                      call(AboutPresenter.INSTRUMENT)])
        MockConfigService.getFacility.assert_has_calls([call("FACILITY1")])

    @patch(CONFIG_SERVICE_CLASSPATH, new_callable=MockConfigService)
    def test_should_show_on_startup_invalid_instrument(self, MockConfigService):
        MockConfigService.getInstrument.side_effect = RuntimeError("Invalid Instrument name")
        self.assertTrue(AboutPresenter.should_show_on_startup(),
                        "If the instrument is invalid then should_show_on_startup should always be true")
        MockConfigService.getString.assert_has_calls([call(AboutPresenter.FACILITY),
                                                      call(AboutPresenter.INSTRUMENT)])
        MockConfigService.getFacility.assert_has_calls([call("FACILITY1")])
        MockConfigService.getInstrument.assert_has_calls([call("FACILITY1")])

    @patch(CONFIG_SERVICE_CLASSPATH, new_callable=MockConfigService)
    def test_should_show_on_startup_do_not_show_same_version(self, MockConfigService):
        version_str = "the same every time"
        with patch(self.QSETTINGS_CLASSPATH, return_value=FakeQSettings(version_str)):
            with patch(self.RELEASE_NOTES_URL_CLASSPATH, return_value=version_str):
                self.assertFalse(AboutPresenter.should_show_on_startup(),
                                 "If do not show is in Qsettings then should_show_on_startup should always be False"
                                 + " for the same version")
        MockConfigService.getString.assert_has_calls([call(AboutPresenter.FACILITY),
                                                      call(AboutPresenter.INSTRUMENT)])
        MockConfigService.getFacility.assert_has_calls([call("FACILITY1")])
        MockConfigService.getInstrument.assert_has_calls([call("FACILITY1")])

    @patch(CONFIG_SERVICE_CLASSPATH, new_callable=MockConfigService)
    def test_should_show_on_startup_do_not_show_different_versions(self, MockConfigService):
        version_str = "the same every time"
        with patch(self.QSETTINGS_CLASSPATH, return_value=FakeQSettings(version_str)):
            with patch(self.RELEASE_NOTES_URL_CLASSPATH, return_value="not the " + version_str):
                self.assertTrue(AboutPresenter.should_show_on_startup(),
                                "If do not show is in Qsettings then should_show_on_startup should always be True"
                                + " for different versions")
        MockConfigService.getString.assert_has_calls([call(AboutPresenter.FACILITY),
                                                      call(AboutPresenter.INSTRUMENT)])
        MockConfigService.getFacility.assert_has_calls([call("FACILITY1")])
        MockConfigService.getInstrument.assert_has_calls([call("FACILITY1")])

    def assert_connected_once(self, owner, signal):
        self.assertEqual(1, owner.receivers(signal))

    @patch(CONFIG_SERVICE_CLASSPATH, new_callable=MockConfigService)
    def test_setup_facilities_with_valid_combination(self, mock_ConfigService):
        self.assertEqual(0, mock_ConfigService.mock_instrument.name.call_count)
        presenter = AboutPresenter(None)
        self.assertEqual(0, mock_ConfigService.setFacility.call_count)
        self.assertEqual(3, mock_ConfigService.getFacility.call_count)
        self.assertEqual(3, mock_ConfigService.mock_facility.name.call_count)
        self.assert_connected_once(presenter.view.about_widget.cb_facility,
                                   presenter.view.about_widget.cb_facility.currentTextChanged)

    def test_setup_checkbox_signals(self):
        presenter = AboutPresenter(None)
        about_widget = presenter.view.about_widget
        self.assert_connected_once(about_widget.chk_do_not_show_until_next_release,
                                   presenter.view.about_widget.chk_do_not_show_until_next_release.stateChanged)
        self.assert_connected_once(about_widget.chk_allow_usage_data,
                                   about_widget.chk_allow_usage_data.stateChanged)

    def test_setup_button_signals(self):
        presenter = AboutPresenter(None)
        about_widget = presenter.view.about_widget
        self.assert_connected_once(about_widget.clb_release_notes,
                                   about_widget.clb_release_notes.clicked)
        self.assert_connected_once(about_widget.clb_sample_datasets,
                                   about_widget.clb_sample_datasets.clicked)
        self.assert_connected_once(about_widget.clb_mantid_introduction,
                                   about_widget.clb_mantid_introduction.clicked)
        self.assert_connected_once(about_widget.clb_python_introduction,
                                   about_widget.clb_python_introduction.clicked)
        self.assert_connected_once(about_widget.clb_python_in_mantid,
                                   about_widget.clb_python_in_mantid.clicked)
        self.assert_connected_once(about_widget.clb_extending_mantid,
                                   about_widget.clb_extending_mantid.clicked)
        self.assert_connected_once(about_widget.pb_manage_user_directories,
                                   about_widget.pb_manage_user_directories.clicked)
        self.assert_connected_once(about_widget.lbl_privacy_policy,
                                   about_widget.lbl_privacy_policy.linkActivated)

    def test_setup_link_signals(self):
        presenter = AboutPresenter(None)
        about_widget = presenter.view.about_widget
        self.assert_connected_once(about_widget.clb_release_notes,
                                   about_widget.clb_release_notes.clicked)

    @patch(CONFIG_SERVICE_CLASSPATH, new_callable=MockConfigService)
    def test_that_about_presenter_is_instantiated_without_error_when_getFacility_causes_exception(self,
                                                                                                  MockConfigService):
        MockConfigService.getFacility.side_effect = RuntimeError(Mock(status=101), "No facility")
        presenter = AboutPresenter(None)
        self.assertEqual(3, MockConfigService.getFacility.call_count)
        self.assertEqual(presenter._get_current_facility(), None)


if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
fbradyirl/home-assistant | homeassistant/components/alexa/__init__.py | 4 | 2673 | """Support for Alexa skill service end point."""
import logging
import voluptuous as vol
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers import entityfilter
from homeassistant.const import CONF_NAME
from . import flash_briefings, intent, smart_home_http
from .const import (
    CONF_AUDIO,
    CONF_CLIENT_ID,
    CONF_CLIENT_SECRET,
    CONF_DISPLAY_URL,
    CONF_ENDPOINT,
    CONF_TEXT,
    CONF_TITLE,
    CONF_UID,
    DOMAIN,
    CONF_FILTER,
    CONF_ENTITY_CONFIG,
    CONF_DESCRIPTION,
    CONF_DISPLAY_CATEGORIES,
)
_LOGGER = logging.getLogger(__name__)
CONF_FLASH_BRIEFINGS = "flash_briefings"
CONF_SMART_HOME = "smart_home"
ALEXA_ENTITY_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_DESCRIPTION): cv.string,
        vol.Optional(CONF_DISPLAY_CATEGORIES): cv.string,
        vol.Optional(CONF_NAME): cv.string,
    }
)

SMART_HOME_SCHEMA = vol.Schema(
    {
        vol.Optional(CONF_ENDPOINT): cv.string,
        vol.Optional(CONF_CLIENT_ID): cv.string,
        vol.Optional(CONF_CLIENT_SECRET): cv.string,
        vol.Optional(CONF_FILTER, default={}): entityfilter.FILTER_SCHEMA,
        vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA},
    }
)

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: {
            CONF_FLASH_BRIEFINGS: {
                cv.string: vol.All(
                    cv.ensure_list,
                    [
                        {
                            vol.Optional(CONF_UID): cv.string,
                            vol.Required(CONF_TITLE): cv.template,
                            vol.Optional(CONF_AUDIO): cv.template,
                            vol.Required(CONF_TEXT, default=""): cv.template,
                            vol.Optional(CONF_DISPLAY_URL): cv.template,
                        }
                    ],
                )
            },
            # vol.Optional here would mean we couldn't distinguish between an empty
            # smart_home: and none at all.
            CONF_SMART_HOME: vol.Any(SMART_HOME_SCHEMA, None),
        }
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Activate the Alexa component."""
config = config.get(DOMAIN, {})
flash_briefings_config = config.get(CONF_FLASH_BRIEFINGS)
intent.async_setup(hass)
if flash_briefings_config:
flash_briefings.async_setup(hass, flash_briefings_config)
try:
smart_home_config = config[CONF_SMART_HOME]
except KeyError:
pass
else:
smart_home_config = smart_home_config or SMART_HOME_SCHEMA({})
await smart_home_http.async_setup(hass, smart_home_config)
return True
| apache-2.0 |
xmikos/qhangups | qhangups/ui_qhangupsconversationslist.py | 1 | 1909 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'qhangups/qhangupsconversationslist.ui'
#
# Created by: PyQt5 UI code generator 5.7
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_QHangupsConversationsList(object):
    def setupUi(self, QHangupsConversationsList):
        QHangupsConversationsList.setObjectName("QHangupsConversationsList")
        QHangupsConversationsList.resize(250, 500)
        self.centralwidget = QtWidgets.QWidget(QHangupsConversationsList)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout.setObjectName("verticalLayout")
        self.conversationsListWidget = QtWidgets.QListWidget(self.centralwidget)
        self.conversationsListWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
        self.conversationsListWidget.setObjectName("conversationsListWidget")
        self.verticalLayout.addWidget(self.conversationsListWidget)
        QHangupsConversationsList.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(QHangupsConversationsList)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 250, 27))
        self.menubar.setObjectName("menubar")
        QHangupsConversationsList.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(QHangupsConversationsList)
        self.statusbar.setObjectName("statusbar")
        QHangupsConversationsList.setStatusBar(self.statusbar)

        self.retranslateUi(QHangupsConversationsList)
        QtCore.QMetaObject.connectSlotsByName(QHangupsConversationsList)

    def retranslateUi(self, QHangupsConversationsList):
        _translate = QtCore.QCoreApplication.translate
        QHangupsConversationsList.setWindowTitle(_translate("QHangupsConversationsList", "QHangups"))
rixrix/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/response.py | 158 | 14749 | from collections import OrderedDict
from datetime import datetime, timedelta
import Cookie
import json
import types
import uuid
import socket
from constants import response_codes
from logger import get_logger
missing = object()
class Response(object):
"""Object representing the response to a HTTP request
:param handler: RequestHandler being used for this response
:param request: Request that this is the response for
.. attribute:: request
Request associated with this Response.
.. attribute:: encoding
The encoding to use when converting unicode to strings for output.
.. attribute:: add_required_headers
Boolean indicating whether mandatory headers should be added to the
response.
.. attribute:: send_body_for_head_request
Boolean, default False, indicating whether the body content should be
sent when the request method is HEAD.
.. attribute:: explicit_flush
Boolean indicating whether output should be flushed automatically or only
when requested.
.. attribute:: writer
The ResponseWriter for this response
.. attribute:: status
Status tuple (code, message). Can be set to an integer, in which case the
message part is filled in automatically, or a tuple.
.. attribute:: headers
List of HTTP headers to send with the response. Each item in the list is a
tuple of (name, value).
.. attribute:: content
The body of the response. This can either be a string or a iterable of response
parts. If it is an iterable, any item may be a string or a function of zero
parameters which, when called, returns a string."""
def __init__(self, handler, request):
self.request = request
self.encoding = "utf8"
self.add_required_headers = True
self.send_body_for_head_request = False
self.explicit_flush = False
self.close_connection = False
self.writer = ResponseWriter(handler, self)
self._status = (200, None)
self.headers = ResponseHeaders()
self.content = []
self.logger = get_logger()
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if hasattr(value, "__len__"):
if len(value) != 2:
raise ValueError
else:
self._status = (int(value[0]), str(value[1]))
else:
self._status = (int(value), None)
def set_cookie(self, name, value, path="/", domain=None, max_age=None,
expires=None, secure=False, httponly=False, comment=None):
"""Set a cookie to be sent with a Set-Cookie header in the
response
:param name: String name of the cookie
:param value: String value of the cookie
:param max_age: datetime.timedelta int representing the time (in seconds)
until the cookie expires
:param path: String path to which the cookie applies
:param domain: String domain to which the cookie applies
:param secure: Boolean indicating whether the cookie is marked as secure
:param httponly: Boolean indicating whether the cookie is marked as
HTTP Only
:param comment: String comment
:param expires: datetime.datetime or datetime.timedelta indicating a
time or interval from now when the cookie expires
"""
        days = dict((i + 1, name) for i, name in enumerate(["jan", "feb", "mar",
                                                            "apr", "may", "jun",
                                                            "jul", "aug", "sep",
                                                            "oct", "nov", "dec"]))
        if value is None:
            value = ''
            max_age = 0
            expires = timedelta(days=-1)

        if isinstance(expires, timedelta):
            expires = datetime.utcnow() + expires

        if expires is not None:
            expires_str = expires.strftime("%d %%s %Y %H:%M:%S GMT")
            expires_str = expires_str % days[expires.month]
            expires = expires_str

        if max_age is not None:
            if hasattr(max_age, "total_seconds"):
                max_age = int(max_age.total_seconds())
            max_age = "%.0d" % max_age

        m = Cookie.Morsel()

        def maybe_set(key, value):
            if value is not None and value is not False:
                m[key] = value

        m.set(name, value, value)
        maybe_set("path", path)
        maybe_set("domain", domain)
        maybe_set("comment", comment)
        maybe_set("expires", expires)
        maybe_set("max-age", max_age)
        maybe_set("secure", secure)
        maybe_set("httponly", httponly)

        self.headers.append("Set-Cookie", m.OutputString())

    def unset_cookie(self, name):
        """Remove a cookie from those that are being sent with the response"""
        cookies = self.headers.get("Set-Cookie")
        parser = Cookie.BaseCookie()
        for cookie in cookies:
            parser.load(cookie)

        if name in parser.keys():
            del self.headers["Set-Cookie"]
            for m in parser.values():
                if m.key != name:
                    self.headers.append(("Set-Cookie", m.OutputString()))

    def delete_cookie(self, name, path="/", domain=None):
        """Delete a cookie on the client by setting it to the empty string
        and to expire in the past"""
        self.set_cookie(name, None, path=path, domain=domain, max_age=0,
                        expires=timedelta(days=-1))

    def iter_content(self):
        """Iterator returning chunks of response body content.

        If any part of the content is a function, this will be called
        and the resulting value (if any) returned."""
        if type(self.content) in types.StringTypes:
            yield self.content
        else:
            for item in self.content:
                if hasattr(item, "__call__"):
                    value = item()
                else:
                    value = item
                if value:
                    yield value

    def write_status_headers(self):
        """Write out the status line and headers for the response"""
        self.writer.write_status(*self.status)
        for item in self.headers:
            self.writer.write_header(*item)
        self.writer.end_headers()

    def write_content(self):
        """Write out the response content"""
        if self.request.method != "HEAD" or self.send_body_for_head_request:
            for item in self.iter_content():
                self.writer.write_content(item)

    def write(self):
        """Write the whole response"""
        self.write_status_headers()
        self.write_content()

    def set_error(self, code, message=""):
        """Set the response status headers and body to indicate an
        error"""
        err = {"code": code,
               "message": message}
        data = json.dumps({"error": err})
        self.status = code
        self.headers = [("Content-Type", "text/json"),
                        ("Content-Length", len(data))]
        self.content = data
        if code == 500:
            self.logger.error(message)

class MultipartContent(object):
    def __init__(self, boundary=None, default_content_type=None):
        self.items = []
        if boundary is None:
            boundary = str(uuid.uuid4())
        self.boundary = boundary
        self.default_content_type = default_content_type

    def __call__(self):
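        # serialize every part separated by the boundary delimiter; the final
        # boundary is closed with a trailing "--"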
boundary = "--" + self.boundary
rv = ["", boundary]
for item in self.items:
rv.append(str(item))
rv.append(boundary)
rv[-1] += "--"
return "\r\n".join(rv)
def append_part(self, data, content_type=None, headers=None):
if content_type is None:
content_type = self.default_content_type
self.items.append(MultipartPart(data, content_type, headers))
def __iter__(self):
#This is hackish; when writing the response we need an iterable
#or a string. For a multipart/byterange response we want an
#iterable that contains a single callable; the MultipartContent
#object itself
yield self
class MultipartPart(object):
    def __init__(self, data, content_type=None, headers=None):
        self.headers = ResponseHeaders()

        if content_type is not None:
            self.headers.set("Content-Type", content_type)

        if headers is not None:
            for name, value in headers:
                if name.lower() == "content-type":
                    func = self.headers.set
                else:
                    func = self.headers.append
                func(name, value)

        self.data = data

    def __str__(self):
        rv = []
        for item in self.headers:
            rv.append("%s: %s" % item)
        rv.append("")
        rv.append(self.data)
        return "\r\n".join(rv)

class ResponseHeaders(object):
"""Dictionary-like object holding the headers for the response"""
def __init__(self):
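        # maps lower-cased header name -> (original-cased name, [values])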
        self.data = OrderedDict()
    def set(self, key, value):
        """Set a header to a specific value, overwriting any previous header
        with the same name

        :param key: Name of the header to set
        :param value: Value to set the header to
        """
        self.data[key.lower()] = (key, [value])

    def append(self, key, value):
        """Add a new header with a given name, not overwriting any existing
        headers with the same name

        :param key: Name of the header to add
        :param value: Value to set for the header
        """
        if key.lower() in self.data:
            self.data[key.lower()][1].append(value)
        else:
            self.set(key, value)

    def get(self, key, default=missing):
        """Get the set values for a particular header."""
        try:
            return self[key]
        except KeyError:
            if default is missing:
                return []
            return default

    def __getitem__(self, key):
        """Get a list of values for a particular header"""
        return self.data[key.lower()][1]

    def __delitem__(self, key):
        del self.data[key.lower()]

    def __contains__(self, key):
        return key.lower() in self.data

    def __setitem__(self, key, value):
        self.set(key, value)

    def __iter__(self):
        for key, values in self.data.itervalues():
            for value in values:
                yield key, value

    def items(self):
        return list(self)

    def update(self, items_iter):
        for name, value in items_iter:
            self.set(name, value)

    def __repr__(self):
        return repr(self.data)

class ResponseWriter(object):
"""Object providing an API to write out a HTTP response.
:param handler: The RequestHandler being used.
:param response: The Response associated with this writer.
After each part of the response is written, the output is
flushed unless response.explicit_flush is False, in which case
the user must call .flush() explicitly."""
def __init__(self, handler, response):
self._wfile = handler.wfile
self._response = response
self._handler = handler
self._headers_seen = set()
self._headers_complete = False
self.content_written = False
self.request = response.request
def write_status(self, code, message=None):
"""Write out the status line of a response.
:param code: The integer status code of the response.
:param message: The message of the response. Defaults to the message commonly used
with the status code."""
if message is None:
if code in response_codes:
message = response_codes[code][0]
else:
message = ''
self.write("%s %d %s\r\n" %
(self._response.request.protocol_version, code, message))
def write_header(self, name, value):
"""Write out a single header for the response.
:param name: Name of the header field
:param value: Value of the header field
"""
self._headers_seen.add(name.lower())
self.write("%s: %s\r\n" % (name, value))
if not self._response.explicit_flush:
self.flush()
def write_default_headers(self):
for name, f in [("Server", self._handler.version_string),
("Date", self._handler.date_time_string)]:
if name.lower() not in self._headers_seen:
self.write_header(name, f())
if (type(self._response.content) in (str, unicode) and
"content-length" not in self._headers_seen):
#Would be nice to avoid double-encoding here
self.write_header("Content-Length", len(self.encode(self._response.content)))
def end_headers(self):
"""Finish writing headers and write the separator.
Unless add_required_headers on the response is False,
this will also add HTTP-mandated headers that have not yet been supplied
to the response headers"""
if self._response.add_required_headers:
self.write_default_headers()
self.write("\r\n")
if "content-length" not in self._headers_seen:
self._response.close_connection = True
if not self._response.explicit_flush:
self.flush()
self._headers_complete = True
def write_content(self, data):
"""Write the body of the response."""
self.write(self.encode(data))
if not self._response.explicit_flush:
self.flush()
def write(self, data):
"""Write directly to the response, converting unicode to bytes
according to response.encoding. Does not flush."""
self.content_written = True
try:
self._wfile.write(self.encode(data))
except socket.error:
# This can happen if the socket got closed by the remote end
pass
def encode(self, data):
"""Convert unicode to bytes according to response.encoding."""
if isinstance(data, str):
return data
elif isinstance(data, unicode):
return data.encode(self._response.encoding)
else:
raise ValueError
def flush(self):
"""Flush the output."""
try:
self._wfile.flush()
except socket.error:
# This can happen if the socket got closed by the remote end
pass
| mpl-2.0 |
miurahr/translate | translate/storage/placeables/terminology.py | 25 | 3498 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
Contains the placeable that represents a terminology term.
"""
from translate.storage.placeables import StringElem, base
__all__ = ['TerminologyPlaceable', 'parsers']
class TerminologyPlaceable(base.Ph):
"""Terminology distinguished from the rest of a string by being
a placeable."""
matchers = []
"""A list of matcher objects to use to identify terminology."""
translations = []
"""The available translations for this placeable."""
def __init__(self, *args, **kwargs):
self.translations = []
super(TerminologyPlaceable, self).__init__(*args, **kwargs)
@classmethod
def parse(cls, pstr):
parts = []
matches = []
match_info = {}
for matcher in cls.matchers:
matches.extend(matcher.matches(pstr))
match_info.update(matcher.match_info)
lastend = 0
def sort_matches(x, y):
# This function will sort a list of matches according to the
# match's starting position, putting the one with the longer
# source text first, if two are the same.
c = cmp(match_info[x.source]['pos'], match_info[y.source]['pos'])
return c and c or cmp(len(y.source), len(x.source))
matches.sort(sort_matches)
for match in matches:
info = match_info[match.source]
if info['pos'] < lastend:
continue
end = info['pos'] + len(match.source)
if 'newtermlen' in info:
end = info['pos'] + info['newtermlen']
if lastend < info['pos']:
parts.append(StringElem(pstr[lastend:info['pos']]))
term_string = pstr[info['pos']:end]
term_placeable = cls([term_string])
parts.append(term_placeable)
# Get translations for the placeable
for m in matches:
m_info = match_info[m.source]
m_end = m_info['pos']
if 'newtermlen' in m_info:
m_end += m_info['newtermlen']
else:
m_end += len(m.source)
if info['pos'] == m_info['pos'] and end == m_end:
term_placeable.translations.append(m.target)
# remove duplicates:
term_placeable.translations = list(set(term_placeable.translations))
lastend = end
if lastend != len(pstr) and parts:
parts.append(StringElem(pstr[lastend:]))
return parts or None
def translate(self):
return (self.translations and self.translations[0] or
super(TerminologyPlaceable, self).translate())
parsers = [TerminologyPlaceable.parse]
| gpl-2.0 |
vikkyrk/incubator-beam | sdks/python/apache_beam/runners/direct/consumer_tracking_pipeline_visitor_test.py | 1 | 4343 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for consumer_tracking_pipeline_visitor."""
import logging
import unittest
from apache_beam import pvalue
from apache_beam.io import iobase
from apache_beam.io import Read
from apache_beam.pipeline import Pipeline
from apache_beam.pvalue import AsList
from apache_beam.runners.direct import DirectRunner
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import ConsumerTrackingPipelineVisitor
from apache_beam.transforms import CoGroupByKey
from apache_beam.transforms import Create
from apache_beam.transforms import DoFn
from apache_beam.transforms import FlatMap
from apache_beam.transforms import Flatten
from apache_beam.transforms import ParDo
# Disable frequent lint warning due to pipe operator for chaining transforms.
# pylint: disable=expression-not-assigned
# pylint: disable=pointless-statement
class ConsumerTrackingPipelineVisitorTest(unittest.TestCase):
    def setUp(self):
        self.pipeline = Pipeline(DirectRunner())
        self.visitor = ConsumerTrackingPipelineVisitor()

    def test_root_transforms(self):

        class DummySource(iobase.BoundedSource):
            pass

        root_read = Read(DummySource())
        root_flatten = Flatten(pipeline=self.pipeline)

        pbegin = pvalue.PBegin(self.pipeline)
        pcoll_read = pbegin | 'read' >> root_read
        pcoll_read | FlatMap(lambda x: x)
        [] | 'flatten' >> root_flatten

        self.pipeline.visit(self.visitor)

        root_transforms = sorted(
            [t.transform for t in self.visitor.root_transforms])
        self.assertEqual(root_transforms, sorted(
            [root_read, root_flatten]))

        pbegin_consumers = sorted(
            [c.transform for c in self.visitor.value_to_consumers[pbegin]])
        self.assertEqual(pbegin_consumers, sorted([root_read]))
        self.assertEqual(len(self.visitor.step_names), 3)

    def test_side_inputs(self):

        class SplitNumbersFn(DoFn):

            def process(self, element):
                if element < 0:
                    yield pvalue.OutputValue('tag_negative', element)
                else:
                    yield element

        class ProcessNumbersFn(DoFn):

            def process(self, element, negatives):
                yield element

        class DummySource(iobase.BoundedSource):
            pass

        root_read = Read(DummySource())

        result = (self.pipeline
                  | 'read' >> root_read
                  | ParDo(SplitNumbersFn()).with_outputs('tag_negative',
                                                         main='positive'))
        positive, negative = result
        positive | ParDo(ProcessNumbersFn(), AsList(negative))

        self.pipeline.visit(self.visitor)

        root_transforms = sorted(
            [t.transform for t in self.visitor.root_transforms])
        self.assertEqual(root_transforms, sorted([root_read]))
        self.assertEqual(len(self.visitor.step_names), 3)
        self.assertEqual(len(self.visitor.views), 1)
        self.assertTrue(isinstance(self.visitor.views[0],
                                   pvalue.AsList))

    def test_co_group_by_key(self):
        emails = self.pipeline | 'email' >> Create([('joe', 'joe@example.com')])
        phones = self.pipeline | 'phone' >> Create([('mary', '111-222-3333')])
        {'emails': emails, 'phones': phones} | CoGroupByKey()

        self.pipeline.visit(self.visitor)

        root_transforms = sorted(
            [t.transform for t in self.visitor.root_transforms])
        self.assertEqual(len(root_transforms), 2)
        self.assertGreater(
            len(self.visitor.step_names), 3)  # 2 creates + expanded CoGBK
        self.assertEqual(len(self.visitor.views), 0)


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
| apache-2.0 |
VigTech/Vigtech-Services | env/lib/python2.7/site-packages/django/db/backends/base/validation.py | 81 | 1492 | from django.core import checks
class BaseDatabaseValidation(object):
"""
This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"""
By default, there is no backend-specific validation.
This method has been deprecated by the new checks framework. New
backends should implement check_field instead.
"""
# This is deliberately commented out. It exists as a marker to
# remind us to remove this method, and the check_field() shim,
# when the time comes.
# warnings.warn('"validate_field" has been deprecated", RemovedInDjango19Warning)
pass
def check_field(self, field, **kwargs):
class ErrorList(list):
"""A dummy list class that emulates API used by the older
validate_field() method. When validate_field() is fully
deprecated, this dummy can be removed too.
"""
def add(self, opts, error_message):
self.append(checks.Error(error_message, hint=None, obj=field))
errors = ErrorList()
# Some tests create fields in isolation -- the fields are not attached
# to any model, so they have no `model` attribute.
opts = field.model._meta if hasattr(field, 'model') else None
self.validate_field(errors, opts, field)
return list(errors)
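# Usage sketch (not part of the original file): a backend validation
# instance is available as connection.validation, so a single field can
# be checked with:
#   errors = connection.validation.check_field(field)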
| lgpl-3.0 |
mohamedhagag/community-addons | sale_product_set/tests/test_product_set.py | 16 | 3756 | # -*- coding: utf-8 -*-
from openerp.tests import common
from openerp.exceptions import except_orm
class test_product_set(common.TransactionCase):
""" Test Product set"""
def setUp(self):
super(test_product_set, self).setUp()
self.sale_order = self.env['sale.order']
self.product_set_add = self.env['product.set.add']
self.product_set = self.env['product.set']
def test_add_set(self):
so = self.env.ref('sale.sale_order_6')
count_lines = len(so.order_line)
untaxed_amount = so.amount_untaxed
tax_amount = so.amount_tax
total_amount = so.amount_total
product_set = self.env.ref(
'sale_product_set.product_set_i5_computer')
# Simulation the opening of the wizard and adding a set on the
# current sale order
so_set = self.product_set_add.with_context(
active_id=so.id).create({'product_set_id': product_set.id,
'quantity': 2})
so_set.add_set()
# checking our sale order
self.assertEquals(len(so.order_line), count_lines + 4)
# untaxed_amount + ((147*1)+(2100*1)+(2000*1)+(85*2)) * 2
self.assertEquals(so.amount_untaxed, untaxed_amount + 8834.0)
self.assertEquals(so.amount_tax, tax_amount + 0) # without tax
self.assertEquals(so.amount_total, total_amount + 8834.0)
sequence = {}
for line in so.order_line:
sequence[line.product_id.id] = line.sequence
for set_line in product_set.set_line_ids:
if line.product_id.id == set_line.product_id.id:
self.assertEquals(line.product_id.name,
set_line.product_id.name)
# make sure sale order line sequence keep sequence set on set
seq_line1 = sequence.pop(
self.env.ref(
"sale_product_set.product_set_line_computer_2"
).product_id.id)
seq_line2 = sequence.pop(
self.env.ref(
"sale_product_set.product_set_line_computer_4"
).product_id.id)
seq_line3 = sequence.pop(
self.env.ref(
"sale_product_set.product_set_line_computer_1"
).product_id.id)
seq_line4 = sequence.pop(
self.env.ref(
"sale_product_set.product_set_line_computer_3"
).product_id.id)
self.assertTrue(max([v for k, v in sequence.iteritems()]) <
seq_line1 < seq_line2 < seq_line3 < seq_line4)
def test_add_set_on_empty_so(self):
so = self.sale_order.create({
'partner_id': self.ref('base.res_partner_1')})
product_set = self.env.ref(
'sale_product_set.product_set_i5_computer')
so_set = self.product_set_add.with_context(
active_id=so.id).create({'product_set_id': product_set.id,
'quantity': 2})
so_set.add_set()
self.assertEquals(len(so.order_line), 4)
def test_copy_product_set(self):
pdt_set = self.env.ref('sale_product_set.product_set_i5_computer')
pdt_set_copy = pdt_set.copy()
self.assertEquals(
len(pdt_set.set_line_ids),
len(pdt_set_copy.set_line_ids)
)
self.assertNotEquals(
pdt_set.set_line_ids,
pdt_set_copy.set_line_ids
)
self.assertEquals(
pdt_set.name + " (copy)",
pdt_set_copy.name
)
def test_copy_multi_product_set(self):
productset = self.product_set.search([])
self.assertTrue(len(productset) > 0)
with self.assertRaises(except_orm):
productset.copy()
| agpl-3.0 |
khkaminska/scikit-learn | sklearn/externals/joblib/pool.py | 237 | 23894 | """Custom implementation of multiprocessing.Pool with custom pickler
This module provides efficient ways of working with data stored in
shared memory with numpy.memmap arrays without inducing any memory
copy between the parent and child processes.
This module should not be imported if multiprocessing is not
available as it implements subclasses of multiprocessing Pool
that use a custom alternative to SimpleQueue.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Copyright: 2012, Olivier Grisel
# License: BSD 3 clause
from mmap import mmap
import errno
import os
import stat
import sys
import threading
import atexit
import tempfile
import shutil
try:
# Python 2 compat
from cPickle import loads
from cPickle import dumps
except ImportError:
from pickle import loads
from pickle import dumps
try:
    import copyreg  # Python 3
except ImportError:
    # Python 2 ships the same module under the name copy_reg
    import copy_reg as copyreg
# Customizable pure Python pickler in Python 2
# customizable C-optimized pickler under Python 3.3+
from pickle import Pickler
from pickle import HIGHEST_PROTOCOL
from io import BytesIO
from ._multiprocessing_helpers import mp, assert_spawning
# We need the class definition to derive from it not the multiprocessing.Pool
# factory function
from multiprocessing.pool import Pool
try:
import numpy as np
from numpy.lib.stride_tricks import as_strided
except ImportError:
np = None
from .numpy_pickle import load
from .numpy_pickle import dump
from .hashing import hash
# Some systems have a ramdisk mounted by default; we can use it instead of /tmp
# as the default folder to dump big arrays to share with subprocesses
SYSTEM_SHARED_MEM_FS = '/dev/shm'
# Folder and file permissions to chmod temporary files generated by the
# memmaping pool. Only the owner of the Python process can access the
# temporary files and folder.
FOLDER_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
FILE_PERMISSIONS = stat.S_IRUSR | stat.S_IWUSR
###############################################################################
# Support for efficient transient pickling of numpy data structures
def _get_backing_memmap(a):
"""Recursively look up the original np.memmap instance base if any"""
b = getattr(a, 'base', None)
if b is None:
# TODO: check scipy sparse datastructure if scipy is installed
# neither a nor its descendants have a memmap base
return None
elif isinstance(b, mmap):
# a is already a real memmap instance.
return a
else:
# Recursive exploration of the base ancestry
return _get_backing_memmap(b)
def has_shareable_memory(a):
"""Return True if a is backed by some mmap buffer directly or not"""
return _get_backing_memmap(a) is not None
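# Illustrative doctest-style sketch (not in the original module; assumes
# numpy is available and /tmp is writable):
#
#   >>> m = np.memmap('/tmp/example.mmap', dtype='float64', shape=(3,), mode='w+')
#   >>> has_shareable_memory(m)
#   True
#   >>> has_shareable_memory(np.zeros(3))
#   False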
def _strided_from_memmap(filename, dtype, mode, offset, order, shape, strides,
total_buffer_len):
"""Reconstruct an array view on a memmory mapped file"""
if mode == 'w+':
# Do not zero the original data when unpickling
mode = 'r+'
if strides is None:
# Simple, contiguous memmap
return np.memmap(filename, dtype=dtype, shape=shape, mode=mode,
offset=offset, order=order)
else:
# For non-contiguous data, memmap the total enclosing buffer and then
# extract the non-contiguous view with the stride-tricks API
base = np.memmap(filename, dtype=dtype, shape=total_buffer_len,
mode=mode, offset=offset, order=order)
return as_strided(base, shape=shape, strides=strides)
def _reduce_memmap_backed(a, m):
"""Pickling reduction for memmap backed arrays
a is expected to be an instance of np.ndarray (or np.memmap)
m is expected to be an instance of np.memmap on the top of the ``base``
attribute ancestry of a. ``m.base`` should be the real python mmap object.
"""
# offset that comes from the striding differences between a and m
a_start, a_end = np.byte_bounds(a)
m_start = np.byte_bounds(m)[0]
offset = a_start - m_start
# offset from the backing memmap
offset += m.offset
if m.flags['F_CONTIGUOUS']:
order = 'F'
else:
# The backing memmap buffer is necessarily contiguous hence C if not
# Fortran
order = 'C'
if a.flags['F_CONTIGUOUS'] or a.flags['C_CONTIGUOUS']:
# If the array is a contiguous view, no need to pass the strides
strides = None
total_buffer_len = None
else:
# Compute the total number of items to map from which the strided
# view will be extracted.
strides = a.strides
total_buffer_len = (a_end - a_start) // a.itemsize
return (_strided_from_memmap,
(m.filename, a.dtype, m.mode, offset, order, a.shape, strides,
total_buffer_len))
def reduce_memmap(a):
"""Pickle the descriptors of a memmap instance to reopen on same file"""
m = _get_backing_memmap(a)
if m is not None:
# m is a real mmap backed memmap instance, reduce a preserving striding
# information
return _reduce_memmap_backed(a, m)
else:
# This memmap instance is actually backed by a regular in-memory
# buffer: this can happen when using binary operators on numpy.memmap
# instances
return (loads, (dumps(np.asarray(a), protocol=HIGHEST_PROTOCOL),))
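# Hedged round-trip sketch (illustrative, not in the original source):
# pickling a strided view through reduce_memmap reopens the backing file
# instead of copying the data.
#
#   >>> m = np.memmap('/tmp/example.mmap', dtype='float64', shape=(4,), mode='w+')
#   >>> constructor, args = reduce_memmap(m[1:3])
#   >>> view = constructor(*args)
#   >>> has_shareable_memory(view)
#   True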
class ArrayMemmapReducer(object):
"""Reducer callable to dump large arrays to memmap files.
Parameters
----------
max_nbytes: int
Threshold to trigger memmaping of large arrays to files created
in a folder.
temp_folder: str
Path of a folder where files for backing memmaped arrays are created.
mmap_mode: 'r', 'r+' or 'c'
Mode for the created memmap datastructure. See the documentation of
numpy.memmap for more details. Note: 'w+' is coerced to 'r+'
automatically to avoid zeroing the data on unpickling.
verbose: int, optional, 0 by default
If verbose > 0, memmap creations are logged.
If verbose > 1, both memmap creations, reuse and array pickling are
logged.
context_id: int, optional, None by default
Set to a value identifying a call context to spare costly hashing of
the content of the input arrays when it is safe to assume that each
array will not be mutated by the parent process for the duration of the
dispatch process. This is the case when using the high level Parallel
API. It might not be the case when using the MemmapingPool API
directly.
prewarm: bool, optional, False by default.
Force a read on newly memmaped arrays to make sure that the OS
pre-caches them in memory. This can be useful to avoid concurrent disk access when the
same data array is passed to different worker processes.
"""
def __init__(self, max_nbytes, temp_folder, mmap_mode, verbose=0,
context_id=None, prewarm=True):
self._max_nbytes = max_nbytes
self._temp_folder = temp_folder
self._mmap_mode = mmap_mode
self.verbose = int(verbose)
self._context_id = context_id
self._prewarm = prewarm
def __call__(self, a):
m = _get_backing_memmap(a)
if m is not None:
# a is already backed by a memmap file, let's reuse it directly
return _reduce_memmap_backed(a, m)
if (not a.dtype.hasobject
and self._max_nbytes is not None
and a.nbytes > self._max_nbytes):
# check that the folder exists (lazily create the pool temp folder
# if required)
try:
os.makedirs(self._temp_folder)
os.chmod(self._temp_folder, FOLDER_PERMISSIONS)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# Find a unique, concurrency-safe filename for writing the
# content of this array only once.
if self._context_id is not None:
marker = self._context_id
else:
marker = hash(a)
basename = "%d-%d-%d-%s.pkl" % (
os.getpid(), id(threading.current_thread()), id(a), marker)
filename = os.path.join(self._temp_folder, basename)
# In case the same array with the same content is passed several
# times to the pool subprocess children, serialize it only once
# XXX: implement an explicit reference counting scheme to make it
# possible to delete temporary files as soon as the workers are
# done processing this data.
if not os.path.exists(filename):
if self.verbose > 0:
print("Memmaping (shape=%r, dtype=%s) to new file %s" % (
a.shape, a.dtype, filename))
for dumped_filename in dump(a, filename):
os.chmod(dumped_filename, FILE_PERMISSIONS)
if self._prewarm:
# Warm up the data to avoid concurrent disk access in
# multiple children processes
load(filename, mmap_mode=self._mmap_mode).max()
elif self.verbose > 1:
print("Memmaping (shape=%s, dtype=%s) to old file %s" % (
a.shape, a.dtype, filename))
# Let's use the memmap reducer
return reduce_memmap(load(filename, mmap_mode=self._mmap_mode))
else:
# do not convert a into memmap, let pickler do its usual copy with
# the default system pickler
if self.verbose > 1:
print("Pickling array (shape=%r, dtype=%s)." % (
a.shape, a.dtype))
return (loads, (dumps(a, protocol=HIGHEST_PROTOCOL),))
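# Wiring sketch: this mirrors what MemmapingPool.__init__ does further
# below (the folder path is illustrative):
#
#   reducer = ArrayMemmapReducer(max_nbytes=1e6, temp_folder='/tmp/joblib',
#                                mmap_mode='r')
#   forward_reducers[np.ndarray] = reducer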
###############################################################################
# Enable custom pickling in Pool queues
class CustomizablePickler(Pickler):
"""Pickler that accepts custom reducers.
HIGHEST_PROTOCOL is selected by default as this pickler is used
to pickle ephemeral datastructures for interprocess communication,
hence no backward compatibility is required.
`reducers` is expected to be a dictionary whose items are
`(type, callable)` pairs where `callable` is a function that,
given an instance of `type`, will return a tuple `(constructor,
tuple_of_objects)` to rebuild an instance out of the pickled
`tuple_of_objects`, as a `__reduce__` method would. See the
standard library documentation on pickling for more details.
"""
# We override the pure Python pickler as it's the only way to be able to
# customize the dispatch table without side effects in Python 2.6
# to 3.2. For Python 3.3+ leverage the new dispatch_table
# feature from http://bugs.python.org/issue14166 that makes it possible
# to use the C implementation of the Pickler which is faster.
def __init__(self, writer, reducers=None, protocol=HIGHEST_PROTOCOL):
Pickler.__init__(self, writer, protocol=protocol)
if reducers is None:
reducers = {}
if hasattr(Pickler, 'dispatch'):
# Make the dispatch registry an instance level attribute instead of
# a reference to the class dictionary under Python 2
self.dispatch = Pickler.dispatch.copy()
else:
# Under Python 3 initialize the dispatch table with a copy of the
# default registry
self.dispatch_table = copyreg.dispatch_table.copy()
for type, reduce_func in reducers.items():
self.register(type, reduce_func)
def register(self, type, reduce_func):
if hasattr(Pickler, 'dispatch'):
# Python 2 pickler dispatching is not explicitly customizable.
# Let us use a closure to workaround this limitation.
def dispatcher(self, obj):
reduced = reduce_func(obj)
self.save_reduce(obj=obj, *reduced)
self.dispatch[type] = dispatcher
else:
self.dispatch_table[type] = reduce_func
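# Minimal registration sketch (MyType and reduce_mytype are hypothetical
# names, not part of joblib):
#
#   buffer = BytesIO()
#   pickler = CustomizablePickler(buffer, reducers={MyType: reduce_mytype})
#   pickler.dump(obj)
#   payload = buffer.getvalue()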
class CustomizablePicklingQueue(object):
"""Locked Pipe implementation that uses a customizable pickler.
This class is an alternative to the multiprocessing implementation
of SimpleQueue in order to make it possible to pass custom
pickling reducers, for instance to avoid memory copy when passing
memory mapped datastructures.
`reducers` is expected to be a dictionary whose items are
`(type, callable)` pairs where `callable` is a function that,
given an instance of `type`, will return a tuple `(constructor,
tuple_of_objects)` to rebuild an instance out of the pickled
`tuple_of_objects`, as a `__reduce__` method would. See the
standard library documentation on pickling for more details.
"""
def __init__(self, context, reducers=None):
self._reducers = reducers
self._reader, self._writer = context.Pipe(duplex=False)
self._rlock = context.Lock()
if sys.platform == 'win32':
self._wlock = None
else:
self._wlock = context.Lock()
self._make_methods()
def __getstate__(self):
assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock,
self._reducers)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock,
self._reducers) = state
self._make_methods()
def empty(self):
return not self._reader.poll()
def _make_methods(self):
self._recv = recv = self._reader.recv
racquire, rrelease = self._rlock.acquire, self._rlock.release
def get():
racquire()
try:
return recv()
finally:
rrelease()
self.get = get
if self._reducers:
def send(obj):
buffer = BytesIO()
CustomizablePickler(buffer, self._reducers).dump(obj)
self._writer.send_bytes(buffer.getvalue())
self._send = send
else:
self._send = send = self._writer.send
if self._wlock is None:
# writes to a message oriented win32 pipe are atomic
self.put = send
else:
wlock_acquire, wlock_release = (
self._wlock.acquire, self._wlock.release)
def put(obj):
wlock_acquire()
try:
return send(obj)
finally:
wlock_release()
self.put = put
class PicklingPool(Pool):
"""Pool implementation with customizable pickling reducers.
This is useful to control how data is shipped between processes
and makes it possible to use shared memory without useless
copies induced by the default pickling methods of the original
objects passed as arguments to dispatch.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries whose items are `(type, callable)` pairs where
`callable` is a function that, given an instance of `type`, will return
a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
of the pickled `tuple_of_objects`, as a `__reduce__` method would.
See the standard library documentation on pickling for more
details.
"""
def __init__(self, processes=None, forward_reducers=None,
backward_reducers=None, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
self._forward_reducers = forward_reducers
self._backward_reducers = backward_reducers
poolargs = dict(processes=processes)
poolargs.update(kwargs)
super(PicklingPool, self).__init__(**poolargs)
def _setup_queues(self):
context = getattr(self, '_ctx', mp)
self._inqueue = CustomizablePicklingQueue(context,
self._forward_reducers)
self._outqueue = CustomizablePicklingQueue(context,
self._backward_reducers)
self._quick_put = self._inqueue._send
self._quick_get = self._outqueue._recv
def delete_folder(folder_path):
"""Utility function to cleanup a temporary folder if still existing"""
if os.path.exists(folder_path):
shutil.rmtree(folder_path)
class MemmapingPool(PicklingPool):
"""Process pool that shares large arrays to avoid memory copy.
This drop-in replacement for `multiprocessing.pool.Pool` makes
it possible to work efficiently with shared memory in a numpy
context.
Existing instances of numpy.memmap are preserved: the child
subprocesses will have access to the same shared memory in the
original mode except for the 'w+' mode that is automatically
transformed as 'r+' to avoid zeroing the original data upon
instantiation.
Furthermore, large arrays from the parent process are automatically
dumped to a temporary folder on the filesystem so that child
processes can access their content via memmaping (file system
backed shared memory).
Note: it is important to call the terminate method to collect
the temporary folder used by the pool.
Parameters
----------
processes: int, optional
Number of worker processes running concurrently in the pool.
initializer: callable, optional
Callable executed on worker process creation.
initargs: tuple, optional
Arguments passed to the initializer callable.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
max_nbytes: int or None, optional, 1e6 by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder.
Use None to disable memmaping of large arrays.
forward_reducers: dictionary, optional
Reducers used to pickle objects passed from master to worker
processes: see below.
backward_reducers: dictionary, optional
Reducers used to pickle return values from workers back to the
master process.
verbose: int, optional
Make it possible to monitor how the communication of numpy arrays
with the subprocess is handled (pickling or memmaping)
context_id: int, optional, None by default
Set to a value identifying a call context to spare costly hashing of
the content of the input arrays when it is safe to assume that each
array will not be mutated by the parent process for the duration of the
dispatch process. This is the case when using the high level Parallel
API.
prewarm: bool or str, optional, "auto" by default.
If True, force a read on newly memmaped arrays to make sure the OS
pre-caches them in memory. This can be useful to avoid concurrent disk access
when the same data array is passed to different worker processes.
If "auto" (by default), prewarm is set to True, unless the Linux shared
memory partition /dev/shm is available and used as temp_folder.
`forward_reducers` and `backward_reducers` are expected to be
dictionaries whose items are `(type, callable)` pairs where
`callable` is a function that, given an instance of `type`, will return
a tuple `(constructor, tuple_of_objects)` to rebuild an instance out
of the pickled `tuple_of_objects`, as a `__reduce__` method would.
See the standard library documentation on pickling for more
details.
"""
def __init__(self, processes=None, temp_folder=None, max_nbytes=1e6,
mmap_mode='r', forward_reducers=None, backward_reducers=None,
verbose=0, context_id=None, prewarm=False, **kwargs):
if forward_reducers is None:
forward_reducers = dict()
if backward_reducers is None:
backward_reducers = dict()
# Prepare a sub-folder name for the serialization of this particular
# pool instance (do not create in advance to spare FS write access if
# no array is to be dumped):
use_shared_mem = False
pool_folder_name = "joblib_memmaping_pool_%d_%d" % (
os.getpid(), id(self))
if temp_folder is None:
temp_folder = os.environ.get('JOBLIB_TEMP_FOLDER', None)
if temp_folder is None:
if os.path.exists(SYSTEM_SHARED_MEM_FS):
try:
temp_folder = SYSTEM_SHARED_MEM_FS
pool_folder = os.path.join(temp_folder, pool_folder_name)
if not os.path.exists(pool_folder):
os.makedirs(pool_folder)
use_shared_mem = True
except IOError:
# Missing rights in the /dev/shm partition,
# fallback to regular temp folder.
temp_folder = None
if temp_folder is None:
# Fallback to the default tmp folder, typically /tmp
temp_folder = tempfile.gettempdir()
temp_folder = os.path.abspath(os.path.expanduser(temp_folder))
pool_folder = os.path.join(temp_folder, pool_folder_name)
self._temp_folder = pool_folder
# Register the garbage collector at program exit in case caller forgets
# to call terminate explicitly: note we do not pass any reference to
# self to ensure that this callback won't prevent garbage collection of
# the pool instance and related file handler resources such as POSIX
# semaphores and pipes
atexit.register(lambda: delete_folder(pool_folder))
if np is not None:
# Register smart numpy.ndarray reducers that detect memmap backed
# arrays and that are also able to dump to memmap large in-memory
# arrays over the max_nbytes threshold
if prewarm == "auto":
prewarm = not use_shared_mem
forward_reduce_ndarray = ArrayMemmapReducer(
max_nbytes, pool_folder, mmap_mode, verbose,
context_id=context_id, prewarm=prewarm)
forward_reducers[np.ndarray] = forward_reduce_ndarray
forward_reducers[np.memmap] = reduce_memmap
# Communication from child process to the parent process always
# pickles in-memory numpy.ndarray without dumping them as memmap
# to avoid confusing the caller and making it tricky to collect the
# temporary folder
backward_reduce_ndarray = ArrayMemmapReducer(
None, pool_folder, mmap_mode, verbose)
backward_reducers[np.ndarray] = backward_reduce_ndarray
backward_reducers[np.memmap] = reduce_memmap
poolargs = dict(
processes=processes,
forward_reducers=forward_reducers,
backward_reducers=backward_reducers)
poolargs.update(kwargs)
super(MemmapingPool, self).__init__(**poolargs)
def terminate(self):
super(MemmapingPool, self).terminate()
delete_folder(self._temp_folder)
| bsd-3-clause |
dpac-vlsi/SynchroTrace | src/arch/x86/isa/insts/general_purpose/data_conversion/ascii_adjust.py | 91 | 2402 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = ""
#let {{
# class AAA(Inst):
# "GenFault ${new UnimpInstFault}"
# class AAD(Inst):
# "GenFault ${new UnimpInstFault}"
# class AAM(Inst):
# "GenFault ${new UnimpInstFault}"
# class AAS(Inst):
# "GenFault ${new UnimpInstFault}"
#}};
| bsd-3-clause |
mapr/sahara | sahara/plugins/hdp/configprovider.py | 2 | 2923 | # Copyright (c) 2013 Hortonworks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara import exceptions
from sahara.i18n import _
from sahara.plugins import provisioning as p
class ConfigurationProvider:
def __init__(self, config):
self.config = config
self.config_mapper = {}
self.config_items = []
self._initialize(config)
def get_config_items(self):
return self.config_items
def get_applicable_target(self, name):
return self.config_mapper.get(name)
def _get_target(self, apptarget):
if apptarget == 'TODO':
apptarget = 'general'
return apptarget
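# Example (sketch): properties whose applicable_target is still marked
# 'TODO' in the ambari config resource are filed under 'general', while
# real targets pass through unchanged:
#   self._get_target('TODO') == 'general'
#   self._get_target('HDFS') == 'HDFS'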
def _initialize(self, config):
for configuration in self.config['configurations']:
for service_property in configuration['properties']:
config = p.Config(service_property['name'],
self._get_target(
service_property['applicable_target']),
service_property['scope'],
config_type=service_property['config_type'],
default_value=service_property
['default_value'],
is_optional=service_property[
'is_optional'],
description=service_property[
'description'])
setattr(config, 'tag', configuration['tag'].rsplit(".", 1)[0])
self.config_items.append(config)
# TODO(jspeidel): an assumption is made that property names
# are unique across configuration sections which is dangerous
property_name = service_property['name']
# if property already exists, throw an exception
if property_name in self.config_mapper:
# internal error
# ambari-config-resource contains duplicates
raise exceptions.InvalidDataException(
_('Internal Error. Duplicate property '
'name detected: %s') % property_name)
self.config_mapper[service_property['name']] = (
self._get_target(
service_property['applicable_target']))
| apache-2.0 |
huang4fstudio/django | django/contrib/gis/geos/point.py | 338 | 4419 | from ctypes import c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.utils import six
from django.utils.six.moves import range
class Point(GEOSGeometry):
_minlength = 2
_maxlength = 3
has_cs = True
def __init__(self, x, y=None, z=None, srid=None):
"""
The Point object may be initialized with either a tuple, or individual
parameters.
For Example:
>>> p = Point((5, 23)) # 2D point, passed in as a tuple
>>> p = Point(5, 23, 8) # 3D point, passed in with individual parameters
"""
if isinstance(x, (tuple, list)):
# Here a tuple or list was passed in under the `x` parameter.
ndim = len(x)
coords = x
elif isinstance(x, six.integer_types + (float,)) and isinstance(y, six.integer_types + (float,)):
# Here X, Y, and (optionally) Z were passed in individually, as parameters.
if isinstance(z, six.integer_types + (float,)):
ndim = 3
coords = [x, y, z]
else:
ndim = 2
coords = [x, y]
else:
raise TypeError('Invalid parameters given for Point initialization.')
point = self._create_point(ndim, coords)
# Initializing using the address returned from the GEOS
# createPoint factory.
super(Point, self).__init__(point, srid=srid)
def _create_point(self, ndim, coords):
"""
Create a coordinate sequence, set X, Y, [Z], and create point
"""
if ndim < 2 or ndim > 3:
raise TypeError('Invalid point dimension: %s' % str(ndim))
cs = capi.create_cs(c_uint(1), c_uint(ndim))
i = iter(coords)
capi.cs_setx(cs, 0, next(i))
capi.cs_sety(cs, 0, next(i))
if ndim == 3:
capi.cs_setz(cs, 0, next(i))
return capi.create_point(cs)
def _set_list(self, length, items):
ptr = self._create_point(length, items)
if ptr:
capi.destroy_geom(self.ptr)
self._ptr = ptr
self._set_cs()
else:
# can this happen?
raise GEOSException('Geometry resulting from slice deletion was invalid.')
def _set_single(self, index, value):
self._cs.setOrdinate(index, 0, value)
def __iter__(self):
"Allows iteration over coordinates of this Point."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of dimensions for this Point (either 0, 2 or 3)."
if self.empty:
return 0
if self.hasz:
return 3
else:
return 2
def _get_single_external(self, index):
if index == 0:
return self.x
elif index == 1:
return self.y
elif index == 2:
return self.z
_get_single_internal = _get_single_external
def get_x(self):
"Returns the X component of the Point."
return self._cs.getOrdinate(0, 0)
def set_x(self, value):
"Sets the X component of the Point."
self._cs.setOrdinate(0, 0, value)
def get_y(self):
"Returns the Y component of the Point."
return self._cs.getOrdinate(1, 0)
def set_y(self, value):
"Sets the Y component of the Point."
self._cs.setOrdinate(1, 0, value)
def get_z(self):
"Returns the Z component of the Point."
if self.hasz:
return self._cs.getOrdinate(2, 0)
else:
return None
def set_z(self, value):
"Sets the Z component of the Point."
if self.hasz:
self._cs.setOrdinate(2, 0, value)
else:
raise GEOSException('Cannot set Z on 2D Point.')
# X, Y, Z properties
x = property(get_x, set_x)
y = property(get_y, set_y)
z = property(get_z, set_z)
# ### Tuple setting and retrieval routines. ###
def get_coords(self):
"Returns a tuple of the point."
return self._cs.tuple
def set_coords(self, tup):
"Sets the coordinates of the point with the given tuple."
self._cs[0] = tup
# The tuple and coords properties
tuple = property(get_coords, set_coords)
coords = tuple
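# Usage sketch (GEOS stores coordinates as doubles, so they come back as
# floats):
#   >>> p = Point(5, 23)
#   >>> p.coords
#   (5.0, 23.0)
#   >>> p.z is None
#   True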
| bsd-3-clause |
easy-as-pie-labs/tweap | tweap/tweap/middleware.py | 2 | 1773 | import re
from django.conf import settings
from django.contrib.auth.decorators import login_required
class RequireLoginMiddleware(object):
"""
Middleware component that wraps the login_required decorator around
matching URL patterns. To use, add the class to MIDDLEWARE_CLASSES and
define LOGIN_REQUIRED_URLS and LOGIN_REQUIRED_URLS_EXCEPTIONS in your
settings.py. For example:
------
LOGIN_REQUIRED_URLS = (
r'/topsecret/(.*)$',
)
LOGIN_REQUIRED_URLS_EXCEPTIONS = (
r'/topsecret/login(.*)$',
r'/topsecret/logout(.*)$',
)
------
LOGIN_REQUIRED_URLS is where you define URL patterns; each pattern must
be a valid regex.
LOGIN_REQUIRED_URLS_EXCEPTIONS is, conversely, where you explicitly
define any exceptions (like login and logout URLs).
"""
def __init__(self):
self.required = tuple(re.compile(url) for url in settings.LOGIN_REQUIRED_URLS)
self.exceptions = tuple(re.compile(url) for url in settings.LOGIN_REQUIRED_URLS_EXCEPTIONS)
def process_view(self, request, view_func, view_args, view_kwargs):
# No need to process URLs if user already logged in
if request.user.is_authenticated():
return None
# An exception match should immediately return None
for url in self.exceptions:
if url.match(request.path):
return None
# Requests matching a restricted URL pattern are returned
# wrapped with the login_required decorator
for url in self.required:
if url.match(request.path):
return login_required(view_func)(request, *view_args, **view_kwargs)
# Explicitly return None for all non-matching requests
return None | gpl-3.0 |
GuessWhoSamFoo/pandas | pandas/tests/sparse/frame/test_analytics.py | 2 | 1118 | import numpy as np
import pytest
from pandas import DataFrame, SparseDataFrame, SparseSeries
from pandas.util import testing as tm
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)')
def test_quantile():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = 0.1
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseSeries(dense_expected)
tm.assert_series_equal(result, dense_expected)
tm.assert_sp_series_equal(result, sparse_expected)
@pytest.mark.xfail(reason='Wrong SparseBlock initialization (GH#17386)')
def test_quantile_multi():
# GH 17386
data = [[1, 1], [2, 10], [3, 100], [np.nan, np.nan]]
q = [0.1, 0.5]
sparse_df = SparseDataFrame(data)
result = sparse_df.quantile(q)
dense_df = DataFrame(data)
dense_expected = dense_df.quantile(q)
sparse_expected = SparseDataFrame(dense_expected)
tm.assert_frame_equal(result, dense_expected)
tm.assert_sp_frame_equal(result, sparse_expected)
| bsd-3-clause |
andreparrish/python-for-android | python-modules/twisted/twisted/tap/socks.py | 61 | 1148 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am a support module for making SOCKSv4 servers with twistd.
"""
from twisted.protocols import socks
from twisted.python import usage
from twisted.application import internet
import sys
class Options(usage.Options):
synopsis = "[-i <interface>] [-p <port>] [-l <file>]"
optParameters = [["interface", "i", "127.0.0.1", "local interface to which we listen"],
["port", "p", 1080, "Port on which to listen"],
["log", "l", None, "file to log connection data to"]]
zsh_actions = {"log" : "_files -g '*.log'"}
longdesc = "Makes a SOCKSv4 server."
def makeService(config):
if config["interface"] != "127.0.0.1":
print
print "WARNING:"
print " You have chosen to listen on a non-local interface."
print " This may allow intruders to access your local network"
print " if you run this on a firewall."
print
t = socks.SOCKSv4Factory(config['log'])
portno = int(config['port'])
return internet.TCPServer(portno, t, interface=config['interface'])
| apache-2.0 |
mohamed--abdel-maksoud/chromium.src | chrome/common/extensions/docs/server2/test_servlet.py | 78 | 2781 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from extensions_paths import PUBLIC_TEMPLATES
from instance_servlet import (
InstanceServlet, InstanceServletRenderServletDelegate)
from link_error_detector import LinkErrorDetector, StringifyBrokenLinks
from servlet import Request, Response, Servlet
class BrokenLinkTester(object):
'''Run link error detector tests.
'''
def __init__(self, server_instance, renderer):
self.link_error_detector = LinkErrorDetector(
server_instance.host_file_system_provider.GetMaster(),
renderer,
PUBLIC_TEMPLATES,
root_pages=('extensions/index.html', 'apps/about_apps.html'))
def TestBrokenLinks(self):
broken_links = self.link_error_detector.GetBrokenLinks()
return (
len(broken_links),
'Warning: Found %d broken links:\n%s' % (
len(broken_links), StringifyBrokenLinks(broken_links)))
def TestOrphanedPages(self):
orphaned_pages = self.link_error_detector.GetOrphanedPages()
return (
len(orphaned_pages),
'Warning: Found %d orphaned pages:\n%s' % (
len(orphaned_pages), '\n'.join(orphaned_pages)))
class TestServlet(Servlet):
'''Runs tests against the live server. Supports running all broken link
detection tests, in parts or all at once.
'''
def __init__(self, request, delegate_for_test=None):
Servlet.__init__(self, request)
self._delegate = delegate_for_test or InstanceServlet.Delegate()
def Get(self):
link_error_tests = ('broken_links', 'orphaned_pages', 'link_errors')
if not self._request.path in link_error_tests:
return Response.NotFound('Test %s not found. Available tests are: %s' % (
self._request.path, ','.join(link_error_tests)))
constructor = InstanceServlet.GetConstructor(self._delegate)
def renderer(path):
return constructor(Request(path, '', self._request.headers)).Get()
link_tester = BrokenLinkTester(
InstanceServletRenderServletDelegate(
self._delegate).CreateServerInstance(),
renderer)
if self._request.path == 'broken_links':
errors, content = link_tester.TestBrokenLinks()
elif self._request.path == 'orphaned_pages':
errors, content = link_tester.TestOrphanedPages()
else:
link_errors, link_content = link_tester.TestBrokenLinks()
orphaned_errors, orphaned_content = link_tester.TestOrphanedPages()
errors = link_errors + orphaned_errors
content = "%s\n%s" % (link_content, orphaned_content)
if errors:
return Response.InternalError(content=content)
return Response.Ok(content="%s test passed." % self._request.path)
| bsd-3-clause |
tvtsoft/odoo8 | addons/survey/controllers/main.py | 1 | 20571 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import json
import logging
import werkzeug
import werkzeug.utils
from datetime import datetime
from math import ceil
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT as DTF, ustr
_logger = logging.getLogger(__name__)
class WebsiteSurvey(http.Controller):
## HELPER METHODS ##
def _check_bad_cases(self, cr, uid, request, survey_obj, survey, user_input_obj, context=None):
# In case of bad survey, redirect to surveys list
if survey_obj.exists(cr, SUPERUSER_ID, survey.id, context=context) == []:
return werkzeug.utils.redirect("/survey/")
# In case of auth required, block public user
if survey.auth_required and uid == request.website.user_id.id:
return request.website.render("survey.auth_required", {'survey': survey})
# In case of non open surveys
if survey.stage_id.closed:
return request.website.render("survey.notopen")
# If there is no pages
if not survey.page_ids:
return request.website.render("survey.nopages")
# Everything seems to be ok
return None
def _check_deadline(self, cr, uid, user_input, context=None):
'''Prevent opening of the survey if the deadline has passed.
! This will NOT disallow access to users who have already partially filled the survey !'''
if user_input.deadline:
dt_deadline = datetime.strptime(user_input.deadline, DTF)
dt_now = datetime.now()
if dt_now > dt_deadline: # survey is not open anymore
return request.website.render("survey.notopen")
return None
## ROUTES HANDLERS ##
# Survey start
@http.route(['/survey/start/<model("survey.survey"):survey>',
'/survey/start/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def start_survey(self, survey, token=None, **post):
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Test mode
if token and token == "phantom":
_logger.info("[survey] Phantom mode")
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id, 'test_entry': True}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
# END Test mode
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Manual surveying
if not token:
vals = {'survey_id': survey.id}
if request.website.user_id.id != uid:
vals['partner_id'] = request.registry['res.users'].browse(cr, uid, uid, context=context).partner_id.id
user_input_id = user_input_obj.create(cr, uid, vals, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
else:
try:
user_input_id = user_input_obj.search(cr, SUPERUSER_ID, [('token', '=', token)], context=context)[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, SUPERUSER_ID, [user_input_id], context=context)[0]
# Do not open expired survey
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # Intro page
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
else:
return request.redirect('/survey/fill/%s/%s' % (survey.id, user_input.token))
# Survey displaying
@http.route(['/survey/fill/<model("survey.survey"):survey>/<string:token>',
'/survey/fill/<model("survey.survey"):survey>/<string:token>/<string:prev>'],
type='http', auth='public', website=True)
def fill_survey(self, survey, token, prev=None, **post):
'''Display and validates a survey'''
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Load the user_input
try:
user_input_id = user_input_obj.search(cr, SUPERUSER_ID, [('token', '=', token)])[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, SUPERUSER_ID, [user_input_id], context=context)[0]
# Do not display expired survey (even if some pages have already been
# displayed -- There's a time for everything!)
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # First page
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, 0, go_back=False, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
elif user_input.state == 'done': # Display success message
return request.website.render('survey.sfinished', {'survey': survey,
'token': token,
'user_input': user_input})
elif user_input.state == 'skip':
flag = (True if prev and prev == 'prev' else False)
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=flag, context=context)
#special case if you click "previous" from the last page, then leave the survey, then reopen it from the URL, avoid crash
if not page:
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=True, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
else:
return request.website.render("website.403")
# AJAX prefilling of a survey
@http.route(['/survey/prefill/<model("survey.survey"):survey>/<string:token>',
'/survey/prefill/<model("survey.survey"):survey>/<string:token>/<model("survey.page"):page>'],
type='http', auth='public', website=True)
def prefill(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch previous answers
if page:
ids = user_input_line_obj.search(cr, SUPERUSER_ID, [('user_input_id.token', '=', token), ('page_id', '=', page.id)], context=context)
else:
ids = user_input_line_obj.search(cr, SUPERUSER_ID, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Return non empty answers in a JSON compatible format
for answer in previous_answers:
if not answer.skipped:
answer_tag = '%s_%s_%s' % (answer.survey_id.id, answer.page_id.id, answer.question_id.id)
answer_value = None
if answer.answer_type == 'free_text':
answer_value = answer.value_free_text
elif answer.answer_type == 'text' and answer.question_id.type == 'textbox':
answer_value = answer.value_text
elif answer.answer_type == 'text' and answer.question_id.type != 'textbox':
# here come comment answers for matrices, simple choice and multiple choice
answer_tag = "%s_%s" % (answer_tag, 'comment')
answer_value = answer.value_text
elif answer.answer_type == 'number':
answer_value = answer.value_number.__str__()
elif answer.answer_type == 'date':
answer_value = answer.value_date
elif answer.answer_type == 'suggestion' and not answer.value_suggested_row:
answer_value = answer.value_suggested.id
elif answer.answer_type == 'suggestion' and answer.value_suggested_row:
answer_tag = "%s_%s" % (answer_tag, answer.value_suggested_row.id)
answer_value = answer.value_suggested.id
if answer_value:
dict_soft_update(ret, answer_tag, answer_value)
else:
_logger.warning("[survey] No answer has been found for question %s marked as non skipped" % answer_tag)
return json.dumps(ret)
# AJAX scores loading for quiz correction mode
@http.route(['/survey/scores/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def get_scores(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch answers
ids = user_input_line_obj.search(cr, SUPERUSER_ID, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Compute score for each question
for answer in previous_answers:
tmp_score = ret.get(answer.question_id.id, 0.0)
ret.update({answer.question_id.id: tmp_score + answer.quizz_mark})
return json.dumps(ret)
# AJAX submission of a page
@http.route(['/survey/submit/<model("survey.survey"):survey>'],
type='http', methods=['POST'], auth='public', website=True)
def submit(self, survey, **post):
_logger.debug('Incoming data: %s', post)
page_id = int(post['page_id'])
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
questions_obj = request.registry['survey.question']
questions_ids = questions_obj.search(cr, uid, [('page_id', '=', page_id)], context=context)
questions = questions_obj.browse(cr, uid, questions_ids, context=context)
# Answer validation
errors = {}
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
errors.update(questions_obj.validate_question(cr, uid, question, post, answer_tag, context=context))
ret = {}
if (len(errors) != 0):
# Return errors messages to webpage
ret['errors'] = errors
else:
# Store answers into database
user_input_obj = request.registry['survey.user_input']
user_input_line_obj = request.registry['survey.user_input_line']
try:
user_input_id = user_input_obj.search(cr, SUPERUSER_ID, [('token', '=', post['token'])], context=context)[0]
except KeyError: # Invalid token
return request.website.render("website.403")
user_input = user_input_obj.browse(cr, SUPERUSER_ID, user_input_id, context=context)
user_id = uid if user_input.type != 'link' else SUPERUSER_ID
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
user_input_line_obj.save_lines(cr, user_id, user_input_id, question, post, answer_tag, context=context)
go_back = post['button_submit'] == 'previous'
next_page, _, last = survey_obj.next_page(cr, uid, user_input, page_id, go_back=go_back, context=context)
vals = {'last_displayed_page_id': page_id}
if next_page is None and not go_back:
vals.update({'state': 'done'})
else:
vals.update({'state': 'skip'})
user_input_obj.write(cr, user_id, user_input_id, vals, context=context)
ret['redirect'] = '/survey/fill/%s/%s' % (survey.id, post['token'])
if go_back:
ret['redirect'] += '/prev'
return json.dumps(ret)
# Printing routes
@http.route(['/survey/print/<model("survey.survey"):survey>',
'/survey/print/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def print_survey(self, survey, token=None, **post):
'''Display an survey in printable view; if <token> is set, it will
grab the answers of the user_input_id that has <token>.'''
return request.website.render('survey.survey_print',
{'survey': survey,
'token': token,
'page_nr': 0,
'quizz_correction': True if survey.quizz_mode and token else False})
@http.route(['/survey/results/<model("survey.survey"):survey>'],
type='http', auth='user', website=True)
def survey_reporting(self, survey, token=None, **post):
'''Display survey Results & Statistics for given survey.'''
result_template = 'survey.result'
current_filters = []
filter_display_data = []
filter_finish = False
survey_obj = request.registry['survey.survey']
if not survey.user_input_ids or not [input_id.id for input_id in survey.user_input_ids if input_id.state != 'new']:
result_template = 'survey.no_result'
if 'finished' in post:
post.pop('finished')
filter_finish = True
if post or filter_finish:
filter_data = self.get_filter_data(post)
current_filters = survey_obj.filter_input_ids(request.cr, request.uid, survey, filter_data, filter_finish, context=request.context)
filter_display_data = survey_obj.get_filter_display_data(request.cr, request.uid, filter_data, context=request.context)
return request.website.render(result_template,
{'survey': survey,
'survey_dict': self.prepare_result_dict(survey, current_filters),
'page_range': self.page_range,
'current_filters': current_filters,
'filter_display_data': filter_display_data,
'filter_finish': filter_finish
})
# Quick retroengineering of what is injected into the template for now:
# (TODO: flatten and simplify this)
#
# survey: a browse record of the survey
# survey_dict: very messy dict containing all the info to display answers
# {'page_ids': [
#
# ...
#
# {'page': browse record of the page,
# 'question_ids': [
#
# ...
#
# {'graph_data': data to be displayed on the graph
# 'input_summary': number of answered, skipped...
# 'prepare_result': {
# answers displayed in the tables
# }
# 'question': browse record of the question_ids
# }
#
# ...
#
# ]
# }
#
# ...
#
# ]
# }
#
# page_range: pager helper function
# current_filters: a list of ids
# filter_display_data: [{'labels': ['a', 'b'], question_text} ... ]
# filter_finish: boolean => only finished surveys or not
#
def prepare_result_dict(self,survey, current_filters=None):
"""Returns dictionary having values for rendering template"""
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = {'page_ids': []}
for page in survey.page_ids:
page_dict = {'page': page, 'question_ids': []}
for question in page.question_ids:
question_dict = {'question':question, 'input_summary':survey_obj.get_input_summary(request.cr, request.uid, question, current_filters, context=request.context), 'prepare_result':survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context), 'graph_data': self.get_graph_data(question, current_filters)}
page_dict['question_ids'].append(question_dict)
result['page_ids'].append(page_dict)
return result
def get_filter_data(self, post):
"""Returns data used for filtering the result"""
filters = []
for ids in post:
#if user add some random data in query URI, ignore it
try:
row_id, answer_id = ids.split(',')
filters.append({'row_id': int(row_id), 'answer_id': int(answer_id)})
except:
return filters
return filters
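# Example (sketch): a query string such as ?5,12&6,13 yields
#   [{'row_id': 5, 'answer_id': 12}, {'row_id': 6, 'answer_id': 13}]
# (list order depends on dict iteration over post).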
def page_range(self, total_record, limit):
'''Returns number of pages required for pagination'''
total = ceil(total_record / float(limit))
return range(1, int(total + 1))
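# Example (sketch): with 25 records and a limit of 10 per page,
# self.page_range(25, 10) == [1, 2, 3].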
def get_graph_data(self, question, current_filters=None):
'''Returns formatted data required by graph library on basis of filter'''
# TODO refactor this terrible method and merge it with prepare_result_dict
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = []
if question.type == 'multiple_choice':
result.append({'key': ustr(question.question),
'values': survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
})
if question.type == 'simple_choice':
result = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
if question.type == 'matrix':
data = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
for answer in data['answers']:
values = []
for row in data['rows']:
values.append({'text': data['rows'].get(row), 'count': data['result'].get((row, answer))})
result.append({'key': data['answers'].get(answer), 'values': values})
return json.dumps(result)
def dict_soft_update(dictionary, key, value):
''' Insert the pair <key>: <value> into the <dictionary>. If <key> is
already present, this function will append <value> to the list of
existing data (instead of erasing it) '''
if key in dictionary:
dictionary[key].append(value)
else:
dictionary.update({key: [value]})
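# Example (sketch):
#   d = {}
#   dict_soft_update(d, 'q1', 'a')
#   dict_soft_update(d, 'q1', 'b')
#   # d is now {'q1': ['a', 'b']}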
| agpl-3.0 |
ammarkhann/FinalSeniorCode | lib/python2.7/site-packages/sympy/polys/domains/pythonrationalfield.py | 117 | 2234 | """Implementation of :class:`PythonRationalField` class. """
from __future__ import print_function, division
from sympy.polys.domains.rationalfield import RationalField
from sympy.polys.domains.groundtypes import PythonInteger, PythonRational, SymPyRational
from sympy.polys.polyerrors import CoercionFailed
from sympy.utilities import public
@public
class PythonRationalField(RationalField):
"""Rational field based on Python rational number type. """
dtype = PythonRational
zero = dtype(0)
one = dtype(1)
alias = 'QQ_python'
def __init__(self):
pass
def get_ring(self):
"""Returns ring associated with ``self``. """
from sympy.polys.domains import PythonIntegerRing
return PythonIntegerRing()
def to_sympy(self, a):
"""Convert `a` to a SymPy object. """
return SymPyRational(a.numerator, a.denominator)
def from_sympy(self, a):
"""Convert SymPy's Rational to `dtype`. """
if a.is_Rational:
return PythonRational(a.p, a.q)
elif a.is_Float:
from sympy.polys.domains import RR
p, q = RR.to_rational(a)
return PythonRational(int(p), int(q))
else:
raise CoercionFailed("expected `Rational` object, got %s" % a)
def from_ZZ_python(K1, a, K0):
"""Convert a Python `int` object to `dtype`. """
return PythonRational(a)
def from_QQ_python(K1, a, K0):
"""Convert a Python `Fraction` object to `dtype`. """
return a
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY `mpz` object to `dtype`. """
return PythonRational(PythonInteger(a))
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY `mpq` object to `dtype`. """
return PythonRational(PythonInteger(a.numer()),
PythonInteger(a.denom()))
def from_RealField(K1, a, K0):
"""Convert a mpmath `mpf` object to `dtype`. """
p, q = K0.to_rational(a)
return PythonRational(int(p), int(q))
def numer(self, a):
"""Returns numerator of `a`. """
return a.numerator
def denom(self, a):
"""Returns denominator of `a`. """
return a.denominator
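# Illustrative usage (a sketch using names from this module):
#   QQ_python = PythonRationalField()
#   QQ_python.from_sympy(SymPyRational(3, 4))  # -> PythonRational(3, 4)
#   QQ_python.numer(PythonRational(3, 4))      # -> 3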
| mit |
ABcDexter/cython | tests/run/purecdef.py | 28 | 2308 | import cython
from cython import cfunc, cclass, ccall
@cython.test_assert_path_exists('//CFuncDefNode')
@cython.cfunc
def ftang():
x = 0
@cython.test_assert_path_exists('//CFuncDefNode')
@cfunc
def fpure(a):
return a*2
def test():
"""
>>> test()
4
"""
ftang()
return fpure(2)
with cfunc:
@cython.test_assert_path_exists('//CFuncDefNode')
def fwith1(a):
return a*3
@cython.test_assert_path_exists('//CFuncDefNode')
def fwith2(a):
return a*4
with cclass:
@cython.test_assert_path_exists('//CClassDefNode')
class Egg(object):
pass
@cython.test_assert_path_exists('//CClassDefNode')
class BigEgg(object):
@cython.test_assert_path_exists('//CFuncDefNode')
@cython.cfunc
def f(self, a):
return a*10
def test_with():
"""
>>> test_with()
(3, 4, 50)
"""
return fwith1(1), fwith2(1), BigEgg().f(5)
@cython.test_assert_path_exists('//CClassDefNode')
@cython.cclass
class PureFoo(object):
a = cython.declare(cython.double)
def __init__(self, a):
self.a = a
def __call__(self):
return self.a
@cython.test_assert_path_exists('//CFuncDefNode')
@cython.cfunc
def puremeth(self, a):
return a*2
def test_method():
"""
>>> test_method()
4
True
"""
x = PureFoo(2)
print(x.puremeth(2))
if cython.compiled:
print(isinstance(x(), float))
else:
print(True)
return
@cython.ccall
def ccall_sqr(x):
return x*x
@cclass
class Overidable(object):
@ccall
def meth(self):
return 0
def test_ccall():
"""
>>> test_ccall()
25
>>> ccall_sqr(5)
25
"""
return ccall_sqr(5)
def test_ccall_method(x):
"""
>>> test_ccall_method(Overidable())
0
>>> Overidable().meth()
0
>>> class Foo(Overidable):
... def meth(self):
... return 1
>>> test_ccall_method(Foo())
1
>>> Foo().meth()
1
"""
return x.meth()
@cython.cfunc
@cython.returns(cython.p_int)
@cython.locals(xptr=cython.p_int)
def typed_return(xptr):
return xptr
def test_typed_return():
"""
>>> test_typed_return()
"""
x = cython.declare(int, 5)
assert typed_return(cython.address(x))[0] is x
| apache-2.0 |
ademmers/ansible | lib/ansible/module_utils/facts/namespace.py | 172 | 2366 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2017 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class FactNamespace:
def __init__(self, namespace_name):
self.namespace_name = namespace_name
def transform(self, name):
'''Take a text name and transform it as needed (add a namespace prefix, etc.)'''
return name
def _underscore(self, name):
return name.replace('-', '_')
class PrefixFactNamespace(FactNamespace):
def __init__(self, namespace_name, prefix=None):
super(PrefixFactNamespace, self).__init__(namespace_name)
self.prefix = prefix
def transform(self, name):
new_name = self._underscore(name)
return '%s%s' % (self.prefix, new_name)
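# Illustrative example: PrefixFactNamespace('ansible', prefix='ansible_')
# transforms 'system-vendor' into 'ansible_system_vendor'.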
| gpl-3.0 |
jcai19/smm_gem5 | src/arch/x86/isa/insts/simd128/integer/data_reordering/unpack_and_interleave.py | 91 | 5843 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
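# Note (descriptive comment added for clarity, not part of the upstream
# file): each "unpack" microop below interleaves elements of the given byte
# "size" from its two sources; "ext" selects which half of the interleaved
# result is written (ext=0 the low half, ext=1 the high half).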
microcode = '''
def macroop PUNPCKLBW_XMM_XMM {
unpack xmmh, xmml, xmmlm, ext=1, size=1
unpack xmml, xmml, xmmlm, ext=0, size=1
};
def macroop PUNPCKLBW_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=1
unpack xmml, xmml, ufp1, ext=0, size=1
};
def macroop PUNPCKLBW_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=1
unpack xmml, xmml, ufp1, ext=0, size=1
};
def macroop PUNPCKLWD_XMM_XMM {
unpack xmmh, xmml, xmmlm, ext=1, size=2
unpack xmml, xmml, xmmlm, ext=0, size=2
};
def macroop PUNPCKLWD_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=2
unpack xmml, xmml, ufp1, ext=0, size=2
};
def macroop PUNPCKLWD_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=2
unpack xmml, xmml, ufp1, ext=0, size=2
};
def macroop PUNPCKLDQ_XMM_XMM {
unpack xmmh, xmml, xmmlm, ext=1, size=4
unpack xmml, xmml, xmmlm, ext=0, size=4
};
def macroop PUNPCKLDQ_XMM_M {
ldfp ufp1, seg, sib, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=4
unpack xmml, xmml, ufp1, ext=0, size=4
};
def macroop PUNPCKLDQ_XMM_P {
rdip t7
ldfp ufp1, seg, riprel, disp, dataSize=8
unpack xmmh, xmml, ufp1, ext=1, size=4
unpack xmml, xmml, ufp1, ext=0, size=4
};
def macroop PUNPCKHBW_XMM_XMM {
unpack xmml, xmmh, xmmhm, ext=0, size=1
unpack xmmh, xmmh, xmmhm, ext=1, size=1
};
def macroop PUNPCKHBW_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=1
unpack xmmh, xmmh, ufp1, ext=1, size=1
};
def macroop PUNPCKHBW_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=1
unpack xmmh, xmmh, ufp1, ext=1, size=1
};
def macroop PUNPCKHWD_XMM_XMM {
unpack xmml, xmmh, xmmhm, ext=0, size=2
unpack xmmh, xmmh, xmmhm, ext=1, size=2
};
def macroop PUNPCKHWD_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=2
unpack xmmh, xmmh, ufp1, ext=1, size=2
};
def macroop PUNPCKHWD_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=2
unpack xmmh, xmmh, ufp1, ext=1, size=2
};
def macroop PUNPCKHDQ_XMM_XMM {
unpack xmml, xmmh, xmmhm, ext=0, size=4
unpack xmmh, xmmh, xmmhm, ext=1, size=4
};
def macroop PUNPCKHDQ_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=4
unpack xmmh, xmmh, ufp1, ext=1, size=4
};
def macroop PUNPCKHDQ_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
unpack xmml, xmmh, ufp1, ext=0, size=4
unpack xmmh, xmmh, ufp1, ext=1, size=4
};
def macroop PUNPCKHQDQ_XMM_XMM {
movfp xmml, xmmh
movfp xmmh, xmmhm
};
def macroop PUNPCKHQDQ_XMM_M {
lea t1, seg, sib, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
movfp xmml, xmmh
movfp xmmh, ufp1
};
def macroop PUNPCKHQDQ_XMM_P {
rdip t7
lea t1, seg, riprel, disp, dataSize=asz
ldfp ufp1, seg, [1, t0, t1], 8, dataSize=8
movfp xmml, xmmh
movfp xmmh, ufp1
};
def macroop PUNPCKLQDQ_XMM_XMM {
movfp xmmh, xmmlm
};
def macroop PUNPCKLQDQ_XMM_M {
ldfp xmmh, seg, sib, disp, dataSize=8
};
def macroop PUNPCKLQDQ_XMM_P {
rdip t7
ldfp xmmh, seg, riprel, disp, dataSize=8
};
'''
| bsd-3-clause |
Fokko/druid | distribution/bin/jar-notice-lister.py | 13 | 3773 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
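# Example invocation (illustrative paths):
#   jar-notice-lister.py /tmp/apache-druid-bin /tmp/notice-staging
# Both arguments must be existing directories; the staging directory is
# filled with copies of lib/, hadoop-dependencies/ and extensions/.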
existing_jar_dict_notice = {}
def main():
if len(sys.argv) != 3:
sys.stderr.write('usage: program <full extracted druid distribution path> <full tmp path>\n')
sys.exit(1)
druid_path = sys.argv[1]
tmp_path = sys.argv[2]
# copy everything in lib/ to the staging dir
lib_path = druid_path + "/lib"
tmp_lib_path = tmp_path + "/1-lib"
os.mkdir(tmp_lib_path)
command = "cp -r {}/* {}".format(lib_path, tmp_lib_path)
subprocess.check_output(command, shell=True).decode('UTF-8')
# copy hadoop deps to the staging dir
hdeps_path = druid_path + "/hadoop-dependencies"
tmp_hdeps_path = tmp_path + "/2-hdeps"
os.mkdir(tmp_hdeps_path)
command = "cp -r {}/* {}".format(hdeps_path, tmp_hdeps_path)
subprocess.check_output(command, shell=True).decode('UTF-8')
# copy all extension folders to the staging dir
ext_path = druid_path + "/extensions"
tmp_ext_path = tmp_path + "/3-ext"
os.mkdir(tmp_ext_path)
command = "cp -r {}/* {}".format(ext_path, tmp_ext_path)
subprocess.check_output(command, shell=True).decode('UTF-8')
get_notices(tmp_path)
def get_notices(tmp_jar_path):
print("********** Scanning directory for NOTICE" + tmp_jar_path + " **********")
jar_files = os.listdir(tmp_jar_path)
os.chdir(tmp_jar_path)
for jar_file in jar_files:
if os.path.isdir(jar_file):
get_notices(jar_file)
continue
elif not os.path.isfile(jar_file) or ".jar" not in jar_file:
continue
if existing_jar_dict_notice.get(jar_file) is not None:
print("---------- Already saw file: " + jar_file)
continue
else:
existing_jar_dict_notice[jar_file] = True
try:
command = "jar tf {} | grep NOTICE".format(jar_file)
outstr = subprocess.check_output(command, shell=True).decode('UTF-8')
except subprocess.CalledProcessError:
print("---------- no NOTICE file found in: " + jar_file)
continue
for line in outstr.splitlines():
try:
command = "jar xf {} {}".format(jar_file, line)
outstr = subprocess.check_output(command, shell=True).decode('UTF-8')
command = "mv {} {}.NOTICE-FILE".format(line, jar_file)
outstr = subprocess.check_output(command, shell=True).decode('UTF-8')
command = "cat {}.NOTICE-FILE".format(jar_file)
outstr = subprocess.check_output(command, shell=True).decode('UTF-8')
print("================= " + jar_file + " =================")
print(outstr)
print("\n")
except subprocess.CalledProcessError:
print("Error while grabbing NOTICE file: " + jar_file)
continue
os.chdir("..")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print('Interrupted, closing.')
| apache-2.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.5/django/contrib/localflavor/hr/hr_choices.py | 109 | 2799 | # -*- coding: utf-8 -*-
"""
Sources:
Croatian Counties: http://en.wikipedia.org/wiki/ISO_3166-2:HR
Croatia doesn't have official abbreviations for counties.
The ones provided are in common use.
"""
from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
HR_COUNTY_CHOICES = (
('GZG', _('Grad Zagreb')),
('BBŽ', _('Bjelovarsko-bilogorska županija')),
('BPŽ', _('Brodsko-posavska županija')),
('DNŽ', _('Dubrovačko-neretvanska županija')),
('IŽ', _('Istarska županija')),
('KŽ', _('Karlovačka županija')),
('KKŽ', _('Koprivničko-križevačka županija')),
('KZŽ', _('Krapinsko-zagorska županija')),
('LSŽ', _('Ličko-senjska županija')),
('MŽ', _('Međimurska županija')),
('OBŽ', _('Osječko-baranjska županija')),
('PSŽ', _('Požeško-slavonska županija')),
('PGŽ', _('Primorsko-goranska županija')),
('SMŽ', _('Sisačko-moslavačka županija')),
('SDŽ', _('Splitsko-dalmatinska županija')),
('ŠKŽ', _('Šibensko-kninska županija')),
('VŽ', _('Varaždinska županija')),
('VPŽ', _('Virovitičko-podravska županija')),
('VSŽ', _('Vukovarsko-srijemska županija')),
('ZDŽ', _('Zadarska županija')),
('ZGŽ', _('Zagrebačka županija')),
)
"""
Sources:
http://hr.wikipedia.org/wiki/Dodatak:Popis_registracijskih_oznaka_za_cestovna_vozila_u_Hrvatskoj
Only common license plate prefixes are provided. Special cases and obsolete prefixes are omitted.
"""
HR_LICENSE_PLATE_PREFIX_CHOICES = (
('BJ', 'BJ'),
('BM', 'BM'),
('ČK', 'ČK'),
('DA', 'DA'),
('DE', 'DE'),
('DJ', 'DJ'),
('DU', 'DU'),
('GS', 'GS'),
('IM', 'IM'),
('KA', 'KA'),
('KC', 'KC'),
('KR', 'KR'),
('KT', 'KT'),
('KŽ', 'KŽ'),
('MA', 'MA'),
('NA', 'NA'),
('NG', 'NG'),
('OG', 'OG'),
('OS', 'OS'),
('PU', 'PU'),
('PŽ', 'PŽ'),
('RI', 'RI'),
('SB', 'SB'),
('SK', 'SK'),
('SL', 'SL'),
('ST', 'ST'),
('ŠI', 'ŠI'),
('VK', 'VK'),
('VT', 'VT'),
('VU', 'VU'),
('VŽ', 'VŽ'),
('ZD', 'ZD'),
('ZG', 'ZG'),
('ŽU', 'ŽU'),
)
"""
The list includes county and cellular network phone number prefixes.
"""
HR_PHONE_NUMBER_PREFIX_CHOICES = (
('1', '01'),
('20', '020'),
('21', '021'),
('22', '022'),
('23', '023'),
('31', '031'),
('32', '032'),
('33', '033'),
('34', '034'),
('35', '035'),
('40', '040'),
('42', '042'),
('43', '043'),
('44', '044'),
('47', '047'),
('48', '048'),
('49', '049'),
('51', '051'),
('52', '052'),
('53', '053'),
('91', '091'),
('92', '092'),
('95', '095'),
('97', '097'),
('98', '098'),
('99', '099'),
)
| mit |
kisna72/django | django/contrib/gis/feeds.py | 336 | 5978 | from __future__ import unicode_literals
from django.contrib.syndication.views import Feed as BaseFeed
from django.utils.feedgenerator import Atom1Feed, Rss201rev2Feed
class GeoFeedMixin(object):
"""
This mixin provides the necessary routines for SyndicationFeed subclasses
to produce simple GeoRSS or W3C Geo elements.
"""
def georss_coords(self, coords):
"""
In GeoRSS, coordinate pairs are ordered by lat/lon and separated by
a single white space. Given a tuple of coordinates, this will return
a unicode GeoRSS representation.
"""
return ' '.join('%f %f' % (coord[1], coord[0]) for coord in coords)
def add_georss_point(self, handler, coords, w3c_geo=False):
"""
Adds a GeoRSS point with the given coords using the given handler.
Handles the differences between simple GeoRSS and the more popular
W3C Geo specification.
"""
if w3c_geo:
lon, lat = coords[:2]
handler.addQuickElement('geo:lat', '%f' % lat)
handler.addQuickElement('geo:lon', '%f' % lon)
else:
handler.addQuickElement('georss:point', self.georss_coords((coords,)))
def add_georss_element(self, handler, item, w3c_geo=False):
"""
This routine adds a GeoRSS XML element using the given item and handler.
"""
# Getting the Geometry object.
geom = item.get('geometry')
if geom is not None:
if isinstance(geom, (list, tuple)):
# Special case if a tuple/list was passed in. The tuple may be
# a point or a box
box_coords = None
if isinstance(geom[0], (list, tuple)):
# Box: ( (X0, Y0), (X1, Y1) )
if len(geom) == 2:
box_coords = geom
else:
raise ValueError('Only should be two sets of coordinates.')
else:
if len(geom) == 2:
# Point: (X, Y)
self.add_georss_point(handler, geom, w3c_geo=w3c_geo)
elif len(geom) == 4:
# Box: (X0, Y0, X1, Y1)
box_coords = (geom[:2], geom[2:])
else:
raise ValueError('Only should be 2 or 4 numeric elements.')
# If a GeoRSS box was given via tuple.
if box_coords is not None:
if w3c_geo:
raise ValueError('Cannot use simple GeoRSS box in W3C Geo feeds.')
handler.addQuickElement('georss:box', self.georss_coords(box_coords))
else:
# Getting the lower-case geometry type.
gtype = str(geom.geom_type).lower()
if gtype == 'point':
self.add_georss_point(handler, geom.coords, w3c_geo=w3c_geo)
else:
if w3c_geo:
raise ValueError('W3C Geo only supports Point geometries.')
# For formatting consistent w/the GeoRSS simple standard:
# http://georss.org/1.0#simple
if gtype in ('linestring', 'linearring'):
handler.addQuickElement('georss:line', self.georss_coords(geom.coords))
elif gtype in ('polygon',):
# Only support the exterior ring.
handler.addQuickElement('georss:polygon', self.georss_coords(geom[0].coords))
else:
raise ValueError('Geometry type "%s" not supported.' % geom.geom_type)
# ### SyndicationFeed subclasses ###
class GeoRSSFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(GeoRSSFeed, self).rss_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoRSSFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoRSSFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class GeoAtom1Feed(Atom1Feed, GeoFeedMixin):
def root_attributes(self):
attrs = super(GeoAtom1Feed, self).root_attributes()
attrs['xmlns:georss'] = 'http://www.georss.org/georss'
return attrs
def add_item_elements(self, handler, item):
super(GeoAtom1Feed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item)
def add_root_elements(self, handler):
super(GeoAtom1Feed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed)
class W3CGeoFeed(Rss201rev2Feed, GeoFeedMixin):
def rss_attributes(self):
attrs = super(W3CGeoFeed, self).rss_attributes()
attrs['xmlns:geo'] = 'http://www.w3.org/2003/01/geo/wgs84_pos#'
return attrs
def add_item_elements(self, handler, item):
super(W3CGeoFeed, self).add_item_elements(handler, item)
self.add_georss_element(handler, item, w3c_geo=True)
def add_root_elements(self, handler):
super(W3CGeoFeed, self).add_root_elements(handler)
self.add_georss_element(handler, self.feed, w3c_geo=True)
# ### Feed subclass ###
class Feed(BaseFeed):
"""
This is a subclass of the `Feed` from `django.contrib.syndication`.
This allows users to define `geometry(obj)` and/or `item_geometry(item)`
methods on their own subclasses so that geo-referenced information may
be placed in the feed.
"""
feed_type = GeoRSSFeed
def feed_extra_kwargs(self, obj):
return {'geometry': self.__get_dynamic_attr('geometry', obj)}
def item_extra_kwargs(self, item):
return {'geometry': self.__get_dynamic_attr('item_geometry', item)}
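# Minimal usage sketch (illustrative; the City model and its "point" field
# are assumed, not part of this module):
#
# class CityFeed(Feed):
#     def items(self):
#         return City.objects.all()
#
#     def item_geometry(self, item):
#         return item.point  # emitted as a georss:point in each entry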
| bsd-3-clause |
maxtorete/frappe | frappe/integrations/doctype/social_login_keys/social_login_keys.py | 3 | 1099 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import requests
import socket
from frappe.model.document import Document
from frappe import _
from six.moves.urllib.parse import urlparse
class SocialLoginKeys(Document):
def validate(self):
self.validate_frappe_server_url()
def validate_frappe_server_url(self):
if self.frappe_server_url:
if self.frappe_server_url.endswith('/'):
self.frappe_server_url = self.frappe_server_url[:-1]
try:
frappe_server_hostname = urlparse(self.frappe_server_url).netloc
except Exception:
frappe.throw(_("Check Frappe Server URL"))
if socket.gethostname() != frappe_server_hostname or \
(frappe.local.conf.domains is not None) and \
(frappe_server_hostname not in frappe.local.conf.domains):
try:
requests.get(self.frappe_server_url + "/api/method/frappe.handler.version", timeout=5)
except Exception:
frappe.throw(_("Unable to make request to the Frappe Server URL"))
| mit |
crossbario/autobahn-python | examples/asyncio/wamp/component/backend.py | 3 | 1163 |
from autobahn.asyncio.component import Component, run
from autobahn.wamp.types import RegisterOptions
import asyncio
import ssl
context = ssl.create_default_context(
purpose=ssl.Purpose.SERVER_AUTH,
cafile='../../../router/.crossbar/server.crt',
)
component = Component(
transports=[
{
"type": "websocket",
"url": "wss://localhost:8083/ws",
"endpoint": {
"type": "tcp",
"host": "localhost",
"port": 8083,
"tls": context,
},
"options": {
"open_handshake_timeout": 100,
}
},
],
realm="crossbardemo",
)
@component.on_join
def join(session, details):
print("joined {}".format(details))
@component.register(
"example.foo",
options=RegisterOptions(details_arg='details'),
)
@asyncio.coroutine
def foo(*args, **kw):
print("foo({}, {})".format(args, kw))
for x in range(5, 0, -1):
print(" returning in {}".format(x))
yield from asyncio.sleep(1)
print("returning '42'")
return 42
if __name__ == "__main__":
run([component])
| mit |
msrb/samba | examples/scripts/shares/python/modify_samba_config.py | 90 | 2399 | #!/usr/bin/env python
######################################################################
##
## Simple add/delete/change share command script for Samba
##
## Copyright (C) Gerald Carter 2004.
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
######################################################################
import sys, os
from SambaConfig import SambaConf
## ##
## check the command line args ##
## ##
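## Example invocations (illustrative values):
##   modify_samba_config.py /etc/samba/smb.conf share1 /srv/share1 "My files"
##   modify_samba_config.py /etc/samba/smb.conf share1   (3 args: delete mode)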
delete_mode = False
if len(sys.argv) == 3:
delete_mode = True
print "Deleting share..."
elif len(sys.argv) == 5:
print "Adding/Updating share..."
else:
print "Usage: %s configfile share [path] [comments]" % sys.argv[0]
sys.exit(1)
## ##
## read and parse the config file ##
## ##
confFile = SambaConf()
confFile.ReadConfig( sys.argv[1] )
if not confFile.valid:
sys.exit( 1 )
if delete_mode:
if not confFile.isService( sys.argv[2] ):
sys.stderr.write( "Asked to delete non-existent service! [%s]\n" % sys.argv[2] )
sys.exit( 1 )
confFile.DelService( sys.argv[2] )
else:
## make the path if it doesn't exist. Bail out if that fails
if ( not os.path.isdir(sys.argv[3]) ):
try:
os.makedirs( sys.argv[3] )
os.chmod( sys.argv[3], 0777 )
except os.error:
sys.exit( 1 )
## only add a new service -- if it already exists, then
## just set the options
if not confFile.isService( sys.argv[2] ):
confFile.AddService( sys.argv[2], ['##', '## Added by modify_samba_config.py', '##'] )
confFile.SetServiceOption( sys.argv[2], "path", sys.argv[3] )
confFile.SetServiceOption( sys.argv[2], "comment", sys.argv[4] )
confFile.SetServiceOption( sys.argv[2], "read only", "no" )
ret = confFile.Flush()
sys.exit( ret )
| gpl-3.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/sqlalchemy/sql/crud.py | 39 | 19284 | # sql/crud.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Functions used by compiler.py to determine the parameters rendered
within INSERT and UPDATE statements.
"""
from .. import util
from .. import exc
from . import elements
import operator
REQUIRED = util.symbol('REQUIRED', """
Placeholder for the value within a :class:`.BindParameter`
which is required to be present when the statement is passed
to :meth:`.Connection.execute`.
This symbol is typically used when a :func:`.expression.insert`
or :func:`.expression.update` statement is compiled without parameter
values present.
""")
def _get_crud_params(compiler, stmt, **kw):
"""create a set of tuples representing column/string pairs for use
in an INSERT or UPDATE statement.
Also generates the Compiled object's postfetch, prefetch, and
returning column collections, used for default handling and ultimately
populating the ResultProxy's prefetch_cols() and postfetch_cols()
collections.
"""
compiler.postfetch = []
compiler.prefetch = []
compiler.returning = []
# no parameters in the statement, no parameters in the
# compiled params - return binds for all columns
if compiler.column_keys is None and stmt.parameters is None:
return [
(c, _create_bind_param(
compiler, c, None, required=True))
for c in stmt.table.columns
]
if stmt._has_multi_parameters:
stmt_parameters = stmt.parameters[0]
else:
stmt_parameters = stmt.parameters
# getters - these are normally just column.key,
# but in the case of mysql multi-table update, the rules for
# .key must conditionally take tablename into account
_column_as_key, _getattr_col_key, _col_bind_name = \
_key_getters_for_crud_column(compiler)
# if we have statement parameters - set defaults in the
# compiled params
if compiler.column_keys is None:
parameters = {}
else:
parameters = dict((_column_as_key(key), REQUIRED)
for key in compiler.column_keys
if not stmt_parameters or
key not in stmt_parameters)
# create a list of column assignment clauses as tuples
values = []
if stmt_parameters is not None:
_get_stmt_parameters_params(
compiler,
parameters, stmt_parameters, _column_as_key, values, kw)
check_columns = {}
# special logic that only occurs for multi-table UPDATE
# statements
if compiler.isupdate and stmt._extra_froms and stmt_parameters:
_get_multitable_params(
compiler, stmt, stmt_parameters, check_columns,
_col_bind_name, _getattr_col_key, values, kw)
if compiler.isinsert and stmt.select_names:
_scan_insert_from_select_cols(
compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
else:
_scan_cols(
compiler, stmt, parameters,
_getattr_col_key, _column_as_key,
_col_bind_name, check_columns, values, kw)
if parameters and stmt_parameters:
check = set(parameters).intersection(
_column_as_key(k) for k in stmt.parameters
).difference(check_columns)
if check:
raise exc.CompileError(
"Unconsumed column names: %s" %
(", ".join("%s" % c for c in check))
)
if stmt._has_multi_parameters:
values = _extend_values_for_multiparams(compiler, stmt, values, kw)
return values
def _create_bind_param(
compiler, col, value, process=True,
required=False, name=None):
if name is None:
name = col.key
bindparam = elements.BindParameter(
name, value, type_=col.type, required=required)
bindparam._is_crud = True
if process:
bindparam = bindparam._compiler_dispatch(compiler)
return bindparam
def _key_getters_for_crud_column(compiler):
if compiler.isupdate and compiler.statement._extra_froms:
# when extra tables are present, refer to the columns
# in those extra tables as table-qualified, including in
# dictionaries and when rendering bind param names.
# the "main" table of the statement remains unqualified,
# allowing the most compatibility with a non-multi-table
# statement.
_et = set(compiler.statement._extra_froms)
def _column_as_key(key):
str_key = elements._column_as_key(key)
if hasattr(key, 'table') and key.table in _et:
return (key.table.name, str_key)
else:
return str_key
def _getattr_col_key(col):
if col.table in _et:
return (col.table.name, col.key)
else:
return col.key
def _col_bind_name(col):
if col.table in _et:
return "%s_%s" % (col.table.name, col.key)
else:
return col.key
else:
_column_as_key = elements._column_as_key
_getattr_col_key = _col_bind_name = operator.attrgetter("key")
return _column_as_key, _getattr_col_key, _col_bind_name
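# Illustrative example: in a multi-table UPDATE such as MySQL's
# "UPDATE t1, t2 ...", a column t2.c.x from the extra table is keyed as
# ('t2', 'x') and its bind is named "t2_x", while columns of the main
# table keep their plain key 'x'.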
def _scan_insert_from_select_cols(
compiler, stmt, parameters, _getattr_col_key,
_column_as_key, _col_bind_name, check_columns, values, kw):
need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
cols = [stmt.table.c[_column_as_key(name)]
for name in stmt.select_names]
compiler._insert_from_select = stmt.select
add_select_cols = []
if stmt.include_insert_from_select_defaults:
col_set = set(cols)
for col in stmt.table.columns:
if col not in col_set and col.default:
cols.append(col)
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
parameters.pop(col_key)
values.append((c, None))
else:
_append_param_insert_select_hasdefault(
compiler, stmt, c, add_select_cols, kw)
if add_select_cols:
values.extend(add_select_cols)
compiler._insert_from_select = compiler._insert_from_select._generate()
compiler._insert_from_select._raw_columns += tuple(
expr for col, expr in add_select_cols)
def _scan_cols(
compiler, stmt, parameters, _getattr_col_key,
_column_as_key, _col_bind_name, check_columns, values, kw):
need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
if col_key in parameters and col_key not in check_columns:
_append_param_parameter(
compiler, stmt, c, col_key, parameters, _col_bind_name,
implicit_returning, implicit_return_defaults, values, kw)
elif compiler.isinsert:
if c.primary_key and \
need_pks and \
(
implicit_returning or
not postfetch_lastrowid or
c is not stmt.table._autoincrement_column
):
if implicit_returning:
_append_param_insert_pk_returning(
compiler, stmt, c, values, kw)
else:
_append_param_insert_pk(compiler, stmt, c, values, kw)
elif c.default is not None:
_append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults,
values, kw)
elif c.server_default is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif compiler.isupdate:
_append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw)
def _append_param_parameter(
compiler, stmt, c, col_key, parameters, _col_bind_name,
implicit_returning, implicit_return_defaults, values, kw):
value = parameters.pop(col_key)
if elements._is_literal(value):
value = _create_bind_param(
compiler, c, value, required=value is REQUIRED,
name=_col_bind_name(c)
if not stmt._has_multi_parameters
else "%s_0" % _col_bind_name(c)
)
else:
if isinstance(value, elements.BindParameter) and \
value.type._isnull:
value = value._clone()
value.type = c.type
if c.primary_key and implicit_returning:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
value = compiler.process(value.self_group(), **kw)
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
if c.default is not None:
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
compiler.returning.append(c)
elif c.default.is_clause_element:
values.append(
(c, compiler.process(
c.default.arg.self_group(), **kw))
)
compiler.returning.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
else:
compiler.returning.append(c)
def _create_prefetch_bind_param(compiler, c, process=True, name=None):
param = _create_bind_param(compiler, c, None, process=process, name=name)
compiler.prefetch.append(c)
return param
class _multiparam_column(elements.ColumnElement):
def __init__(self, original, index):
self.key = "%s_%d" % (original.key, index + 1)
self.original = original
self.default = original.default
def __eq__(self, other):
return isinstance(other, _multiparam_column) and \
other.key == self.key and \
other.original == self.original
def _process_multiparam_default_bind(compiler, c, index, kw):
if not c.default:
raise exc.CompileError(
"INSERT value for column %s is explicitly rendered as a bound"
"parameter in the VALUES clause; "
"a Python-side value or SQL expression is required" % c)
elif c.default.is_clause_element:
return compiler.process(c.default.arg.self_group(), **kw)
else:
col = _multiparam_column(c, index)
return _create_prefetch_bind_param(compiler, col)
def _append_param_insert_pk(compiler, stmt, c, values, kw):
if (
(c.default is not None and
(not c.default.is_sequence or
compiler.dialect.supports_sequences)) or
c is stmt.table._autoincrement_column and
(compiler.dialect.supports_sequences or
compiler.dialect.
preexecute_autoincrement_sequences)
):
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
def _append_param_insert_hasdefault(
compiler, stmt, c, implicit_return_defaults, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = compiler.process(c.default, **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
compiler.postfetch.append(c)
elif c.default.is_clause_element:
proc = compiler.process(c.default.arg.self_group(), **kw)
values.append((c, proc))
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
elif not c.primary_key:
# don't add primary key column to postfetch
compiler.postfetch.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
def _append_param_insert_select_hasdefault(
compiler, stmt, c, values, kw):
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
(not c.default.optional or
not compiler.dialect.sequences_optional):
proc = c.default
values.append((c, proc))
elif c.default.is_clause_element:
proc = c.default.arg.self_group()
values.append((c, proc))
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c, process=False))
)
def _append_param_update(
compiler, stmt, c, implicit_return_defaults, values, kw):
if c.onupdate is not None and not c.onupdate.is_sequence:
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(
c.onupdate.arg.self_group(), **kw))
)
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
elif c.server_onupdate is not None:
if implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
else:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
def _get_multitable_params(
compiler, stmt, stmt_parameters, check_columns,
_col_bind_name, _getattr_col_key, values, kw):
normalized_params = dict(
(elements._clause_element_as_expr(c), param)
for c, param in stmt_parameters.items()
)
affected_tables = set()
for t in stmt._extra_froms:
for c in t.c:
if c in normalized_params:
affected_tables.add(t)
check_columns[_getattr_col_key(c)] = c
value = normalized_params[c]
if elements._is_literal(value):
value = _create_bind_param(
compiler, c, value, required=value is REQUIRED,
name=_col_bind_name(c))
else:
compiler.postfetch.append(c)
value = compiler.process(value.self_group(), **kw)
values.append((c, value))
# determine tables which are actually to be updated - process onupdate
# and server_onupdate for these
for t in affected_tables:
for c in t.c:
if c in normalized_params:
continue
elif (c.onupdate is not None and not
c.onupdate.is_sequence):
if c.onupdate.is_clause_element:
values.append(
(c, compiler.process(
c.onupdate.arg.self_group(),
**kw)
)
)
compiler.postfetch.append(c)
else:
values.append(
(c, _create_prefetch_bind_param(
compiler, c, name=_col_bind_name(c)))
)
elif c.server_onupdate is not None:
compiler.postfetch.append(c)
def _extend_values_for_multiparams(compiler, stmt, values, kw):
values_0 = values
values = [values]
values.extend(
[
(
c,
(_create_bind_param(
compiler, c, row[c.key],
name="%s_%d" % (c.key, i + 1)
) if elements._is_literal(row[c.key])
else compiler.process(
row[c.key].self_group(), **kw))
if c.key in row else
_process_multiparam_default_bind(compiler, c, i, kw)
)
for (c, param) in values_0
]
for i, row in enumerate(stmt.parameters[1:])
)
return values
def _get_stmt_parameters_params(
compiler, parameters, stmt_parameters, _column_as_key, values, kw):
for k, v in stmt_parameters.items():
colkey = _column_as_key(k)
if colkey is not None:
parameters.setdefault(colkey, v)
else:
# a non-Column expression on the left side;
# add it to values() in an "as-is" state,
# coercing right side to bound param
if elements._is_literal(v):
v = compiler.process(
elements.BindParameter(None, v, type_=k.type),
**kw)
else:
v = compiler.process(v.self_group(), **kw)
values.append((k, v))
def _get_returning_modifiers(compiler, stmt):
need_pks = compiler.isinsert and \
not compiler.inline and \
not stmt._returning and \
not stmt._has_multi_parameters
implicit_returning = need_pks and \
compiler.dialect.implicit_returning and \
stmt.table.implicit_returning
if compiler.isinsert:
implicit_return_defaults = (implicit_returning and
stmt._return_defaults)
elif compiler.isupdate:
implicit_return_defaults = (compiler.dialect.implicit_returning and
stmt.table.implicit_returning and
stmt._return_defaults)
else:
implicit_return_defaults = False
if implicit_return_defaults:
if stmt._return_defaults is True:
implicit_return_defaults = set(stmt.table.c)
else:
implicit_return_defaults = set(stmt._return_defaults)
postfetch_lastrowid = need_pks and compiler.dialect.postfetch_lastrowid
return need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid
| gpl-2.0 |
Arkapravo/morse-0.6 | src/morse/middleware/yarp_request_manager.py | 1 | 7609 | import logging; logger = logging.getLogger("morse." + __name__)
import sys
import yarp
from morse.core.request_manager import RequestManager, MorseRPCInvokationError
from morse.core import status
class YarpRequestManager(RequestManager):
"""Implements services to control the MORSE simulator over YARP
The syntax of requests is:
>>> id component_name service [params with Python syntax]
'id' is an identifier set by the client to conveniently identify
the request. It must be less than 80 chars in [a-zA-Z0-9].
The server answers:
>>> id OK|FAIL result_in_python|error_msg
"""
def __str__(self):
return "Yarp service manager"
def initialization(self):
# Create dictionaries for the input and output ports
self._yarp_request_ports = dict()
self._yarp_reply_ports = dict()
# Create a dictionary for the port names
self._component_ports = dict()
# For asynchronous requests, this holds the mapping between a
# request_id and the socket which requested it.
self._pending_ports = dict()
# Stores for each port the pending results to write back.
self._results_to_output = dict()
# Create a dictionary for the available bottles
self._in_bottles = dict()
self._reply_bottles = dict()
return True
def finalization(self):
logger.info("Closing yarp request ports...")
for port in self._yarp_request_ports.values():
port.close()
return True
def on_service_completion(self, request_id, results):
port = None
try:
port, id = self._pending_ports[request_id]
except KeyError:
logger.info(str(self) + ": ERROR: I can not find the port which requested " + request_id)
return
if port in self._results_to_output:
self._results_to_output[port].append((id, results))
else:
self._results_to_output[port] = [(id, results)]
def post_registration(self, component_name, service, is_async):
""" Register a connection of a service with YARP """
# Get the Network attribute of yarp,
# then call its init method
self._yarp_module = sys.modules['yarp']
self.yarp_object = self._yarp_module.Network()
# Create the names of the ports
request_port_name = '/morse/services/{0}/request'.format(component_name)
reply_port_name = '/morse/services/{0}/reply'.format(component_name)
if not component_name in self._yarp_request_ports.keys():
# Create the ports to accept and reply to requests
request_port = self._yarp_module.BufferedPortBottle()
reply_port = self._yarp_module.BufferedPortBottle()
request_port.open(request_port_name)
reply_port.open(reply_port_name)
self._yarp_request_ports[component_name] = request_port
self._yarp_reply_ports[component_name] = reply_port
# Create bottles to use in the responses
bottle_in = self._yarp_module.Bottle()
self._in_bottles[component_name] = bottle_in
bottle_reply = self._yarp_module.Bottle()
self._reply_bottles[component_name] = bottle_reply
logger.info("Yarp service manager now listening on port " + request_port_name + ".")
logger.info("Yarp service manager will reply on port " + reply_port_name + ".")
return True
def main(self):
""" Read commands from the ports, and prepare the response"""
# Read data from available ports
for component_name, port in self._yarp_request_ports.items():
# Get the bottles to read and write
bottle_in = self._in_bottles[component_name]
bottle_reply = self._reply_bottles[component_name]
bottle_in = port.read(False)
if bottle_in is not None:
logger.debug("Received command from port '%s'" % (component_name))
try:
try:
id, component_name, service, params = self._parse_request(bottle_in)
except ValueError: # Request contains < 2 tokens.
raise MorseRPCInvokationError("Malformed request! ")
logger.info("Got '%s | %s | %s' (id = %s) from %s" % (component_name, service, params, id, component_name))
# on_incoming_request returns either
#(True, result) if it's a synchronous
# request that has been immediately executed, or
# (False, request_id) if it's an asynchronous request whose
# termination will be notified via
# on_service_completion.
is_sync, value = self.on_incoming_request(component_name, service, params)
if is_sync:
if port in self._results_to_output:
self._results_to_output[port].append((id, value))
else:
self._results_to_output[port] = [(id, value)]
else:
# Stores the mapping request/socket to notify
# the right port when the service completes.
# (cf :py:meth:on_service_completion)
# Here, 'value' is the internal request id while
# 'id' is the id used by the socket client.
self._pending_ports[value] = (port, id)
except MorseRPCInvokationError as e:
if port in self._results_to_output:
self._results_to_output[port].append((id, (status.FAILED, e.value)))
else:
self._results_to_output[port] = [(id, (status.FAILED, e.value))]
if self._results_to_output:
for component_name, port in self._yarp_request_ports.items():
if port in self._results_to_output:
for r in self._results_to_output[port]:
response = "%s %s %s" % (r[0], r[1][0], str(r[1][1]) if r[1][1] else "")
# Send the reply through the same yarp port
reply_port = self._yarp_reply_ports[component_name]
bottle_reply = reply_port.prepare()
bottle_reply.clear()
bottle_reply.addString(response)
reply_port.write()
logger.debug("Sent back '" + response + "'. Component: " + component_name + ". Port: " + str(port))
del self._results_to_output[port]
def _parse_request(self, bottle):
"""
Parse the incoming bottle.
"""
try:
id = bottle.get(0).asInt()
component_name = bottle.get(1).toString()
service = bottle.get(2).toString()
except IndexError as e:
raise MorseRPCInvokationError("Malformed request: at least 3 values and at most 4 are expected (id, component_name, service, [params])")
try:
params = bottle.get(3).toString()
import ast
p = ast.literal_eval(params)
except (NameError, SyntaxError) as e:
raise MorseRPCInvokationError("Invalid request syntax: error while parsing the parameters. " + str(e))
return (id, component_name, service, p)
| bsd-3-clause |
hkawasaki/kawasaki-aio8-1 | lms/djangoapps/verify_student/models.py | 46 | 33867 | # -*- coding: utf-8 -*-
"""
Models for Student Identity Verification
This is where we put any models relating to establishing the real-life identity
of a student over a period of time. Right now, the only models are the abstract
`PhotoVerification`, and its one concrete implementation
`SoftwareSecurePhotoVerification`. The hope is to keep as much of the
photo verification process as generic as possible.
"""
from datetime import datetime, timedelta
from email.utils import formatdate
import functools
import json
import logging
import uuid
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import pytz
import requests
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from model_utils.models import StatusModel
from model_utils import Choices
from verify_student.ssencrypt import (
random_aes_key, encrypt_and_encode,
generate_signed_message, rsa_encrypt
)
from reverification.models import MidcourseReverificationWindow
log = logging.getLogger(__name__)
def generateUUID(): # pylint: disable=C0103
""" Utility function; generates UUIDs """
return str(uuid.uuid4())
class VerificationException(Exception):
pass
def status_before_must_be(*valid_start_statuses):
"""
Helper decorator with arguments to make sure that an object with a `status`
attribute is in one of a list of acceptable status states before a method
is called. You could use it in a class definition like:
@status_before_must_be("submitted", "approved", "denied")
def refund_user(self, user_id):
# Do logic here...
If the object has a status that is not listed when the `refund_user` method
is invoked, it will throw a `VerificationException`. This is just to avoid
distracting boilerplate when looking at a Model that needs to go through a
workflow process.
"""
def decorator_func(func):
"""
Decorator function that gets returned
"""
@functools.wraps(func)
def with_status_check(obj, *args, **kwargs):
if obj.status not in valid_start_statuses:
exception_msg = (
u"Error calling {} {}: status is '{}', must be one of: {}"
).format(func, obj, obj.status, valid_start_statuses)
raise VerificationException(exception_msg)
return func(obj, *args, **kwargs)
return with_status_check
return decorator_func
class PhotoVerification(StatusModel):
"""
Each PhotoVerification represents a Student's attempt to establish
their identity by uploading a photo of themselves and a picture ID. An
attempt actually has a number of fields that need to be filled out at
different steps of the approval process. While it's useful as a Django Model
for the querying facilities, **you should only edit a `PhotoVerification`
object through the methods provided**. Initialize them with a user:
attempt = PhotoVerification(user=user)
We track this attempt through various states:
`created`
Initial creation and state we're in after uploading the images.
`ready`
The user has uploaded their images and checked that they can read the
images. There's a separate state here because it may be the case that we
don't actually submit this attempt for review until payment is made.
`submitted`
Submitted for review. The review may be done by a staff member or an
external service. The user cannot make changes once in this state.
`must_retry`
We submitted this, but there was an error on submission (i.e. we did not
get a 200 when we POSTed to Software Secure)
`approved`
An admin or an external service has confirmed that the user's photo and
photo ID match up, and that the photo ID's name matches the user's.
`denied`
The request has been denied. See `error_msg` for details on why. An
admin might later override this and change to `approved`, but the
student cannot re-open this attempt -- they have to create another
attempt and submit it instead.
Because this Model inherits from StatusModel, we can also do things like::
attempt.status == PhotoVerification.STATUS.created
attempt.status == "created"
pending_requests = PhotoVerification.submitted.all()
"""
######################## Fields Set During Creation ########################
# See class docstring for description of status states
STATUS = Choices('created', 'ready', 'submitted', 'must_retry', 'approved', 'denied')
user = models.ForeignKey(User, db_index=True)
# They can change their name later on, so we want to copy the value here so
# we always preserve what it was at the time they requested. We only copy
# this value during the mark_ready() step. Prior to that, you should be
# displaying the user's name from their user.profile.name.
name = models.CharField(blank=True, max_length=255)
# Where we place the uploaded image files (e.g. S3 URLs)
face_image_url = models.URLField(blank=True, max_length=255)
photo_id_image_url = models.URLField(blank=True, max_length=255)
# Randomly generated UUID so that external services can post back the
# results of checking a user's photo submission without use exposing actual
# user IDs or something too easily guessable.
receipt_id = models.CharField(
db_index=True,
default=lambda: generateUUID(),
max_length=255,
)
created_at = models.DateTimeField(auto_now_add=True, db_index=True)
updated_at = models.DateTimeField(auto_now=True, db_index=True)
# Indicates whether or not a user wants to see the verification status
# displayed on their dash. Right now, only relevant for allowing students
# to "dismiss" a failed midcourse reverification message
display = models.BooleanField(db_index=True, default=True)
######################## Fields Set When Submitting ########################
submitted_at = models.DateTimeField(null=True, db_index=True)
#################### Fields Set During Approval/Denial #####################
# If the review was done by an internal staff member, mark who it was.
reviewing_user = models.ForeignKey(
User,
db_index=True,
default=None,
null=True,
related_name="photo_verifications_reviewed"
)
# Mark the name of the service used to evaluate this attempt (e.g
# Software Secure).
reviewing_service = models.CharField(blank=True, max_length=255)
# If status is "denied", this should contain text explaining why.
error_msg = models.TextField(blank=True)
# Non-required field. External services can add any arbitrary codes as time
# goes on. We don't try to define an exhaustive list -- this is just
# capturing it so that we can later query for the common problems.
error_code = models.CharField(blank=True, max_length=50)
class Meta:
abstract = True
ordering = ['-created_at']
##### Methods listed in the order you'd typically call them
@classmethod
def _earliest_allowed_date(cls):
"""
Returns the earliest allowed date given the settings
"""
DAYS_GOOD_FOR = settings.VERIFY_STUDENT["DAYS_GOOD_FOR"]
allowed_date = (
datetime.now(pytz.UTC) - timedelta(days=DAYS_GOOD_FOR)
)
return allowed_date
@classmethod
def user_is_verified(cls, user, earliest_allowed_date=None, window=None):
"""
Return whether or not a user has satisfactorily proved their identity.
Depending on the policy, this can expire after some period of time, so
a user might have to renew periodically.
If window=None, then this will check for the user's *initial* verification.
If window is set to anything else, it will check for the reverification
associated with that window.
"""
return cls.objects.filter(
user=user,
status="approved",
created_at__gte=(earliest_allowed_date
or cls._earliest_allowed_date()),
window=window
).exists()
@classmethod
def user_has_valid_or_pending(cls, user, earliest_allowed_date=None, window=None):
"""
Return whether the user has a complete verification attempt that is or
*might* be good. This means that it's approved, been submitted, or would
have been submitted but had a non-user error when it was being
submitted. It's basically any situation in which the user has signed off
on the contents of the attempt, and we have not yet received a denial.
If window=None, this will check for the user's *initial* verification. If
window is anything else, this will check for the reverification associated
with that window.
"""
valid_statuses = ['submitted', 'approved']
if not window:
valid_statuses.append('must_retry')
return cls.objects.filter(
user=user,
status__in=valid_statuses,
created_at__gte=(earliest_allowed_date
or cls._earliest_allowed_date()),
window=window,
).exists()
@classmethod
def active_for_user(cls, user, window=None):
"""
Return the most recent PhotoVerification that is marked ready (i.e. the
user has said they're set, but we haven't submitted anything yet).
If window=None, this checks for the original verification. If window is set to
anything else, this will check for the reverification associated with that window.
"""
# This should only be one at the most, but just in case we create more
# by mistake, we'll grab the most recently created one.
active_attempts = cls.objects.filter(user=user, status='ready', window=window).order_by('-created_at')
if active_attempts:
return active_attempts[0]
else:
return None
@classmethod
def user_status(cls, user, window=None):
"""
Returns the status of the user based on their past verification attempts
If no such verification exists, returns 'none'
If verification has expired, returns 'expired'
If the verification has been approved, returns 'approved'
If the verification process is still ongoing, returns 'pending'
If the verification has been denied and the user must resubmit photos, returns 'must_reverify'
If window=None, this checks initial verifications
If window is set, this checks for the reverification associated with that window
"""
status = 'none'
error_msg = ''
if cls.user_is_verified(user, window=window):
status = 'approved'
elif cls.user_has_valid_or_pending(user, window=window):
# user_has_valid_or_pending does include 'approved', but if we are
# here, we know that the attempt is still pending
status = 'pending'
else:
# we need to check the most recent attempt to see if we need to ask them to do
# a retry
try:
attempts = cls.objects.filter(user=user, window=window).order_by('-updated_at')
attempt = attempts[0]
except IndexError:
# If no verification exists for a *midcourse* reverification, then that just
# means the student still needs to reverify. For *original* verifications,
# we return 'none'
                if window:
                    return ('must_reverify', error_msg)
else:
return ('none', error_msg)
if attempt.created_at < cls._earliest_allowed_date():
return ('expired', error_msg)
# If someone is denied their original verification attempt, they can try to reverify.
# However, if a midcourse reverification is denied, that denial is permanent.
if attempt.status == 'denied':
if window is None:
status = 'must_reverify'
else:
status = 'denied'
if attempt.error_msg:
error_msg = attempt.parsed_error_msg()
return (status, error_msg)
def parsed_error_msg(self):
"""
Sometimes, the error message we've received needs to be parsed into
something more human readable
The default behavior is to return the current error message as is.
"""
return self.error_msg
@status_before_must_be("created")
def upload_face_image(self, img):
raise NotImplementedError
@status_before_must_be("created")
def upload_photo_id_image(self, img):
raise NotImplementedError
@status_before_must_be("created")
def mark_ready(self):
"""
Mark that the user data in this attempt is correct. In order to
succeed, the user must have uploaded the necessary images
(`face_image_url`, `photo_id_image_url`). This method will also copy
their name from their user profile. Prior to marking it ready, we read
this value directly from their profile, since they're free to change it.
This often happens because people put in less formal versions of their
name on signup, but realize they want something different to go on a
formal document.
Valid attempt statuses when calling this method:
`created`
Status after method completes: `ready`
Other fields that will be set by this method:
`name`
State Transitions:
`created` → `ready`
This is what happens when the user confirms to us that the pictures
they uploaded are good. Note that we don't actually do a submission
anywhere yet.
"""
# At any point prior to this, they can change their names via their
# student dashboard. But at this point, we lock the value into the
# attempt.
self.name = self.user.profile.name
self.status = "ready"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def approve(self, user_id=None, service=""):
"""
        Approve this attempt. `user_id` is the id of the reviewing user and
        `service` is the name of the reviewing service; both are recorded on
        the attempt.
        Valid attempt statuses when calling this method:
            `must_retry`, `submitted`, `approved`, `denied`
Status after method completes: `approved`
Other fields that will be set by this method:
`reviewed_by_user_id`, `reviewed_by_service`, `error_msg`
State Transitions:
`submitted` → `approved`
This is the usual flow, whether initiated by a staff user or an
external validation service.
`approved` → `approved`
No-op. First one to approve it wins.
`denied` → `approved`
This might happen if a staff member wants to override a decision
made by an external service or another staff member (say, in
response to a support request). In this case, the previous values
of `reviewed_by_user_id` and `reviewed_by_service` will be changed
to whoever is doing the approving, and `error_msg` will be reset.
            The only record that this attempt was ever denied would be in our
            logs. This should be a relatively rare occurrence.
"""
# If someone approves an outdated version of this, the first one wins
if self.status == "approved":
return
self.error_msg = "" # reset, in case this attempt was denied before
self.error_code = "" # reset, in case this attempt was denied before
self.reviewing_user = user_id
self.reviewing_service = service
self.status = "approved"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def deny(self,
error_msg,
error_code="",
reviewing_user=None,
reviewing_service=""):
"""
Deny this attempt.
        Valid attempt statuses when calling this method:
            `must_retry`, `submitted`, `approved`, `denied`
Status after method completes: `denied`
Other fields that will be set by this method:
`reviewed_by_user_id`, `reviewed_by_service`, `error_msg`,
`error_code`
State Transitions:
`submitted` → `denied`
This is the usual flow, whether initiated by a staff user or an
external validation service.
`approved` → `denied`
This might happen if a staff member wants to override a decision
made by an external service or another staff member, or just correct
a mistake made during the approval process. In this case, the
previous values of `reviewed_by_user_id` and `reviewed_by_service`
            will be changed to whoever is doing the denying. The only record
            that this attempt was ever approved would be in our logs. This
            should be a relatively rare occurrence.
`denied` → `denied`
            Updates the error message and reviewing_user/reviewing_service.
            This just lets you amend the error message in case additional
            details need to be recorded.
"""
self.error_msg = error_msg
self.error_code = error_code
self.reviewing_user = reviewing_user
self.reviewing_service = reviewing_service
self.status = "denied"
self.save()
@status_before_must_be("must_retry", "submitted", "approved", "denied")
def system_error(self,
error_msg,
error_code="",
reviewing_user=None,
reviewing_service=""):
"""
Mark that this attempt could not be completed because of a system error.
Status should be moved to `must_retry`. For example, if Software Secure
reported to us that they couldn't process our submission because they
couldn't decrypt the image we sent.
"""
if self.status in ["approved", "denied"]:
return # If we were already approved or denied, just leave it.
self.error_msg = error_msg
self.error_code = error_code
self.reviewing_user = reviewing_user
self.reviewing_service = reviewing_service
self.status = "must_retry"
self.save()
@classmethod
def display_off(cls, user_id):
"""
        Finds all denied midcourse PhotoVerifications for a user and sets those
        verifications' `display` property to False, so the notification banner
        can be switched off.
"""
user = User.objects.get(id=user_id)
cls.objects.filter(user=user, status="denied").exclude(window=None).update(display=False)
@classmethod
def display_status(cls, user, window):
"""
        Finds the `display` property for the PhotoVerification associated with
        (user, window). Defaults to True if no attempt exists.
"""
attempts = cls.objects.filter(user=user, window=window).order_by('-updated_at')
try:
attempt = attempts[0]
return attempt.display
except IndexError:
return True
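# Illustrative sketch (not part of this module): roughly what the
# random_aes_key / encrypt_and_encode helpers used by
# SoftwareSecurePhotoVerification below could look like, assuming AES-256 in
# CBC mode with a random IV prepended to the ciphertext and base64-encoded
# output, per the class docstring. The real helpers are imported from
# elsewhere in edx-platform; these are prefixed with _example_ so nothing
# is shadowed.
def _example_random_aes_key():
    from Crypto import Random
    return Random.new().read(32)  # 256-bit key

def _example_encrypt_and_encode(data, key):
    from base64 import urlsafe_b64encode
    from Crypto import Random
    from Crypto.Cipher import AES
    iv = Random.new().read(AES.block_size)
    # PKCS#7-style padding so the plaintext length is a multiple of the block size
    pad = AES.block_size - (len(data) % AES.block_size)
    cipher = AES.new(key, AES.MODE_CBC, iv)
    return urlsafe_b64encode(iv + cipher.encrypt(data + chr(pad) * pad))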
class SoftwareSecurePhotoVerification(PhotoVerification):
"""
Model to verify identity using a service provided by Software Secure. Much
of the logic is inherited from `PhotoVerification`, but this class
encrypts the photos.
Software Secure (http://www.softwaresecure.com/) is a remote proctoring
service that also does identity verification. A student uses their webcam
to upload two images: one of their face, one of a photo ID. Due to the
sensitive nature of the data, the following security precautions are taken:
1. The snapshot of their face is encrypted using AES-256 in CBC mode. All
       face photos are encrypted with the same key, and this key is known to
both Software Secure and edx-platform.
2. The snapshot of a user's photo ID is also encrypted using AES-256, but
the key is randomly generated using pycrypto's Random. Every verification
attempt has a new key. The AES key is then encrypted using a public key
       provided by Software Secure. We store only the RSA-encrypted AES key.
       Since edx-platform does not have Software Secure's private RSA key, we
       can no longer even read the photo ID.
3. The encrypted photos are base64 encoded and stored in an S3 bucket that
edx-platform does not have read access to.
    Note: this model handles both *initial* verifications (which you must perform
    at the time you register for a verified cert) and *midcourse reverifications*.
    To distinguish between the two, check the value of the `window` property:
    initial verifications have a window of None, whereas midcourse reverifications
    *must always be linked to a specific window*.
"""
# This is a base64.urlsafe_encode(rsa_encrypt(photo_id_aes_key), ss_pub_key)
# So first we generate a random AES-256 key to encrypt our photo ID with.
# Then we RSA encrypt it with Software Secure's public key. Then we base64
# encode that. The result is saved here. Actual expected length is 344.
photo_id_key = models.TextField(max_length=1024)
IMAGE_LINK_DURATION = 5 * 60 * 60 * 24 # 5 days in seconds
window = models.ForeignKey(MidcourseReverificationWindow, db_index=True, null=True)
@classmethod
def user_is_reverified_for_all(cls, course_id, user):
"""
Checks to see if the student has successfully reverified for all of the
mandatory re-verification windows associated with a course.
This is used primarily by the certificate generation code... if the user is
not re-verified for all windows, then they cannot receive a certificate.
"""
all_windows = MidcourseReverificationWindow.objects.filter(course_id=course_id)
# if there are no windows for a course, then return True right off
        if not all_windows.exists():
return True
for window in all_windows:
try:
# The status of the most recent reverification for each window must be "approved"
# for a student to count as completely reverified
attempts = cls.objects.filter(user=user, window=window).order_by('-updated_at')
attempt = attempts[0]
if attempt.status != "approved":
return False
except Exception: # pylint: disable=W0703
return False
return True
@classmethod
def original_verification(cls, user):
"""
        Returns the most recent original (window=None) SoftwareSecurePhotoVerification associated with the user.
"""
query = cls.objects.filter(user=user, window=None).order_by('-updated_at')
return query[0]
@status_before_must_be("created")
def upload_face_image(self, img_data):
"""
Upload an image of the user's face to S3. `img_data` should be a raw
bytestream of a PNG image. This method will take the data, encrypt it
using our FACE_IMAGE_AES_KEY, encode it with base64 and save it to S3.
Yes, encoding it to base64 adds compute and disk usage without much real
benefit, but that's what the other end of this API is expecting to get.
"""
# Skip this whole thing if we're running acceptance tests or if we're
# developing and aren't interested in working on student identity
# verification functionality. If you do want to work on it, you have to
# explicitly enable these in your private settings.
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
return
aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
aes_key = aes_key_str.decode("hex")
s3_key = self._generate_s3_key("face")
s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))
@status_before_must_be("created")
def fetch_photo_id_image(self):
"""
        Find the user's photo ID image, which was submitted with their original
        verification. The image has already been encrypted and stored in S3, so
        we just need to copy the original attempt's `photo_id_key` reference.
"""
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
return
self.photo_id_key = self.original_verification(self.user).photo_id_key
self.save()
@status_before_must_be("created")
def upload_photo_id_image(self, img_data):
"""
        Upload the user's photo ID image to S3. `img_data` should be a raw
bytestream of a PNG image. This method will take the data, encrypt it
using a randomly generated AES key, encode it with base64 and save it to
S3. The random key is also encrypted using Software Secure's public RSA
key and stored in our `photo_id_key` field.
Yes, encoding it to base64 adds compute and disk usage without much real
benefit, but that's what the other end of this API is expecting to get.
"""
# Skip this whole thing if we're running acceptance tests or if we're
# developing and aren't interested in working on student identity
# verification functionality. If you do want to work on it, you have to
# explicitly enable these in your private settings.
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
return
aes_key = random_aes_key()
rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
rsa_encrypted_aes_key = rsa_encrypt(aes_key, rsa_key_str)
# Upload this to S3
s3_key = self._generate_s3_key("photo_id")
s3_key.set_contents_from_string(encrypt_and_encode(img_data, aes_key))
# Update our record fields
self.photo_id_key = rsa_encrypted_aes_key.encode('base64')
self.save()
@status_before_must_be("must_retry", "ready", "submitted")
def submit(self):
"""
Submit our verification attempt to Software Secure for validation. This
will set our status to "submitted" if the post is successful, and
"must_retry" if the post fails.
"""
try:
response = self.send_request()
if response.ok:
self.submitted_at = datetime.now(pytz.UTC)
self.status = "submitted"
self.save()
else:
self.status = "must_retry"
self.error_msg = response.text
self.save()
except Exception as error:
log.exception(error)
self.status = "must_retry"
self.save()
def parsed_error_msg(self):
"""
        Parse the error messages we receive from Software Secure.
        Error messages are written in the form:
            `[{"photoIdReasons": ["Not provided"]}]`
        Returns a comma-separated string of human-readable error messages.
"""
# Translates the category names and messages into something more human readable
message_dict = {
("photoIdReasons", "Not provided"): _("No photo ID was provided."),
("photoIdReasons", "Text not clear"): _("We couldn't read your name from your photo ID image."),
("generalReasons", "Name mismatch"): _("The name associated with your account and the name on your ID do not match."),
("userPhotoReasons", "Image not clear"): _("The image of your face was not clear."),
("userPhotoReasons", "Face out of view"): _("Your face was not visible in your self-photo"),
}
try:
msg_json = json.loads(self.error_msg)
msg_dict = msg_json[0]
msg = []
for category in msg_dict:
# find the messages associated with this category
category_msgs = msg_dict[category]
for category_msg in category_msgs:
msg.append(message_dict[(category, category_msg)])
return u", ".join(msg)
except (ValueError, KeyError):
# if we can't parse the message as JSON or the category doesn't
# match one of our known categories, show a generic error
log.error('PhotoVerification: Error parsing this error message: %s', self.error_msg)
return _("There was an error verifying your ID photos.")
def image_url(self, name):
"""
        We generate this dynamically, since we want the expiration clock to
        start when the message is created, not when the record is created.
"""
s3_key = self._generate_s3_key(name)
return s3_key.generate_url(self.IMAGE_LINK_DURATION)
def _generate_s3_key(self, prefix):
"""
        Generates a key for an S3 bucket location.
Example: face/4dd1add9-6719-42f7-bea0-115c008c4fca
"""
conn = S3Connection(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_ACCESS_KEY"],
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["AWS_SECRET_KEY"]
)
bucket = conn.get_bucket(settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["S3_BUCKET"])
key = Key(bucket)
key.key = "{}/{}".format(prefix, self.receipt_id)
return key
def _encrypted_user_photo_key_str(self):
"""
Software Secure needs to have both UserPhoto and PhotoID decrypted in
the same manner. So even though this is going to be the same for every
request, we're also using RSA encryption to encrypt the AES key for
faces.
"""
face_aes_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["FACE_IMAGE_AES_KEY"]
face_aes_key = face_aes_key_str.decode("hex")
rsa_key_str = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["RSA_PUBLIC_KEY"]
rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)
return rsa_encrypted_face_aes_key.encode("base64")
def create_request(self):
"""return headers, body_dict"""
access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
secret_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_SECRET_KEY"]
scheme = "https" if settings.HTTPS == "on" else "http"
callback_url = "{}://{}{}".format(
scheme, settings.SITE_NAME, reverse('verify_student_results_callback')
)
body = {
"EdX-ID": str(self.receipt_id),
"ExpectedName": self.name,
"PhotoID": self.image_url("photo_id"),
"PhotoIDKey": self.photo_id_key,
"SendResponseTo": callback_url,
"UserPhoto": self.image_url("face"),
"UserPhotoKey": self._encrypted_user_photo_key_str(),
}
headers = {
"Content-Type": "application/json",
"Date": formatdate(timeval=None, localtime=False, usegmt=True)
}
_message, _sig, authorization = generate_signed_message(
"POST", headers, body, access_key, secret_key
)
headers['Authorization'] = authorization
return headers, body
def request_message_txt(self):
"""
This is the body of the request we send across. This is never actually
used in the code, but exists for debugging purposes -- you can call
`print attempt.request_message_txt()` on the console and get a readable
rendering of the request that would be sent across, without actually
sending anything.
"""
headers, body = self.create_request()
header_txt = "\n".join(
"{}: {}".format(h, v) for h, v in sorted(headers.items())
)
body_txt = json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8')
return header_txt + "\n\n" + body_txt
def send_request(self):
"""
Assembles a submission to Software Secure and sends it via HTTPS.
        Returns a requests.Response() object with the reply we get from SS.
"""
# If AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING is True, we want to
# skip posting anything to Software Secure. We actually don't even
# create the message because that would require encryption and message
# signing that rely on settings.VERIFY_STUDENT values that aren't set
# in dev. So we just pretend like we successfully posted
if settings.FEATURES.get('AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'):
fake_response = requests.Response()
fake_response.status_code = 200
return fake_response
headers, body = self.create_request()
response = requests.post(
settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_URL"],
headers=headers,
data=json.dumps(body, indent=2, sort_keys=True, ensure_ascii=False).encode('utf-8'),
verify=False
)
log.debug("Sent request to Software Secure for {}".format(self.receipt_id))
log.debug("Headers:\n{}\n\n".format(headers))
log.debug("Body:\n{}\n\n".format(body))
log.debug("Return code: {}".format(response.status_code))
log.debug("Return message:\n\n{}\n\n".format(response.text))
return response
| agpl-3.0 |
RedhawkSDR/integration-gnuhawk | components/pll_carriertracking_cc/tests/test_pll_carriertracking_cc.py | 1 | 4087 | #!/usr/bin/env python
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of GNUHAWK.
#
# GNUHAWK is free software: you can redistribute it and/or modify is under the
# terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# GNUHAWK is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see http://www.gnu.org/licenses/.
#
import unittest
import ossie.utils.testing
import os
from omniORB import any
class ComponentTests(ossie.utils.testing.ScaComponentTestCase):
"""Test for all component implementations in pll_carriertracking_cc"""
def testScaBasicBehavior(self):
#######################################################################
# Launch the component with the default execparams
execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
self.launch(execparams)
#######################################################################
# Verify the basic state of the component
self.assertNotEqual(self.comp, None)
self.assertEqual(self.comp.ref._non_existent(), False)
self.assertEqual(self.comp.ref._is_a("IDL:CF/Resource:1.0"), True)
#######################################################################
# Validate that query returns all expected parameters
# Query of '[]' should return the following set of properties
expectedProps = []
expectedProps.extend(self.getPropertySet(kinds=("configure", "execparam"), modes=("readwrite", "readonly"), includeNil=True))
expectedProps.extend(self.getPropertySet(kinds=("allocate",), action="external", includeNil=True))
props = self.comp.query([])
props = dict((x.id, any.from_any(x.value)) for x in props)
# Query may return more than expected, but not less
for expectedProp in expectedProps:
            self.assertIn(expectedProp.id, props)
#######################################################################
# Verify that all expected ports are available
for port in self.scd.get_componentfeatures().get_ports().get_uses():
port_obj = self.comp.getPort(str(port.get_usesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a("IDL:CF/Port:1.0"), True)
for port in self.scd.get_componentfeatures().get_ports().get_provides():
port_obj = self.comp.getPort(str(port.get_providesname()))
self.assertNotEqual(port_obj, None)
self.assertEqual(port_obj._non_existent(), False)
self.assertEqual(port_obj._is_a(port.get_repid()), True)
#######################################################################
# Make sure start and stop can be called without throwing exceptions
self.comp.start()
self.comp.stop()
#######################################################################
# Simulate regular component shutdown
self.comp.releaseObject()
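    # Illustrative sketch (not part of the original file): a second test in
    # the same style as testScaBasicBehavior, exercising repeated start/stop
    # cycles before release. All calls mirror the ones used above.
    def testStartStopCycles(self):
        execparams = self.getPropertySet(kinds=("execparam",), modes=("readwrite", "writeonly"), includeNil=False)
        execparams = dict([(x.id, any.from_any(x.value)) for x in execparams])
        self.launch(execparams)
        self.assertNotEqual(self.comp, None)
        # Start/stop should not raise across multiple cycles
        self.comp.start()
        self.comp.stop()
        self.comp.start()
        self.comp.stop()
        self.comp.releaseObject()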
# TODO Add additional tests here
#
# See:
# ossie.utils.bulkio.bulkio_helpers,
# ossie.utils.bluefile.bluefile_helpers
# for modules that will assist with testing components with BULKIO ports
if __name__ == "__main__":
ossie.utils.testing.main("../pll_carriertracking_cc.spd.xml") # By default tests all implementations
| gpl-3.0 |
zulip/django | django/utils/translation/trans_real.py | 59 | 27824 | """Translation helper functions."""
from __future__ import unicode_literals
import gettext as gettext_module
import os
import re
import sys
import warnings
from collections import OrderedDict
from threading import local
from django.apps import apps
from django.conf import settings
from django.conf.locale import LANG_INFO
from django.core.exceptions import AppRegistryNotReady
from django.core.signals import setting_changed
from django.dispatch import receiver
from django.utils import lru_cache, six
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.safestring import SafeData, mark_safe
from django.utils.six import StringIO
from django.utils.translation import (
LANGUAGE_SESSION_KEY, TranslatorCommentWarning, trim_whitespace,
)
# Translations are cached in a dictionary for every language.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
    (?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_re = re.compile(
r'^[a-z]{1,8}(?:-[a-z0-9]{1,8})*(?:@[a-z0-9]{1,20})?$',
re.IGNORECASE
)
language_code_prefix_re = re.compile(r'^/([\w@-]+)(/|$)')
@receiver(setting_changed)
def reset_cache(**kwargs):
"""
Reset global state when LANGUAGES setting has been changed, as some
languages should no longer be accepted.
"""
if kwargs['setting'] in ('LANGUAGES', 'LANGUAGE_CODE'):
check_for_language.cache_clear()
get_languages.cache_clear()
get_supported_language_variant.cache_clear()
def to_locale(language, to_lower=False):
"""
Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
True, the last component is lower-cased (en_us).
"""
p = language.find('-')
if p >= 0:
if to_lower:
return language[:p].lower() + '_' + language[p + 1:].lower()
else:
# Get correct locale for sr-latn
if len(language[p + 1:]) > 2:
return language[:p].lower() + '_' + language[p + 1].upper() + language[p + 2:].lower()
return language[:p].lower() + '_' + language[p + 1:].upper()
else:
return language.lower()
def to_language(locale):
"""Turns a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
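def _locale_conversion_examples():
    """Illustrative only (not part of Django): expected conversions for the
    two helpers above, derived from their docstrings."""
    assert to_locale('en-us') == 'en_US'
    assert to_locale('en-us', to_lower=True) == 'en_us'
    assert to_locale('sr-latn') == 'sr_Latn'  # long variants keep title case
    assert to_language('en_US') == 'en-us'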
class DjangoTranslation(gettext_module.GNUTranslations):
"""
This class sets up the GNUTranslations context with regard to output
charset.
This translation object will be constructed out of multiple GNUTranslations
objects by merging their catalogs. It will construct an object for the
requested language and add a fallback to the default language, if it's
different from the requested language.
"""
def __init__(self, language):
"""Create a GNUTranslations() using many locale directories"""
gettext_module.GNUTranslations.__init__(self)
self.__language = language
self.__to_language = to_language(language)
self.__locale = to_locale(language)
self._init_translation_catalog()
self._add_installed_apps_translations()
self._add_local_translations()
self._add_fallback()
def __repr__(self):
return "<DjangoTranslation lang:%s>" % self.__language
def _new_gnu_trans(self, localedir, use_null_fallback=True):
"""
Returns a mergeable gettext.GNUTranslations instance.
A convenience wrapper. By default gettext uses 'fallback=False'.
Using param `use_null_fallback` to avoid confusion with any other
references to 'fallback'.
"""
translation = gettext_module.translation(
domain='django',
localedir=localedir,
languages=[self.__locale],
codeset='utf-8',
fallback=use_null_fallback)
if not hasattr(translation, '_catalog'):
# provides merge support for NullTranslations()
translation._catalog = {}
translation._info = {}
translation.plural = lambda n: int(n != 1)
return translation
def _init_translation_catalog(self):
"""Creates a base catalog using global django translations."""
settingsfile = upath(sys.modules[settings.__module__].__file__)
localedir = os.path.join(os.path.dirname(settingsfile), 'locale')
use_null_fallback = True
if self.__language == settings.LANGUAGE_CODE:
# default lang should be present and parseable, if not
# gettext will raise an IOError (refs #18192).
use_null_fallback = False
translation = self._new_gnu_trans(localedir, use_null_fallback)
self.plural = translation.plural
self._info = translation._info.copy()
self._catalog = translation._catalog.copy()
def _add_installed_apps_translations(self):
"""Merges translations from each installed app."""
try:
app_configs = reversed(list(apps.get_app_configs()))
except AppRegistryNotReady:
raise AppRegistryNotReady(
"The translation infrastructure cannot be initialized before the "
"apps registry is ready. Check that you don't make non-lazy "
"gettext calls at import time.")
for app_config in app_configs:
localedir = os.path.join(app_config.path, 'locale')
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_local_translations(self):
"""Merges translations defined in LOCALE_PATHS."""
for localedir in reversed(settings.LOCALE_PATHS):
translation = self._new_gnu_trans(localedir)
self.merge(translation)
def _add_fallback(self):
"""Sets the GNUTranslations() fallback with the default language."""
# Don't set a fallback for the default language or any English variant
# (as it's empty, so it'll ALWAYS fall back to the default language)
if self.__language == settings.LANGUAGE_CODE or self.__language.startswith('en'):
return
default_translation = translation(settings.LANGUAGE_CODE)
self.add_fallback(default_translation)
def merge(self, other):
"""Merge another translation into this catalog."""
self._catalog.update(other._catalog)
def language(self):
"""Returns the translation language."""
return self.__language
def to_language(self):
"""Returns the translation language name."""
return self.__to_language
def translation(language):
"""
Returns a translation object.
"""
global _translations
if language not in _translations:
_translations[language] = DjangoTranslation(language)
return _translations[language]
def activate(language):
"""
Fetches the translation object for a given language and installs it as the
current translation object for the current thread.
"""
if not language:
return
_active.value = translation(language)
def deactivate():
"""
    Uninstalls the currently active translation object so that further _ calls
    will resolve against the default translation object again.
"""
if hasattr(_active, "value"):
del _active.value
def deactivate_all():
"""
Makes the active translation object a NullTranslations() instance. This is
useful when we want delayed translations to appear as the original string
for some reason.
"""
_active.value = gettext_module.NullTranslations()
_active.value.to_language = lambda *args: None
def get_language():
"""Returns the currently selected language."""
t = getattr(_active, "value", None)
if t is not None:
try:
return t.to_language()
except AttributeError:
pass
# If we don't have a real translation object, assume it's the default language.
return settings.LANGUAGE_CODE
def get_language_bidi():
"""
Returns selected language's BiDi layout.
* False = left-to-right layout
* True = right-to-left layout
"""
lang = get_language()
if lang is None:
return False
else:
base_lang = get_language().split('-')[0]
return base_lang in settings.LANGUAGES_BIDI
def catalog():
"""
Returns the current active catalog for further processing.
This can be used if you need to modify the catalog or want to access the
whole message catalog instead of just translating one string.
"""
global _default
t = getattr(_active, "value", None)
if t is not None:
return t
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return _default
def do_translate(message, translation_function):
"""
Translates 'message' using the given 'translation_function' name -- which
will be either gettext or ugettext. It uses the current thread to find the
translation object to use. If no current translation is activated, the
message will be run through the default translation object.
"""
global _default
# str() is allowing a bytestring message to remain bytestring on Python 2
eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
if len(eol_message) == 0:
# Returns an empty value of the corresponding type if an empty message
# is given, instead of metadata, which is the default gettext behavior.
result = type(message)("")
else:
_default = _default or translation(settings.LANGUAGE_CODE)
translation_object = getattr(_active, "value", _default)
result = getattr(translation_object, translation_function)(eol_message)
if isinstance(message, SafeData):
return mark_safe(result)
return result
def gettext(message):
"""
Returns a string of the translation of the message.
    Returns a string on Python 3 and a UTF-8-encoded bytestring on Python 2.
"""
return do_translate(message, 'gettext')
if six.PY3:
ugettext = gettext
else:
def ugettext(message):
return do_translate(message, 'ugettext')
def pgettext(context, message):
msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
result = ugettext(msg_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
# force unicode, because lazy version expects unicode
result = force_text(message)
return result
def gettext_noop(message):
"""
Marks strings for translation but doesn't translate them now. This can be
used to store strings in global variables that should stay in the base
language (because they might be used externally) and will be translated
later.
"""
return message
def do_ntranslate(singular, plural, number, translation_function):
global _default
t = getattr(_active, "value", None)
if t is not None:
return getattr(t, translation_function)(singular, plural, number)
if _default is None:
_default = translation(settings.LANGUAGE_CODE)
return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
"""
Returns a string of the translation of either the singular or plural,
based on the number.
    Returns a string on Python 3 and a UTF-8-encoded bytestring on Python 2.
"""
return do_ntranslate(singular, plural, number, 'ngettext')
if six.PY3:
ungettext = ngettext
else:
def ungettext(singular, plural, number):
"""
        Returns a unicode string of the translation of either the singular or
        plural, based on the number.
"""
return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
"%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
number)
result = ungettext(*msgs_with_ctxt)
if CONTEXT_SEPARATOR in result:
# Translation not found
result = ungettext(singular, plural, number)
return result
def all_locale_paths():
"""
    Returns a list of paths to user-provided language files.
"""
globalpath = os.path.join(
os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
return [globalpath] + list(settings.LOCALE_PATHS)
@lru_cache.lru_cache(maxsize=1000)
def check_for_language(lang_code):
"""
Checks whether there is a global language file for the given language
code. This is used to decide whether a user-provided language is
available.
    lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
# First, a quick check to make sure lang_code is well-formed (#21458)
if lang_code is None or not language_code_re.search(lang_code):
return False
for path in all_locale_paths():
if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
return True
return False
@lru_cache.lru_cache()
def get_languages():
"""
Cache of settings.LANGUAGES in an OrderedDict for easy lookups by key.
"""
return OrderedDict(settings.LANGUAGES)
@lru_cache.lru_cache(maxsize=1000)
def get_supported_language_variant(lang_code, strict=False):
"""
Returns the language-code that's listed in supported languages, possibly
selecting a more generic variant. Raises LookupError if nothing found.
    If `strict` is False (the default), the function will look for an
    alternative country-specific variant when the requested code is not found.
    lru_cache should have a maxsize to prevent memory exhaustion attacks,
as the provided language codes are taken from the HTTP request. See also
<https://www.djangoproject.com/weblog/2007/oct/26/security-fix/>.
"""
if lang_code:
# If 'fr-ca' is not supported, try special fallback or language-only 'fr'.
possible_lang_codes = [lang_code]
try:
possible_lang_codes.extend(LANG_INFO[lang_code]['fallback'])
except KeyError:
pass
generic_lang_code = lang_code.split('-')[0]
possible_lang_codes.append(generic_lang_code)
supported_lang_codes = get_languages()
for code in possible_lang_codes:
if code in supported_lang_codes and check_for_language(code):
return code
if not strict:
# if fr-fr is not supported, try fr-ca.
for supported_code in supported_lang_codes:
if supported_code.startswith(generic_lang_code + '-'):
return supported_code
raise LookupError(lang_code)
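def _variant_lookup_example():
    """Illustrative only (not part of Django): assuming settings.LANGUAGES
    contains 'fr' (with its translation files available) but not 'fr-ca',
    the country-specific code falls back to the generic language."""
    assert get_supported_language_variant('fr-ca') == 'fr'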
def get_language_from_path(path, strict=False):
"""
Returns the language-code if there is a valid language-code
found in the `path`.
    If `strict` is False (the default), the function will look for an
    alternative country-specific variant when the requested code is not found.
"""
regex_match = language_code_prefix_re.match(path)
if not regex_match:
return None
lang_code = regex_match.group(1)
try:
return get_supported_language_variant(lang_code, strict=strict)
except LookupError:
return None
def get_language_from_request(request, check_path=False):
"""
Analyzes the request to find what language the user wants the system to
show. Only languages listed in settings.LANGUAGES are taken into account.
If the user requests a sublanguage where we have a main language, we send
out the main language.
If check_path is True, the URL path prefix will be checked for a language
code, otherwise this is skipped for backwards compatibility.
"""
if check_path:
lang_code = get_language_from_path(request.path_info)
if lang_code is not None:
return lang_code
supported_lang_codes = get_languages()
if hasattr(request, 'session'):
lang_code = request.session.get(LANGUAGE_SESSION_KEY)
        if lang_code is not None and lang_code in supported_lang_codes and check_for_language(lang_code):
return lang_code
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
try:
return get_supported_language_variant(lang_code)
except LookupError:
pass
accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
for accept_lang, unused in parse_accept_lang_header(accept):
if accept_lang == '*':
break
if not language_code_re.search(accept_lang):
continue
try:
return get_supported_language_variant(accept_lang)
except LookupError:
continue
try:
return get_supported_language_variant(settings.LANGUAGE_CODE)
except LookupError:
return settings.LANGUAGE_CODE
dot_re = re.compile(r'\S')
def blankout(src, char):
"""
Changes every non-whitespace character to the given char.
Used in the templatize function.
"""
return dot_re.sub(char, src)
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
"""
Turns a Django template into something that is understood by xgettext. It
does so by translating the Django translation tags into standard gettext
function invocations.
"""
from django.template.base import (Lexer, TOKEN_TEXT, TOKEN_VAR,
TOKEN_BLOCK, TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
src = force_text(src, settings.FILE_CHARSET)
out = StringIO('')
message_context = None
intrans = False
inplural = False
trimmed = False
singular = []
plural = []
incomment = False
comment = []
lineno_comment_map = {}
comment_lineno_cache = None
def join_tokens(tokens, trim=False):
message = ''.join(tokens)
if trim:
message = trim_whitespace(message)
return message
for t in Lexer(src).tokenize():
if incomment:
if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
content = ''.join(comment)
translators_comment_start = None
for lineno, line in enumerate(content.splitlines(True)):
if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
translators_comment_start = lineno
for lineno, line in enumerate(content.splitlines(True)):
if translators_comment_start is not None and lineno >= translators_comment_start:
out.write(' # %s' % line)
else:
out.write(' #\n')
incomment = False
comment = []
else:
comment.append(t.contents)
elif intrans:
if t.token_type == TOKEN_BLOCK:
endbmatch = endblock_re.match(t.contents)
pluralmatch = plural_re.match(t.contents)
if endbmatch:
if inplural:
if message_context:
out.write(' npgettext(%r, %r, %r,count) ' % (
message_context,
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
else:
out.write(' ngettext(%r, %r, count) ' % (
join_tokens(singular, trimmed),
join_tokens(plural, trimmed)))
for part in singular:
out.write(blankout(part, 'S'))
for part in plural:
out.write(blankout(part, 'P'))
else:
if message_context:
out.write(' pgettext(%r, %r) ' % (
message_context,
join_tokens(singular, trimmed)))
else:
out.write(' gettext(%r) ' % join_tokens(singular,
trimmed))
for part in singular:
out.write(blankout(part, 'S'))
message_context = None
intrans = False
inplural = False
singular = []
plural = []
elif pluralmatch:
inplural = True
else:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
raise SyntaxError(
"Translation blocks must not include other block tags: "
"%s (%sline %d)" % (t.contents, filemsg, t.lineno)
)
elif t.token_type == TOKEN_VAR:
if inplural:
plural.append('%%(%s)s' % t.contents)
else:
singular.append('%%(%s)s' % t.contents)
elif t.token_type == TOKEN_TEXT:
contents = t.contents.replace('%', '%%')
if inplural:
plural.append(contents)
else:
singular.append(contents)
else:
# Handle comment tokens (`{# ... #}`) plus other constructs on
# the same line:
if comment_lineno_cache is not None:
cur_lineno = t.lineno + t.contents.count('\n')
if comment_lineno_cache == cur_lineno:
if t.token_type != TOKEN_COMMENT:
for c in lineno_comment_map[comment_lineno_cache]:
filemsg = ''
if origin:
filemsg = 'file %s, ' % origin
warn_msg = ("The translator-targeted comment '%s' "
"(%sline %d) was ignored, because it wasn't the last item "
"on the line.") % (c, filemsg, comment_lineno_cache)
warnings.warn(warn_msg, TranslatorCommentWarning)
lineno_comment_map[comment_lineno_cache] = []
else:
out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
comment_lineno_cache = None
if t.token_type == TOKEN_BLOCK:
imatch = inline_re.match(t.contents)
bmatch = block_re.match(t.contents)
cmatches = constant_re.findall(t.contents)
if imatch:
g = imatch.group(1)
if g[0] == '"':
g = g.strip('"')
elif g[0] == "'":
g = g.strip("'")
g = g.replace('%', '%%')
if imatch.group(2):
# A context is provided
context_match = context_re.match(imatch.group(2))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
out.write(' pgettext(%r, %r) ' % (message_context, g))
message_context = None
else:
out.write(' gettext(%r) ' % g)
elif bmatch:
for fmatch in constant_re.findall(t.contents):
out.write(' _(%s) ' % fmatch)
if bmatch.group(1):
# A context is provided
context_match = context_re.match(bmatch.group(1))
message_context = context_match.group(1)
if message_context[0] == '"':
message_context = message_context.strip('"')
elif message_context[0] == "'":
message_context = message_context.strip("'")
intrans = True
inplural = False
trimmed = 'trimmed' in t.split_contents()
singular = []
plural = []
elif cmatches:
for cmatch in cmatches:
out.write(' _(%s) ' % cmatch)
elif t.contents == 'comment':
incomment = True
else:
out.write(blankout(t.contents, 'B'))
elif t.token_type == TOKEN_VAR:
parts = t.contents.split('|')
cmatch = constant_re.match(parts[0])
if cmatch:
out.write(' _(%s) ' % cmatch.group(1))
for p in parts[1:]:
if p.find(':_(') >= 0:
out.write(' %s ' % p.split(':', 1)[1])
else:
out.write(blankout(p, 'F'))
elif t.token_type == TOKEN_COMMENT:
if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
lineno_comment_map.setdefault(t.lineno,
[]).append(t.contents)
comment_lineno_cache = t.lineno
else:
out.write(blankout(t.contents, 'X'))
return out.getvalue()
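def _templatize_example():
    """Illustrative only (not part of Django, and requires configured
    settings): a {% trans %} tag is rewritten into a plain gettext() call
    that xgettext can extract, e.g. roughly " gettext(u'Hello') "."""
    return templatize("{% trans 'Hello' %}")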
def parse_accept_lang_header(lang_string):
"""
Parses the lang_string, which is the body of an HTTP Accept-Language
header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string result in an empty list being returned.
"""
result = []
pieces = accept_language_re.split(lang_string.lower())
if pieces[-1]:
return []
for i in range(0, len(pieces) - 1, 3):
first, lang, priority = pieces[i:i + 3]
if first:
return []
if priority:
try:
priority = float(priority)
except ValueError:
return []
if not priority: # if priority is 0.0 at this point make it 1.0
priority = 1.0
result.append((lang, priority))
result.sort(key=lambda k: k[1], reverse=True)
return result
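def _accept_lang_examples():
    """Illustrative only (not part of Django): q-values default to 1.0,
    results are sorted by descending priority, and malformed headers yield
    an empty list."""
    assert parse_accept_lang_header('en-au;q=0.8, es') == [('es', 1.0), ('en-au', 0.8)]
    assert parse_accept_lang_header('this-header-is-malformed;;;') == []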
| bsd-3-clause |
jetskijoe/SickGear | lib/unidecode/x09c.py | 253 | 4659 | data = (
'Huan ', # 0x00
'Quan ', # 0x01
'Ze ', # 0x02
'Wei ', # 0x03
'Wei ', # 0x04
'Yu ', # 0x05
'Qun ', # 0x06
'Rou ', # 0x07
'Die ', # 0x08
'Huang ', # 0x09
'Lian ', # 0x0a
'Yan ', # 0x0b
'Qiu ', # 0x0c
'Qiu ', # 0x0d
'Jian ', # 0x0e
'Bi ', # 0x0f
'E ', # 0x10
'Yang ', # 0x11
'Fu ', # 0x12
'Sai ', # 0x13
'Jian ', # 0x14
'Xia ', # 0x15
'Tuo ', # 0x16
'Hu ', # 0x17
'Muroaji ', # 0x18
'Ruo ', # 0x19
'Haraka ', # 0x1a
'Wen ', # 0x1b
'Jian ', # 0x1c
'Hao ', # 0x1d
'Wu ', # 0x1e
'Fang ', # 0x1f
'Sao ', # 0x20
'Liu ', # 0x21
'Ma ', # 0x22
'Shi ', # 0x23
'Shi ', # 0x24
'Yin ', # 0x25
'Z ', # 0x26
'Teng ', # 0x27
'Ta ', # 0x28
'Yao ', # 0x29
'Ge ', # 0x2a
'Rong ', # 0x2b
'Qian ', # 0x2c
'Qi ', # 0x2d
'Wen ', # 0x2e
'Ruo ', # 0x2f
'Hatahata ', # 0x30
'Lian ', # 0x31
'Ao ', # 0x32
'Le ', # 0x33
'Hui ', # 0x34
'Min ', # 0x35
'Ji ', # 0x36
'Tiao ', # 0x37
'Qu ', # 0x38
'Jian ', # 0x39
'Sao ', # 0x3a
'Man ', # 0x3b
'Xi ', # 0x3c
'Qiu ', # 0x3d
'Biao ', # 0x3e
'Ji ', # 0x3f
'Ji ', # 0x40
'Zhu ', # 0x41
'Jiang ', # 0x42
'Qiu ', # 0x43
'Zhuan ', # 0x44
'Yong ', # 0x45
'Zhang ', # 0x46
'Kang ', # 0x47
'Xue ', # 0x48
'Bie ', # 0x49
'Jue ', # 0x4a
'Qu ', # 0x4b
'Xiang ', # 0x4c
'Bo ', # 0x4d
'Jiao ', # 0x4e
'Xun ', # 0x4f
'Su ', # 0x50
'Huang ', # 0x51
'Zun ', # 0x52
'Shan ', # 0x53
'Shan ', # 0x54
'Fan ', # 0x55
'Jue ', # 0x56
'Lin ', # 0x57
'Xun ', # 0x58
'Miao ', # 0x59
'Xi ', # 0x5a
'Eso ', # 0x5b
'Kyou ', # 0x5c
'Fen ', # 0x5d
'Guan ', # 0x5e
'Hou ', # 0x5f
'Kuai ', # 0x60
'Zei ', # 0x61
'Sao ', # 0x62
'Zhan ', # 0x63
'Gan ', # 0x64
'Gui ', # 0x65
'Sheng ', # 0x66
'Li ', # 0x67
'Chang ', # 0x68
'Hatahata ', # 0x69
'Shiira ', # 0x6a
'Mutsu ', # 0x6b
'Ru ', # 0x6c
'Ji ', # 0x6d
'Xu ', # 0x6e
'Huo ', # 0x6f
'Shiira ', # 0x70
'Li ', # 0x71
'Lie ', # 0x72
'Li ', # 0x73
'Mie ', # 0x74
'Zhen ', # 0x75
'Xiang ', # 0x76
'E ', # 0x77
'Lu ', # 0x78
'Guan ', # 0x79
'Li ', # 0x7a
'Xian ', # 0x7b
'Yu ', # 0x7c
'Dao ', # 0x7d
'Ji ', # 0x7e
'You ', # 0x7f
'Tun ', # 0x80
'Lu ', # 0x81
'Fang ', # 0x82
'Ba ', # 0x83
'He ', # 0x84
'Bo ', # 0x85
'Ping ', # 0x86
'Nian ', # 0x87
'Lu ', # 0x88
'You ', # 0x89
'Zha ', # 0x8a
'Fu ', # 0x8b
'Bo ', # 0x8c
'Bao ', # 0x8d
'Hou ', # 0x8e
'Pi ', # 0x8f
'Tai ', # 0x90
'Gui ', # 0x91
'Jie ', # 0x92
'Kao ', # 0x93
'Wei ', # 0x94
'Er ', # 0x95
'Tong ', # 0x96
'Ze ', # 0x97
'Hou ', # 0x98
'Kuai ', # 0x99
'Ji ', # 0x9a
'Jiao ', # 0x9b
'Xian ', # 0x9c
'Za ', # 0x9d
'Xiang ', # 0x9e
'Xun ', # 0x9f
'Geng ', # 0xa0
'Li ', # 0xa1
'Lian ', # 0xa2
'Jian ', # 0xa3
'Li ', # 0xa4
'Shi ', # 0xa5
'Tiao ', # 0xa6
'Gun ', # 0xa7
'Sha ', # 0xa8
'Wan ', # 0xa9
'Jun ', # 0xaa
'Ji ', # 0xab
'Yong ', # 0xac
'Qing ', # 0xad
'Ling ', # 0xae
'Qi ', # 0xaf
'Zou ', # 0xb0
'Fei ', # 0xb1
'Kun ', # 0xb2
'Chang ', # 0xb3
'Gu ', # 0xb4
'Ni ', # 0xb5
'Nian ', # 0xb6
'Diao ', # 0xb7
'Jing ', # 0xb8
'Shen ', # 0xb9
'Shi ', # 0xba
'Zi ', # 0xbb
'Fen ', # 0xbc
'Die ', # 0xbd
'Bi ', # 0xbe
'Chang ', # 0xbf
'Shi ', # 0xc0
'Wen ', # 0xc1
'Wei ', # 0xc2
'Sai ', # 0xc3
'E ', # 0xc4
'Qiu ', # 0xc5
'Fu ', # 0xc6
'Huang ', # 0xc7
'Quan ', # 0xc8
'Jiang ', # 0xc9
'Bian ', # 0xca
'Sao ', # 0xcb
'Ao ', # 0xcc
'Qi ', # 0xcd
'Ta ', # 0xce
'Yin ', # 0xcf
'Yao ', # 0xd0
'Fang ', # 0xd1
'Jian ', # 0xd2
'Le ', # 0xd3
'Biao ', # 0xd4
'Xue ', # 0xd5
'Bie ', # 0xd6
'Man ', # 0xd7
'Min ', # 0xd8
'Yong ', # 0xd9
'Wei ', # 0xda
'Xi ', # 0xdb
'Jue ', # 0xdc
'Shan ', # 0xdd
'Lin ', # 0xde
'Zun ', # 0xdf
'Huo ', # 0xe0
'Gan ', # 0xe1
'Li ', # 0xe2
'Zhan ', # 0xe3
'Guan ', # 0xe4
'Niao ', # 0xe5
'Yi ', # 0xe6
'Fu ', # 0xe7
'Li ', # 0xe8
'Jiu ', # 0xe9
'Bu ', # 0xea
'Yan ', # 0xeb
'Fu ', # 0xec
'Diao ', # 0xed
'Ji ', # 0xee
'Feng ', # 0xef
'Nio ', # 0xf0
'Gan ', # 0xf1
'Shi ', # 0xf2
'Feng ', # 0xf3
'Ming ', # 0xf4
'Bao ', # 0xf5
'Yuan ', # 0xf6
'Zhi ', # 0xf7
'Hu ', # 0xf8
'Qin ', # 0xf9
'Fu ', # 0xfa
'Fen ', # 0xfb
'Wen ', # 0xfc
'Jian ', # 0xfd
'Shi ', # 0xfe
'Yu ', # 0xff
)
| gpl-3.0 |
dreamsxin/kbengine | kbe/res/scripts/common/Lib/email/generator.py | 83 | 19858 | # Copyright (C) 2001-2010 Python Software Foundation
# Author: Barry Warsaw
# Contact: email-sig@python.org
"""Classes to generate plain text from a message object tree."""
__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
import re
import sys
import time
import random
from copy import deepcopy
from io import StringIO, BytesIO
from email.utils import _has_surrogates
UNDERSCORE = '_'
NL = '\n' # XXX: no longer used by the code below.
fcre = re.compile(r'^From ', re.MULTILINE)
class Generator:
"""Generates output from a Message object tree.
This basic generator writes the message to the given file object as plain
text.
"""
#
# Public interface
#
def __init__(self, outfp, mangle_from_=True, maxheaderlen=None, *,
policy=None):
"""Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default), escapes
From_ lines in the body of the message by putting a `>' in front of
them.
Optional maxheaderlen specifies the longest length for a non-continued
header. When a header line is longer (in characters, with tabs
expanded to 8 spaces) than maxheaderlen, the header will split as
defined in the Header class. Set maxheaderlen to zero to disable
header wrapping. The default is 78, as recommended (but not required)
by RFC 2822.
The policy keyword specifies a policy object that controls a number of
aspects of the generator's operation. If no policy is specified,
the policy associated with the Message object passed to the
flatten method is used.
"""
self._fp = outfp
self._mangle_from_ = mangle_from_
self.maxheaderlen = maxheaderlen
self.policy = policy
def write(self, s):
# Just delegate to the file object
self._fp.write(s)
def flatten(self, msg, unixfrom=False, linesep=None):
r"""Print the message object tree rooted at msg to the output file
specified when the Generator instance was created.
unixfrom is a flag that forces the printing of a Unix From_ delimiter
before the first object in the message tree. If the original message
has no From_ delimiter, a `standard' one is crafted. By default, this
is False to inhibit the printing of any From_ delimiter.
Note that for subobjects, no From_ line is printed.
linesep specifies the characters used to indicate a new line in
the output. The default value is determined by the policy specified
when the Generator instance was created or, if none was specified,
from the policy associated with the msg.
"""
# We use the _XXX constants for operating on data that comes directly
# from the msg, and _encoded_XXX constants for operating on data that
# has already been converted (to bytes in the BytesGenerator) and
# inserted into a temporary buffer.
policy = msg.policy if self.policy is None else self.policy
if linesep is not None:
policy = policy.clone(linesep=linesep)
if self.maxheaderlen is not None:
policy = policy.clone(max_line_length=self.maxheaderlen)
self._NL = policy.linesep
self._encoded_NL = self._encode(self._NL)
self._EMPTY = ''
        self._encoded_EMPTY = self._encode('')
# Because we use clone (below) when we recursively process message
# subparts, and because clone uses the computed policy (not None),
# submessages will automatically get set to the computed policy when
# they are processed by this code.
old_gen_policy = self.policy
old_msg_policy = msg.policy
try:
self.policy = policy
msg.policy = policy
if unixfrom:
ufrom = msg.get_unixfrom()
if not ufrom:
ufrom = 'From nobody ' + time.ctime(time.time())
self.write(ufrom + self._NL)
self._write(msg)
finally:
self.policy = old_gen_policy
msg.policy = old_msg_policy
def clone(self, fp):
"""Clone this generator with the exact same options."""
return self.__class__(fp,
self._mangle_from_,
None, # Use policy setting, which we've adjusted
policy=self.policy)
#
# Protected interface - undocumented ;/
#
# Note that we use 'self.write' when what we are writing is coming from
# the source, and self._fp.write when what we are writing is coming from a
# buffer (because the Bytes subclass has already had a chance to transform
# the data in its write method in that case). This is an entirely
# pragmatic split determined by experiment; we could be more general by
# always using write and having the Bytes subclass write method detect when
# it has already transformed the input; but, since this whole thing is a
# hack anyway this seems good enough.
# Similarly, we have _XXX and _encoded_XXX attributes that are used on
# source and buffer data, respectively.
_encoded_EMPTY = ''
def _new_buffer(self):
# BytesGenerator overrides this to return BytesIO.
return StringIO()
def _encode(self, s):
# BytesGenerator overrides this to encode strings to bytes.
return s
def _write_lines(self, lines):
# We have to transform the line endings.
if not lines:
return
lines = lines.splitlines(True)
for line in lines[:-1]:
self.write(line.rstrip('\r\n'))
self.write(self._NL)
laststripped = lines[-1].rstrip('\r\n')
self.write(laststripped)
if len(lines[-1]) != len(laststripped):
self.write(self._NL)
def _write(self, msg):
# We can't write the headers yet because of the following scenario:
# say a multipart message includes the boundary string somewhere in
# its body. We'd have to calculate the new boundary /before/ we write
# the headers so that we can write the correct Content-Type:
# parameter.
#
# The way we do this, so as to make the _handle_*() methods simpler,
        # is to cache any subpart writes into a buffer. Then we write the
# headers and the buffer contents. That way, subpart handlers can
# Do The Right Thing, and can still modify the Content-Type: header if
# necessary.
oldfp = self._fp
try:
self._munge_cte = None
self._fp = sfp = self._new_buffer()
self._dispatch(msg)
finally:
self._fp = oldfp
munge_cte = self._munge_cte
del self._munge_cte
# If we munged the cte, copy the message again and re-fix the CTE.
if munge_cte:
msg = deepcopy(msg)
msg.replace_header('content-transfer-encoding', munge_cte[0])
msg.replace_header('content-type', munge_cte[1])
# Write the headers. First we see if the message object wants to
# handle that itself. If not, we'll do it generically.
meth = getattr(msg, '_write_headers', None)
if meth is None:
self._write_headers(msg)
else:
meth(self)
self._fp.write(sfp.getvalue())
def _dispatch(self, msg):
# Get the Content-Type: for the message, then try to dispatch to
# self._handle_<maintype>_<subtype>(). If there's no handler for the
# full MIME type, then dispatch to self._handle_<maintype>(). If
# that's missing too, then dispatch to self._writeBody().
main = msg.get_content_maintype()
sub = msg.get_content_subtype()
specific = UNDERSCORE.join((main, sub)).replace('-', '_')
meth = getattr(self, '_handle_' + specific, None)
if meth is None:
generic = main.replace('-', '_')
meth = getattr(self, '_handle_' + generic, None)
if meth is None:
meth = self._writeBody
meth(msg)
#
# Default handlers
#
def _write_headers(self, msg):
for h, v in msg.raw_items():
self.write(self.policy.fold(h, v))
# A blank line always separates headers from body
self.write(self._NL)
#
# Handlers for writing types and subtypes
#
def _handle_text(self, msg):
payload = msg.get_payload()
if payload is None:
return
if not isinstance(payload, str):
raise TypeError('string payload expected: %s' % type(payload))
if _has_surrogates(msg._payload):
charset = msg.get_param('charset')
if charset is not None:
# XXX: This copy stuff is an ugly hack to avoid modifying the
# existing message.
msg = deepcopy(msg)
del msg['content-transfer-encoding']
msg.set_payload(payload, charset)
payload = msg.get_payload()
self._munge_cte = (msg['content-transfer-encoding'],
msg['content-type'])
if self._mangle_from_:
payload = fcre.sub('>From ', payload)
self._write_lines(payload)
# Default body handler
_writeBody = _handle_text
def _handle_multipart(self, msg):
# The trick here is to write out each part separately, merge them all
# together, and then make sure that the boundary we've chosen isn't
# present in the payload.
msgtexts = []
subparts = msg.get_payload()
if subparts is None:
subparts = []
elif isinstance(subparts, str):
# e.g. a non-strict parse of a message with no starting boundary.
self.write(subparts)
return
elif not isinstance(subparts, list):
# Scalar payload
subparts = [subparts]
for part in subparts:
s = self._new_buffer()
g = self.clone(s)
g.flatten(part, unixfrom=False, linesep=self._NL)
msgtexts.append(s.getvalue())
# BAW: What about boundaries that are wrapped in double-quotes?
boundary = msg.get_boundary()
if not boundary:
# Create a boundary that doesn't appear in any of the
# message texts.
alltext = self._encoded_NL.join(msgtexts)
boundary = self._make_boundary(alltext)
msg.set_boundary(boundary)
# If there's a preamble, write it out, with a trailing CRLF
if msg.preamble is not None:
if self._mangle_from_:
preamble = fcre.sub('>From ', msg.preamble)
else:
preamble = msg.preamble
self._write_lines(preamble)
self.write(self._NL)
# dash-boundary transport-padding CRLF
self.write('--' + boundary + self._NL)
# body-part
if msgtexts:
self._fp.write(msgtexts.pop(0))
# *encapsulation
# --> delimiter transport-padding
# --> CRLF body-part
for body_part in msgtexts:
# delimiter transport-padding CRLF
self.write(self._NL + '--' + boundary + self._NL)
# body-part
self._fp.write(body_part)
# close-delimiter transport-padding
self.write(self._NL + '--' + boundary + '--' + self._NL)
if msg.epilogue is not None:
if self._mangle_from_:
epilogue = fcre.sub('>From ', msg.epilogue)
else:
epilogue = msg.epilogue
self._write_lines(epilogue)
def _handle_multipart_signed(self, msg):
        # The contents of signed parts have to stay unmodified in order to keep
# the signature intact per RFC1847 2.1, so we disable header wrapping.
# RDM: This isn't enough to completely preserve the part, but it helps.
p = self.policy
self.policy = p.clone(max_line_length=0)
try:
self._handle_multipart(msg)
finally:
self.policy = p
def _handle_message_delivery_status(self, msg):
# We can't just write the headers directly to self's file object
# because this will leave an extra newline between the last header
# block and the boundary. Sigh.
blocks = []
for part in msg.get_payload():
s = self._new_buffer()
g = self.clone(s)
g.flatten(part, unixfrom=False, linesep=self._NL)
text = s.getvalue()
lines = text.split(self._encoded_NL)
# Strip off the unnecessary trailing empty line
if lines and lines[-1] == self._encoded_EMPTY:
blocks.append(self._encoded_NL.join(lines[:-1]))
else:
blocks.append(text)
# Now join all the blocks with an empty line. This has the lovely
# effect of separating each block with an empty line, but not adding
# an extra one after the last one.
self._fp.write(self._encoded_NL.join(blocks))
def _handle_message(self, msg):
s = self._new_buffer()
g = self.clone(s)
# The payload of a message/rfc822 part should be a multipart sequence
# of length 1. The zeroth element of the list should be the Message
# object for the subpart. Extract that object, stringify it, and
# write it out.
# Except, it turns out, when it's a string instead, which happens when
# and only when HeaderParser is used on a message of mime type
# message/rfc822. Such messages are generated by, for example,
# Groupwise when forwarding unadorned messages. (Issue 7970.) So
# in that case we just emit the string body.
payload = msg._payload
if isinstance(payload, list):
g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL)
payload = s.getvalue()
else:
payload = self._encode(payload)
self._fp.write(payload)
# This used to be a module level function; we use a classmethod for this
# and _compile_re so we can continue to provide the module level function
# for backward compatibility by doing
# _make_boundary = Generator._make_boundary
# at the end of the module. It *is* internal, so we could drop that...
@classmethod
def _make_boundary(cls, text=None):
# Craft a random boundary. If text is given, ensure that the chosen
# boundary doesn't appear in the text.
token = random.randrange(sys.maxsize)
boundary = ('=' * 15) + (_fmt % token) + '=='
if text is None:
return boundary
b = boundary
counter = 0
while True:
cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
if not cre.search(text):
break
b = boundary + '.' + str(counter)
counter += 1
return b
@classmethod
def _compile_re(cls, s, flags):
return re.compile(s, flags)
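# Usage sketch (illustrative, not part of the original source): Generator is
# normally driven through flatten(), which is also what Message.as_string()
# uses under the hood.
def _example_generator_usage():
    from io import StringIO
    from email.message import Message
    msg = Message()
    msg['Subject'] = 'hello'
    msg.set_payload('body text\n')
    buf = StringIO()
    Generator(buf, mangle_from_=False).flatten(msg)
    return buf.getvalue()  # roughly 'Subject: hello\n\nbody text\n'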
class BytesGenerator(Generator):
"""Generates a bytes version of a Message object tree.
Functionally identical to the base Generator except that the output is
bytes and not string. When surrogates were used in the input to encode
bytes, these are decoded back to bytes for output. If the policy has
cte_type set to 7bit, then the message is transformed such that the
non-ASCII bytes are properly content transfer encoded, using the charset
unknown-8bit.
The outfp object must accept bytes in its write method.
"""
# Bytes versions of this constant for use in manipulating data from
# the BytesIO buffer.
_encoded_EMPTY = b''
def write(self, s):
self._fp.write(s.encode('ascii', 'surrogateescape'))
def _new_buffer(self):
return BytesIO()
def _encode(self, s):
return s.encode('ascii')
def _write_headers(self, msg):
# This is almost the same as the string version, except for handling
# strings with 8bit bytes.
for h, v in msg.raw_items():
self._fp.write(self.policy.fold_binary(h, v))
# A blank line always separates headers from body
self.write(self._NL)
def _handle_text(self, msg):
# If the string has surrogates the original source was bytes, so
# just write it back out.
if msg._payload is None:
return
        if _has_surrogates(msg._payload) and self.policy.cte_type != '7bit':
if self._mangle_from_:
msg._payload = fcre.sub(">From ", msg._payload)
self._write_lines(msg._payload)
else:
            super(BytesGenerator, self)._handle_text(msg)
# Default body handler
_writeBody = _handle_text
@classmethod
def _compile_re(cls, s, flags):
return re.compile(s.encode('ascii'), flags)
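# Usage sketch (illustrative, not part of the original source): BytesGenerator
# mirrors the Generator example above but produces bytes; Message.as_bytes()
# takes this path.
def _example_bytes_generator_usage():
    from io import BytesIO
    from email.message import Message
    msg = Message()
    msg['Subject'] = 'hello'
    msg.set_payload('body text\n')
    buf = BytesIO()
    BytesGenerator(buf, mangle_from_=False).flatten(msg)
    return buf.getvalue()  # roughly b'Subject: hello\n\nbody text\n'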
_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
class DecodedGenerator(Generator):
"""Generates a text representation of a message.
Like the Generator base class, except that non-text parts are substituted
with a format string representing the part.
"""
def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
"""Like Generator.__init__() except that an additional optional
argument is allowed.
Walks through all subparts of a message. If the subpart is of main
type `text', then it prints the decoded payload of the subpart.
Otherwise, fmt is a format string that is used instead of the message
payload. fmt is expanded with the following keywords (in
%(keyword)s format):
type : Full MIME type of the non-text part
maintype : Main MIME type of the non-text part
subtype : Sub-MIME type of the non-text part
filename : Filename of the non-text part
description: Description associated with the non-text part
encoding : Content transfer encoding of the non-text part
The default value for fmt is None, meaning
[Non-text (%(type)s) part of message omitted, filename %(filename)s]
"""
Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
if fmt is None:
self._fmt = _FMT
else:
self._fmt = fmt
def _dispatch(self, msg):
for part in msg.walk():
maintype = part.get_content_maintype()
if maintype == 'text':
print(part.get_payload(decode=False), file=self)
elif maintype == 'multipart':
# Just skip this
pass
else:
print(self._fmt % {
'type' : part.get_content_type(),
'maintype' : part.get_content_maintype(),
'subtype' : part.get_content_subtype(),
'filename' : part.get_filename('[no filename]'),
'description': part.get('Content-Description',
'[no description]'),
'encoding' : part.get('Content-Transfer-Encoding',
'[no encoding]'),
}, file=self)
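# Usage sketch (illustrative, not part of the original source): the text part
# below is printed verbatim, while the application part is rendered through
# the fmt string documented in __init__.
def _example_decoded_generator_usage():
    from io import StringIO
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
    from email.mime.application import MIMEApplication
    msg = MIMEMultipart()
    msg.attach(MIMEText('visible body\n'))
    msg.attach(MIMEApplication(b'\x00\x01', Name='blob.bin'))
    buf = StringIO()
    DecodedGenerator(buf, mangle_from_=False).flatten(msg)
    return buf.getvalue()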
# Helper used by Generator._make_boundary
_width = len(repr(sys.maxsize-1))
_fmt = '%%0%dd' % _width
# Backward compatibility
_make_boundary = Generator._make_boundary
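# Shape check sketch (illustrative): a fresh boundary looks like
# '===============<digits>==', gaining a '.<counter>' suffix only when the
# initial choice already occurs in the supplied text.
def _example_make_boundary_shape():
    b = _make_boundary()
    assert b.startswith('=' * 15) and b.endswith('==')
    return b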
| lgpl-3.0 |
adamjmcgrath/glancydesign | src/django-nonrel/django/contrib/admin/views/main.py | 55 | 12023 | from django.contrib.admin.filterspecs import FilterSpec
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.util import quote, get_fields_from_path
from django.core.exceptions import SuspiciousOperation
from django.core.paginator import InvalidPage
from django.db import models
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext, ugettext_lazy
from django.utils.http import urlencode
import operator
# The system will display a "Show all" link on the change list only if the
# total result count is less than or equal to this setting.
MAX_SHOW_ALL_ALLOWED = 200
# Changelist settings
ALL_VAR = 'all'
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
TO_FIELD_VAR = 't'
IS_POPUP_VAR = 'pop'
ERROR_FLAG = 'e'
# Text to display within change-list table cells if the value is blank.
EMPTY_CHANGELIST_VALUE = ugettext_lazy('(None)')
def field_needs_distinct(field):
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.related.RelatedObject) and
not field.field.unique)):
return True
return False
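# Illustrative sketch (hypothetical models; cannot run standalone because
# Django models need a configured app): a ManyToManyField can match the same
# row several times under a filter, which is why the changelist must
# de-duplicate with .distinct().
#
#     class Author(models.Model):
#         books = models.ManyToManyField('Book')
#
#     field = Author._meta.get_field_by_name('books')[0]
#     field_needs_distinct(field)   # -> True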
class ChangeList(object):
def __init__(self, request, model, list_display, list_display_links, list_filter, date_hierarchy, search_fields, list_select_related, list_per_page, list_editable, model_admin):
self.model = model
self.opts = model._meta
self.lookup_opts = self.opts
self.root_query_set = model_admin.queryset(request)
self.list_display = list_display
self.list_display_links = list_display_links
self.list_filter = list_filter
self.date_hierarchy = date_hierarchy
self.search_fields = search_fields
self.list_select_related = list_select_related
self.list_per_page = list_per_page
self.model_admin = model_admin
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.show_all = ALL_VAR in request.GET
self.is_popup = IS_POPUP_VAR in request.GET
self.to_field = request.GET.get(TO_FIELD_VAR)
self.params = dict(request.GET.items())
if PAGE_VAR in self.params:
del self.params[PAGE_VAR]
if ERROR_FLAG in self.params:
del self.params[ERROR_FLAG]
if self.is_popup:
self.list_editable = ()
else:
self.list_editable = list_editable
self.order_field, self.order_type = self.get_ordering()
self.query = request.GET.get(SEARCH_VAR, '')
self.query_set = self.get_query_set()
self.get_results(request)
self.title = (self.is_popup and ugettext('Select %s') % force_unicode(self.opts.verbose_name) or ugettext('Select %s to change') % force_unicode(self.opts.verbose_name))
self.filter_specs, self.has_filters = self.get_filters(request)
self.pk_attname = self.lookup_opts.pk.attname
def get_filters(self, request):
filter_specs = []
if self.list_filter:
for filter_name in self.list_filter:
field = get_fields_from_path(self.model, filter_name)[-1]
spec = FilterSpec.create(field, request, self.params,
self.model, self.model_admin,
field_path=filter_name)
if spec and spec.has_output():
filter_specs.append(spec)
return filter_specs, bool(filter_specs)
def get_query_string(self, new_params=None, remove=None):
if new_params is None: new_params = {}
if remove is None: remove = []
p = self.params.copy()
for r in remove:
for k in p.keys():
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(p)
def get_results(self, request):
paginator = self.model_admin.get_paginator(request, self.query_set, self.list_per_page)
# Get the number of objects, with admin filters applied.
result_count = paginator.count
# Get the total number of objects, with no admin filters applied.
        # Perform a slight optimization: Check to see whether any filters were
        # given. If not, reuse paginator.count as the unfiltered total, since
        # we have already computed it above and the value is cached.
if not self.query_set.query.where:
full_result_count = result_count
else:
full_result_count = self.root_query_set.count()
can_show_all = result_count <= MAX_SHOW_ALL_ALLOWED
multi_page = result_count > self.list_per_page
# Get the list of objects to display on this page.
if (self.show_all and can_show_all) or not multi_page:
result_list = self.query_set._clone()
else:
try:
result_list = paginator.page(self.page_num+1).object_list
except InvalidPage:
raise IncorrectLookupParameters
self.result_count = result_count
self.full_result_count = full_result_count
self.result_list = result_list
self.can_show_all = can_show_all
self.multi_page = multi_page
self.paginator = paginator
def get_ordering(self):
lookup_opts, params = self.lookup_opts, self.params
# For ordering, first check the "ordering" parameter in the admin
# options, then check the object's default ordering. If neither of
# those exist, order descending by ID by default. Finally, look for
# manually-specified ordering from the query string.
ordering = self.model_admin.ordering or lookup_opts.ordering or ['-' + lookup_opts.pk.name]
if ordering[0].startswith('-'):
order_field, order_type = ordering[0][1:], 'desc'
else:
order_field, order_type = ordering[0], 'asc'
if ORDER_VAR in params:
try:
field_name = self.list_display[int(params[ORDER_VAR])]
try:
f = lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
try:
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
order_field = attr.admin_order_field
except AttributeError:
pass
else:
order_field = f.name
except (IndexError, ValueError):
pass # Invalid ordering specified. Just use the default.
if ORDER_TYPE_VAR in params and params[ORDER_TYPE_VAR] in ('asc', 'desc'):
order_type = params[ORDER_TYPE_VAR]
return order_field, order_type
def get_query_set(self):
use_distinct = False
qs = self.root_query_set
lookup_params = self.params.copy() # a dictionary of the query string
for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, TO_FIELD_VAR):
if i in lookup_params:
del lookup_params[i]
for key, value in lookup_params.items():
if not isinstance(key, str):
# 'key' will be used as a keyword argument later, so Python
# requires it to be a string.
del lookup_params[key]
lookup_params[smart_str(key)] = value
if not use_distinct:
# Check if it's a relationship that might return more than one
# instance
field_name = key.split('__', 1)[0]
try:
f = self.lookup_opts.get_field_by_name(field_name)[0]
except models.FieldDoesNotExist:
raise IncorrectLookupParameters
use_distinct = field_needs_distinct(f)
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
lookup_params[key] = value
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull'):
if value.lower() in ('', 'false'):
value = False
else:
value = True
lookup_params[key] = value
if not self.model_admin.lookup_allowed(key, value):
raise SuspiciousOperation(
"Filtering by %s not allowed" % key
)
# Apply lookup parameters from the query string.
try:
qs = qs.filter(**lookup_params)
# Naked except! Because we don't have any other way of validating "params".
# They might be invalid if the keyword arguments are incorrect, or if the
        # values are not of the correct type, so we might get FieldError,
        # ValueError, ValidationError, or something else entirely from a custom
        # field that raises when handed impossible data.
except:
raise IncorrectLookupParameters
# Use select_related() if one of the list_display options is a field
# with a relationship and the provided queryset doesn't already have
# select_related defined.
if not qs.query.select_related:
if self.list_select_related:
qs = qs.select_related()
else:
for field_name in self.list_display:
try:
f = self.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
pass
else:
if isinstance(f.rel, models.ManyToOneRel):
qs = qs.select_related()
break
# Set ordering.
if self.order_field:
qs = qs.order_by('%s%s' % ((self.order_type == 'desc' and '-' or ''), self.order_field))
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.search_fields and self.query:
orm_lookups = [construct_search(str(search_field))
for search_field in self.search_fields]
for bit in self.query.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
qs = qs.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
field_name = search_spec.split('__', 1)[0]
f = self.lookup_opts.get_field_by_name(field_name)[0]
if field_needs_distinct(f):
use_distinct = True
break
if use_distinct:
return qs.distinct()
else:
return qs
def url_for_result(self, result):
return "%s/" % quote(getattr(result, self.pk_attname))
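# Illustrative sketch (hypothetical values, not part of this module):
# get_query_string() merges new parameters into the preserved GET parameters
# and drops keys by prefix, e.g. with cl.params == {'o': '1', 'q': 'bob', 'p': '2'}:
#
#     cl.get_query_string({'o': '2'}, remove=['p'])   # -> '?o=2&q=bob'
#
# and construct_search() (nested in get_query_set() above) maps search_fields
# prefixes to ORM lookups:
#
#     '^name'  -> 'name__istartswith'
#     '=email' -> 'email__iexact'
#     '@body'  -> 'body__search'
#     'name'   -> 'name__icontains'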
| bsd-3-clause |
xmaruto/mcord | xos/services/vtr/models.py | 2 | 2740 | from django.db import models
from core.models import Service, PlCoreBase, Slice, Instance, Tenant, TenantWithContainer, Node, Image, User, Flavor, Subscriber, NetworkParameter, NetworkParameterType, Port, AddressPool
from core.models.plcorebase import StrippedCharField
import os
from django.db import models, transaction
from django.forms.models import model_to_dict
from django.db.models import Q
from operator import itemgetter, attrgetter, methodcaller
from core.models import Tag
from core.models.service import LeastLoadedNodeScheduler
from services.cord.models import CordSubscriberRoot
import traceback
from xos.exceptions import *
from xos.config import Config
class ConfigurationError(Exception):
pass
VTR_KIND = "vTR"
CORD_USE_VTN = getattr(Config(), "networking_use_vtn", False)
# -------------------------------------------
# vTR
# -------------------------------------------
class VTRService(Service):
KIND = VTR_KIND
class Meta:
app_label = "vtr"
verbose_name = "vTR Service"
proxy = True
class VTRTenant(Tenant):
class Meta:
proxy = True
KIND = VTR_KIND
TEST_CHOICES = ( ("ping", "Ping"), ("traceroute", "Trace Route"), ("tcpdump", "Tcp Dump") )
SCOPE_CHOICES = ( ("container", "Container"), ("vm", "VM") )
simple_attributes = ( ("test", None),
("argument", None),
("result", None),
("result_code", None),
("target_id", None),
("scope", "container") )
sync_attributes = ( 'test', 'argument', "scope" )
def __init__(self, *args, **kwargs):
vtr_services = VTRService.get_service_objects().all()
if vtr_services:
self._meta.get_field("provider_service").default = vtr_services[0].id
super(VTRTenant, self).__init__(*args, **kwargs)
@property
def target(self):
if getattr(self, "cached_target", None):
return self.cached_target
        target_id = self.target_id
        if not target_id:
            return None
        users = CordSubscriberRoot.objects.filter(id=target_id)
        if not users:
            return None
        user = users[0]
        self.cached_target = user
        return user
@target.setter
def target(self, value):
if value:
value = value.id
if (value != self.get_attribute("target_id", None)):
            self.cached_target = None
self.target_id = value
def save(self, *args, **kwargs):
super(VTRTenant, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
super(VTRTenant, self).delete(*args, **kwargs)
VTRTenant.setup_simple_attributes()
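# Illustrative note (assumes the XOS TenantWithContainer attribute machinery):
# setup_simple_attributes() generates property accessors for each entry in
# simple_attributes, backed by get_attribute()/set_attribute(), so with a
# hypothetical vtr_service instance:
#
#     tenant = VTRTenant(provider_service=vtr_service)
#     tenant.test = 'ping'     # stored as a tenant attribute
#     tenant.scope             # -> 'container' until explicitly set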
| apache-2.0 |
angelapper/edx-platform | openedx/core/djangoapps/credit/admin.py | 20 | 2478 | """
Django admin page for credit eligibility
"""
from django.contrib import admin
from openedx.core.djangoapps.credit.models import (
CreditConfig,
CreditCourse,
CreditEligibility,
CreditProvider,
CreditRequest,
CreditRequirement,
CreditRequirementStatus
)
class CreditCourseAdmin(admin.ModelAdmin):
"""Admin for credit courses. """
list_display = ('course_key', 'enabled',)
list_filter = ('enabled',)
search_fields = ('course_key',)
class Meta(object):
model = CreditCourse
class CreditProviderAdmin(admin.ModelAdmin):
"""Admin for credit providers. """
list_display = ('provider_id', 'display_name', 'active',)
list_filter = ('active',)
search_fields = ('provider_id', 'display_name')
class Meta(object):
model = CreditProvider
class CreditEligibilityAdmin(admin.ModelAdmin):
"""Admin for credit eligibility. """
list_display = ('course', 'username', 'deadline')
search_fields = ('username', 'course__course_key')
class Meta(object):
model = CreditEligibility
class CreditRequestAdmin(admin.ModelAdmin):
"""Admin for credit requests. """
list_display = ('provider', 'course', 'status', 'username')
list_filter = ('provider', 'status',)
readonly_fields = ('uuid',)
search_fields = ('uuid', 'username', 'course__course_key', 'provider__provider_id')
class Meta(object):
model = CreditRequest
class CreditRequirementAdmin(admin.ModelAdmin):
""" Admin for CreditRequirement. """
list_display = ('course', 'namespace', 'name', 'display_name', 'active',)
list_filter = ('active', 'namespace',)
search_fields = ('course__course_key', 'namespace', 'name',)
class Meta(object):
model = CreditRequirement
class CreditRequirementStatusAdmin(admin.ModelAdmin):
""" Admin for CreditRequirementStatus. """
list_display = ('username', 'requirement', 'status',)
search_fields = ('username', 'requirement__course__course_key',)
class Meta(object):
model = CreditRequirementStatus
admin.site.register(CreditCourse, CreditCourseAdmin)
admin.site.register(CreditProvider, CreditProviderAdmin)
admin.site.register(CreditEligibility, CreditEligibilityAdmin)
admin.site.register(CreditRequest, CreditRequestAdmin)
admin.site.register(CreditConfig)
admin.site.register(CreditRequirement, CreditRequirementAdmin)
admin.site.register(CreditRequirementStatus, CreditRequirementStatusAdmin)
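# Equivalent sketch (illustrative; Django >= 1.7): each binding above could
# also be written with the @admin.register decorator, e.g.
#
#     @admin.register(CreditCourse)
#     class CreditCourseAdmin(admin.ModelAdmin):
#         ...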
| agpl-3.0 |
davidzchen/tensorflow | tensorflow/python/distribute/vars_test.py | 2 | 53170 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the distributed values library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import uuid
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import tpu_cluster_resolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.training import checkpoint_management as ckpt_manager
from tensorflow.python.training.tracking import util as trackable_utils
_TPU_STRATEGIES = (tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV1)
def strategy_and_run_tf_function_combinations():
# Test the combination of different strategies and whether a tf.function
  # is passed into strategy.run.
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"],
experimental_run_tf_function=[True, False],
use_var_policy=[True, False]) + combinations.combine(
distribution=[
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["graph", "eager"],
experimental_run_tf_function=[True],
use_var_policy=[True, False])
def strategy_with_var_policy():
return combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
],
mode=["graph", "eager"],
use_var_policy=[True, False])
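# Illustrative note: combinations.combine() expands the named parameter lists
# into their cartesian product and `+` concatenates the resulting lists, e.g.
#
#     combinations.combine(mode=["graph", "eager"], use_var_policy=[True, False])
#
# yields four parameterizations, one per (mode, use_var_policy) pair.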
class OnWriteVariableSync(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssign(self, distribution, experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # assign() in replica context with aggregation=SUM does not make sense,
      # since you could simply assign value * num_replicas instead; the error
      # raised is "1. is not a distributed value and is unsupported for
      # aggregation SUM".
if (not cross_replica and aggregation ==
variables_lib.VariableAggregation.SUM):
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignOnWriteVar(self, distribution, experimental_run_tf_function):
with distribution.scope():
v_to_assign = variable_scope.variable(
2., aggregation=variables_lib.VariableAggregation.MEAN)
v_to_assign_sub = variable_scope.variable(
-2., aggregation=variables_lib.VariableAggregation.MEAN)
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", v_to_assign), ("assign_add", v_to_assign),
("assign_sub", v_to_assign_sub)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # assign() in replica context with aggregation=SUM does not make sense,
      # since you could simply assign value * num_replicas instead; the error
      # raised is "1. is not a distributed value and is unsupported for
      # aggregation SUM".
if aggregation == variables_lib.VariableAggregation.SUM:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(2.0, self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaVal(self, distribution, experimental_run_tf_function):
if isinstance(distribution, _TPU_STRATEGIES):
self.skipTest("Assigning PerReplica values is not supported. See"
" sponge/80ba41f8-4220-4516-98ce-bbad48f9f11a.")
with distribution.scope():
per_replica_value = values.PerReplica(
[constant_op.constant(2.0),
constant_op.constant(2.0)])
per_replica_sub_value = values.PerReplica(
[constant_op.constant(-2.0),
constant_op.constant(-2.0)])
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", per_replica_value), ("assign_add", per_replica_value),
("assign_sub", per_replica_sub_value)]
    # We don't support assigning PerReplica values to vars in replica context
# with aggregation=NONE.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # assign() in replica context with aggregation=SUM does not make sense,
      # since you could simply assign value * num_replicas instead; the error
      # raised is "1. is not a distributed value and is unsupported for
      # aggregation SUM".
if cross_replica:
# We don't support assigning PerReplica values to MirroredVariables in
# cross replica context
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
if aggregation == variables_lib.VariableAggregation.SUM:
expected = 4.0
else:
expected = 2.0
for component in v._values:
self.assertAllEqual(expected, self.evaluate(component.read_value()))
@combinations.generate(strategy_with_var_policy())
def testValueInReplicaContext(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
1., aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def f():
with ops.control_dependencies([v.assign_add(1.)]):
return v.value()
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(f)))
for value in results:
self.assertEqual(2., value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(read_var_fn)))
for component, value in zip(v._values, results):
self.assertAllEqual(self.evaluate(component.read_value()), value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInCrossReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
2.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = read_var_fn()
for component in v._values:
self.assertEqual(self.evaluate(component.read_value()),
self.evaluate(results))
@combinations.generate(strategy_with_var_policy())
def testAssignOutOfScope(self, distribution):
with distribution.scope():
mirrored = variables_lib.Variable(1.)
self.evaluate(mirrored.assign(3.))
self.assertEqual(self.evaluate(mirrored.read_value()), 3.)
for component in mirrored.values:
self.assertEqual(self.evaluate(component.read_value()), 3.)
@combinations.generate(strategy_with_var_policy())
def testAssignAggregationMeanDTypeNonFloat(self, distribution):
if isinstance(distribution, _TPU_STRATEGIES):
self.skipTest("Fix sponge/6e8ab540-4c0f-4da5-aedf-86505ff810c9 before "
"reenabling test.")
with distribution.scope():
v = variables_lib.Variable(
1,
aggregation=variable_scope.VariableAggregation.MEAN,
dtype=dtypes.int32)
self.evaluate(v.initializer)
@def_function.function
def assign():
ctx = ds_context.get_replica_context()
return v.assign(ctx.replica_id_in_sync_group)
# disallow assign() with distributed value in replica context.
with self.assertRaisesRegex(ValueError,
"Cannot update non-float variables"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(assign)))
# allow assign() with same value in replica context.
@def_function.function
def assign_same():
return v.assign(2)
self.evaluate(
distribution.experimental_local_results(
distribution.run(assign_same)))
self.assertEqual(self.evaluate(v.read_value()), 2)
# allow assign() with mirrored variable in replica context.
with distribution.scope():
v2 = variables_lib.Variable(
3,
aggregation=variable_scope.VariableAggregation.SUM,
dtype=dtypes.int32)
self.evaluate(v2.initializer)
@def_function.function
def assign_mirrored():
return v.assign(v2)
self.evaluate(
distribution.experimental_local_results(
distribution.run(assign_mirrored)))
self.assertEqual(self.evaluate(v.read_value()), 3)
# allow assign() in cross replica context.
with distribution.scope():
self.evaluate(v.assign(4))
self.assertEqual(self.evaluate(v.read_value()), 4)
@combinations.generate(strategy_with_var_policy())
def testInitializedToSameValueInsideEagerRun(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only test")
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(random_ops.random_normal([]))
distribution.run(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
@combinations.generate(strategy_with_var_policy())
def testAggregationOnlyFirstReplica(self, distribution):
with distribution.scope():
v = variable_scope.variable(
15.,
synchronization=variables_lib.VariableSynchronization.ON_WRITE,
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def assign():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
per_replica_results = self.evaluate(distribution.experimental_local_results(
distribution.run(assign)))
    # The per-replica values should always match the first replica's value.
self.assertAllEqual(
array_ops.zeros(distribution.num_replicas_in_sync, dtypes.float32),
per_replica_results)
@combinations.generate(strategy_with_var_policy())
def testInitScope(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only")
class C(object):
pass
obj = C()
obj.w = None
obj.v = None
@def_function.function
def assign():
with ops.init_scope():
if obj.w is None:
obj.w = variables_lib.Variable(
0, aggregation=variables_lib.VariableAggregation.MEAN)
obj.v = variables_lib.Variable(
obj.w.read_value(),
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
return obj.v.assign_add(2)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
self.assertAllEqual([2, 2], per_replica_results)
@combinations.generate(strategy_with_var_policy())
def testOperatorOverride(self, distribution):
with distribution.scope():
v = variable_scope.variable(
1, aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
self.assertEqual(2, self.evaluate(v + 1))
@def_function.function
def add():
return v + 1
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(add)))
self.assertAllEqual([2, 2], per_replica_results)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"],
use_var_policy=[True, False]))
def testSaveAndRestoreOnWrite(self, strategy):
aggregation = [
variable_scope.VariableAggregation.NONE,
variable_scope.VariableAggregation.ONLY_FIRST_REPLICA,
variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN
]
for agg in aggregation:
v_normal_restore = variables_lib.Variable(1.0)
v_normal_save = variables_lib.Variable(3.0)
with strategy.scope():
v_on_write = variables_lib.Variable(2.0, aggregation=agg)
# Save ONWRITE Restore ONWRITE
# Save
ckpt = trackable_utils.Checkpoint(var=v_on_write)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restore
ckpt.restore(manager.latest_checkpoint)
self.assertEqual(2.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(2.0, self.evaluate(v_on_write.read_value()))
# Save Mirrored Restore Normal
# We've already saved Mirrored, so we only need to restore normal
ckpt_normal = trackable_utils.Checkpoint(var=v_normal_restore)
ckpt_normal.restore(manager.latest_checkpoint)
self.assertEqual(2.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(2.0, self.evaluate(v_normal_restore.read_value()))
# Save Normal Restore Mirrored
# Save
ckpt = trackable_utils.Checkpoint(var=v_normal_save)
manager_2 = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckptckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager_2.save()
# Restore
ckpt_on_write = trackable_utils.Checkpoint(var=v_on_write)
ckpt_on_write.restore(manager_2.latest_checkpoint)
self.assertEqual(3.0, self.evaluate(v_on_write._values[0]))
self.assertEqual(3.0, self.evaluate(v_on_write.read_value()))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
mode=["graph", "eager"],
use_var_policy=[True, False]))
class OnWriteVariableSyncScatterTests(test.TestCase, parameterized.TestCase):
def testScatterSub(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_sub():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.stack([
math_ops.cast(replica_id, dtypes.float32),
math_ops.cast(replica_id + 1, dtypes.float32)
]),
indices=array_ops.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_sub(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_sub)))
self.assertAllEqual([[0., -1., -1.], [0., -1., -1.]], per_replica_results)
def testScatterAdd(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_add():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.stack([replica_id, replica_id + 1]),
indices=array_ops.stack([replica_id, replica_id + 1]),
dense_shape=(3,))
return v.scatter_add(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_add)))
self.assertAllEqual([[0, 2, 2], [0, 2, 2]], per_replica_results)
def testScatterDiv(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[1, 6, 1], aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(v.initializer)
@def_function.function
def scatter_div():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(replica_id + 2, [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_div(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_div)))
self.assertAllEqual([[0, 2, 1], [0, 2, 1]], per_replica_results)
def testScatterMul(self, distribution):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.], aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(v.initializer)
@def_function.function
def scatter_mul():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
value = indexed_slices.IndexedSlices(
values=array_ops.reshape(
math_ops.cast(replica_id + 2, dtypes.float32), [1]),
indices=array_ops.reshape(replica_id, [1]),
dense_shape=(3,))
return v.scatter_mul(value)
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_mul)))
self.assertAllClose([[2., 1.5, 1.], [2., 1.5, 1.]], per_replica_results)
def testScatterMin(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 2, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 2, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_min(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_min(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_min.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_min, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_min, args=(v2,))))
self.assertAllClose([[0, 1, 0], [0, 1, 0]], per_replica_results)
def testScatterMax(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_max(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([1]),
indices=array_ops.identity([0]),
dense_shape=(3,))
return v.scatter_max(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_max.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_max, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_max, args=(v2,))))
self.assertAllClose([[1, 0, 0], [1, 0, 0]], per_replica_results)
def testScatterUpdate(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[0, 0, 0], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable(
[0, 0, 0],
aggregation=variables_lib.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def scatter_update(v):
value = indexed_slices.IndexedSlices(
values=array_ops.identity([3]),
indices=array_ops.identity([1]),
dense_shape=(3,))
return v.scatter_update(value)
with self.assertRaisesRegex(NotImplementedError, "scatter_update.*"):
self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_update, args=(v1,))))
per_replica_results = self.evaluate(
distribution.experimental_local_results(
distribution.run(scatter_update, args=(v2,))))
self.assertAllClose([[0, 3, 0], [0, 3, 0]], per_replica_results)
def testScatterOpsInCrossReplicaContext(self, distribution):
with distribution.scope():
v1 = variables_lib.Variable(
[1, 1, 1], aggregation=variables_lib.VariableAggregation.SUM)
v2 = variables_lib.Variable([1, 1, 1])
self.evaluate(variables_lib.global_variables_initializer())
value = indexed_slices.IndexedSlices(
values=array_ops.identity([2]),
indices=array_ops.identity([0]),
dense_shape=(3,))
with distribution.scope():
self.evaluate(v1.scatter_add(value))
self.assertAllEqual([3, 1, 1], self.evaluate(v1.read_value()))
self.evaluate(v2.scatter_min(value))
self.assertAllEqual([1, 1, 1], self.evaluate(v2.read_value()))
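# Minimal sketch (illustrative; eager mode, no distribution strategy): the
# scatter ops exercised above apply an IndexedSlices delta at the given
# indices of a variable.
def _example_scatter_add():
  v = variables_lib.Variable([0., 0., 0.])
  delta = indexed_slices.IndexedSlices(
      values=constant_op.constant([1., 2.]),
      indices=constant_op.constant([0, 2]),
      dense_shape=(3,))
  v.scatter_add(delta)
  return v.read_value()  # [1., 0., 2.]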
class OnReadVariableSyncTest(test.TestCase, parameterized.TestCase):
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssign(self, distribution, experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1.), ("assign_add", 1.), ("assign_sub", -1.)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# VariableAggregation.SUM in cross-replica mode is tested below,
# VariableAggregation.NONE in cross-replica mode is not supported.
if cross_replica and aggregation in [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.NONE,
]:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignOnReadVar(self, distribution, experimental_run_tf_function):
with distribution.scope():
v_to_assign = variable_scope.variable(
2., aggregation=variables_lib.VariableAggregation.MEAN)
v_to_assign_sub = variable_scope.variable(
-2., aggregation=variables_lib.VariableAggregation.MEAN)
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", v_to_assign), ("assign_add", v_to_assign),
("assign_sub", v_to_assign_sub)]
expected_cross_replica = {
variables_lib.VariableAggregation.SUM: 1.0,
variables_lib.VariableAggregation.MEAN: 2.0,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA: 2.0
}
expected_replica = {
variables_lib.VariableAggregation.SUM: 2.0,
variables_lib.VariableAggregation.MEAN: 2.0,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA: 2.0
}
# aggregation=NONE is not supported for OnReadVariables.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # assign() in replica context with aggregation=SUM does not make sense,
      # since you could simply assign value * num_replicas instead; the error
      # raised is "1. is not a distributed value and is unsupported for
      # aggregation SUM".
if aggregation == variables_lib.VariableAggregation.SUM:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
if cross_replica:
for component in v._values:
self.assertAllEqual(expected_cross_replica.get(aggregation),
self.evaluate(component.read_value()))
else:
for component in v._values:
self.assertAllEqual(expected_replica.get(aggregation),
self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaVal(self, distribution, experimental_run_tf_function):
if isinstance(distribution, _TPU_STRATEGIES):
self.skipTest("Assigning PerReplica values is not supported. See"
" sponge/80ba41f8-4220-4516-98ce-bbad48f9f11a.")
    self.skipTest("We don't support assigning PerReplica values in cross "
                  "replica context or replica context. See error in "
                  "sponge/2b2e54c1-eda6-4534-82e1-c73b1dcd517f.")
with distribution.scope():
per_replica_value = values.PerReplica(
[constant_op.constant(2.0),
constant_op.constant(2.0)])
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", per_replica_value)]
    # We don't support assigning PerReplica values to vars in replica context
# with aggregation=NONE.
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
      # assign() in replica context with aggregation=SUM does not make sense,
      # since you could simply assign value * num_replicas instead; the error
      # raised is "1. is not a distributed value and is unsupported for
      # aggregation SUM".
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
# with self.assertRaisesRegex(ValueError, "Attempt to convert a value "):
self.evaluate(assign(fn, v, update_value, cross_replica))
if aggregation == variables_lib.VariableAggregation.SUM:
expected = 4.0
else:
expected = 2.0
for component in v._values:
self.assertAllEqual(expected, self.evaluate(component.read_value()))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignDtypeConversion(self, distribution,
experimental_run_tf_function):
def assign(fn, v, update_value, cross_replica):
update_fn = lambda: getattr(v, fn)(update_value)
if cross_replica:
return update_fn()
else:
if experimental_run_tf_function:
update_fn = def_function.function(update_fn)
return distribution.experimental_local_results(
distribution.run(update_fn))
updates = [("assign", 1), ("assign_add", 1), ("assign_sub", -1)]
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
options = list(
x for x in itertools.product(updates, aggregations, [True, False]))
for update, aggregation, cross_replica in options:
# VariableAggregation.SUM in cross-replica mode is tested below,
# VariableAggregation.NONE in cross-replica mode is not supported.
if cross_replica and aggregation in [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.NONE,
]:
continue
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
fn, update_value = update
self.evaluate(assign(fn, v, update_value, cross_replica))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_with_var_policy())
def testAssignWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
self.evaluate(v.assign(1. * distribution.num_replicas_in_sync))
for component in v._values:
self.assertAllEqual(self.evaluate(component.read_value()),
self.evaluate(array_ops.ones_like(component)))
@combinations.generate(strategy_with_var_policy())
def testAssignAddSubWithAggregationSum(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.SUM)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_add(1.))
with self.assertRaisesRegex(
ValueError, "SyncOnReadVariable does not support "):
self.evaluate(v.assign_sub(1.))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.NONE,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
if experimental_run_tf_function:
read_var_fn = def_function.function(v.read_value)
else:
read_var_fn = v.read_value
results = self.evaluate(
distribution.experimental_local_results(
distribution.run(read_var_fn)))
for component, value in zip(v._values, results):
self.assertAllEqual(self.evaluate(component.read_value()), value)
@combinations.generate(strategy_and_run_tf_function_combinations())
def testReadValueInCrossReplicaContext(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
if isinstance(distribution, _TPU_STRATEGIES):
resolver = tpu_cluster_resolver.TPUClusterResolver("")
tpu_strategy_util.initialize_tpu_system(resolver)
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
def assign(v=v):
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
if experimental_run_tf_function:
assign = def_function.function(assign)
self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
num_replicas = distribution.num_replicas_in_sync
sum_of_replica_values = num_replicas * (num_replicas - 1) / 2.
if aggregation == variables_lib.VariableAggregation.SUM:
expected = sum_of_replica_values
elif aggregation == variables_lib.VariableAggregation.MEAN:
expected = sum_of_replica_values / num_replicas
else:
expected = 0
self.assertEqual(expected, self.evaluate(v.read_value()), aggregation)
self.assertEqual(expected, self.evaluate(v.value()), aggregation)
self.assertEqual(expected, self.evaluate(v), aggregation)
self.assertEqual(expected, self.evaluate(array_ops.identity(v)),
aggregation)
# TODO(b/145574622): Re-enable this test once ReduceOp argument is
# respected on GPUs.
@combinations.generate(strategy_and_run_tf_function_combinations())
def disable_testAllReduce(self, distribution,
experimental_run_tf_function):
with distribution.scope():
v = variable_scope.variable(
2.,
synchronization=variables_lib.VariableSynchronization.ON_WRITE,
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
def all_reduce():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return ctx.all_reduce("SUM", v) + math_ops.cast(replica_id,
dtypes.float32)
if experimental_run_tf_function:
all_reduce = def_function.function(all_reduce)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(all_reduce)))
expected_result = []
for i in range(distribution.num_replicas_in_sync):
expected_result.append(2.0 * distribution.num_replicas_in_sync +
1.0 * i)
self.assertEqual(per_replica_results, tuple(expected_result))
@combinations.generate(strategy_and_run_tf_function_combinations())
def testAssignPerReplicaBeforeRead(self, distribution,
experimental_run_tf_function):
aggregations = [
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
]
for aggregation in aggregations:
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(variables_lib.global_variables_initializer())
def assign(var=v):
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return var.assign(math_ops.cast(replica_id, dtypes.float32))
if experimental_run_tf_function:
assign = def_function.function(assign)
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(assign)))
expected_result = []
for i in range(distribution.num_replicas_in_sync):
expected_result.append(1.0 * i)
self.assertEqual(per_replica_results, tuple(expected_result))
@combinations.generate(strategy_with_var_policy())
def testReadValueWithAggregationNoneInCrossReplicaContext(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.NONE)
self.evaluate(variables_lib.global_variables_initializer())
with self.assertRaisesRegex(
ValueError, "Could not convert from .* VariableAggregation\\.NONE"):
self.evaluate(v.read_value())
@combinations.generate(strategy_with_var_policy())
def testInitializedToSameValueInsideEagerRun(self, distribution):
if not context.executing_eagerly(): self.skipTest("eager only")
v = [None]
@def_function.function
def step():
def f():
if v[0] is None:
v[0] = variables_lib.Variable(
random_ops.random_normal([]),
synchronization=variables_lib.VariableSynchronization.ON_READ)
distribution.run(f)
context.set_global_seed(None)
step()
vals = self.evaluate(v[0].values)
self.assertAllEqual(vals[0], vals[1])
@combinations.generate(strategy_with_var_policy())
def testOperatorOverride(self, distribution):
with distribution.scope():
v = variable_scope.variable(
0.0,
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=variables_lib.VariableAggregation.MEAN)
self.evaluate(variables_lib.global_variables_initializer())
@def_function.function
def assign():
ctx = ds_context.get_replica_context()
replica_id = ctx.replica_id_in_sync_group
return v.assign(math_ops.cast(replica_id, dtypes.float32))
# Assign different replicas with different values.
self.evaluate(distribution.experimental_local_results(
distribution.run(assign)))
self.assertEqual(1.5, self.evaluate(v + 1))
@def_function.function
def add():
return v + 1
per_replica_results = self.evaluate(
distribution.experimental_local_results(distribution.run(add)))
self.assertAllEqual([1, 2], per_replica_results)
@combinations.generate(
combinations.combine(
strategy=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
strategy_combinations.tpu_strategy,
strategy_combinations.tpu_strategy_packed_var,
strategy_combinations.multi_worker_mirrored_2x1_cpu,
strategy_combinations.multi_worker_mirrored_2x1_gpu,
],
mode=["eager"],
use_var_policy=[True, False]))
def testSaveAndRestoreOnRead(self, strategy):
aggregation = [variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN]
for agg in aggregation:
v_normal_restore = variables_lib.Variable(1.0)
v_normal_save = variables_lib.Variable(2.0)
with strategy.scope():
v_on_read = variables_lib.Variable(
1.0, synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=agg)
@def_function.function
def assign_fn():
cluster_resolver = strategy.cluster_resolver
replica_ctx = ds_context.get_replica_context()
if ((cluster_resolver and cluster_resolver.task_type == "worker") or
math_ops.equal(replica_ctx.replica_id_in_sync_group,
constant_op.constant(1))):
v_on_read.assign(3.) # pylint:disable=cell-var-from-loop
else:
v_on_read.assign(4.) # pylint:disable=cell-var-from-loop
strategy.run(assign_fn)
# Save ONREAD, restore ONREAD
# Saves v[0] + v[1] = 7 for SUM and 3.5 for MEAN.
ckpt = trackable_utils.Checkpoint(var=v_on_read)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restores a value of 7/2 = 3.5 for SUM and 3.5 for MEAN.
ckpt.restore(manager.latest_checkpoint)
self.assertEqual(3.5, self.evaluate(v_on_read._values[0]))
# Save ONREAD, restore normal
ckpt_normal = trackable_utils.Checkpoint(var=v_normal_restore)
ckpt_normal.restore(manager.latest_checkpoint)
if agg == variable_scope.VariableAggregation.SUM:
self.assertEqual(7.0, self.evaluate(v_normal_restore.read_value()))
else:
self.assertEqual(3.5, self.evaluate(v_normal_restore.read_value()))
# Save normal, restore ONREAD
ckpt = trackable_utils.Checkpoint(var=v_normal_save)
manager = ckpt_manager.CheckpointManager(
ckpt, "/tmp/ckpt_" + str(uuid.uuid4()), max_to_keep=None)
manager.save()
# Restores a value of 2/2 = 1.0 for SUM and 2.0 for MEAN.
ckpt_on_read = trackable_utils.Checkpoint(var=v_on_read)
ckpt_on_read.restore(manager.latest_checkpoint)
if agg == variable_scope.VariableAggregation.SUM:
self.assertEqual(1.0, self.evaluate(v_on_read._values[0]))
else:
self.assertEqual(2.0, self.evaluate(v_on_read._values[0]))
@combinations.generate(
combinations.combine(
distribution=[
strategy_combinations.mirrored_strategy_with_gpu_and_cpu,
],
aggregation=[
variables_lib.VariableAggregation.MEAN,
variables_lib.VariableAggregation.SUM,
variables_lib.VariableAggregation.ONLY_FIRST_REPLICA,
],
mode=["graph", "eager"],
use_var_policy=[True, False]))
class SyncOnReadScatterReplicaTest(test.TestCase, parameterized.TestCase):
def testScatterSub(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_sub, args=(delta,)))
def testScatterAdd(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[1., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[0.], [1.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_add, args=(delta,)))
def testScatterDiv(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 6., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [3.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_div, args=(delta,)))
def testScatterMul(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[2., 1., 1.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[2.], [3.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[4.], [5.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_mul, args=(delta,)))
def testScatterMin(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_min, args=(delta,)))
def testScatterMax(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[3., 4., 5.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [8.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[9.], [2.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_max, args=(delta,)))
def testScatterUpdate(self, distribution, aggregation):
with distribution.scope():
v = variables_lib.Variable(
[0., 0., 0.],
synchronization=variables_lib.VariableSynchronization.ON_READ,
aggregation=aggregation)
self.evaluate(v.initializer)
delta = values.PerReplica([
indexed_slices.IndexedSlices(
values=[[1.], [2.]], indices=[0, 1], dense_shape=(3,)),
indexed_slices.IndexedSlices(
values=[[3.], [4.]], indices=[1, 2], dense_shape=(3,)),
])
with self.assertRaises(NotImplementedError):
self.evaluate(distribution.run(v.scatter_update, args=(delta,)))
if __name__ == "__main__":
combinations.main()
| apache-2.0 |
admed/molgears | model/auth.py | 1 | 7884 | # -*- coding: utf-8 -*-
"""
Auth* related model.
This is where the models used by :mod:`repoze.who` and :mod:`repoze.what` are
defined.
It's perfectly fine to re-use this definition in the molgears application,
though.
"""
import os
from datetime import datetime
import sys
try:
from hashlib import sha256
except ImportError:
sys.exit('ImportError: No module named hashlib\n'
'If you are on python2.4 this library is not part of python. '
'Please install it. Example: easy_install hashlib')
__all__ = ['User', 'Group', 'Permission']
from sqlalchemy import Table, ForeignKey, Column
from sqlalchemy.types import Unicode, Integer, DateTime, Text
from sqlalchemy.orm import relation, synonym, relationship
from molgears.model import DeclarativeBase, metadata, DBSession
#{ Association tables
# This is the association table for the many-to-many relationship between
# groups and permissions. This is required by repoze.what.
group_permission_table = Table('tg_group_permission', metadata,
Column('group_id', Integer, ForeignKey('tg_group.group_id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True),
Column('permission_id', Integer, ForeignKey('tg_permission.permission_id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True)
)
# This is the association table for the many-to-many relationship between
# groups and members - this is, the memberships. It's required by repoze.what.
user_group_table = Table('tg_user_group', metadata,
Column('user_id', Integer, ForeignKey('tg_user.user_id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True),
Column('group_id', Integer, ForeignKey('tg_group.group_id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True)
)
permit_users_table = Table('permit_users', metadata,
Column('user_id', Integer, ForeignKey('tg_user.user_id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True),
Column('group_id', Integer, ForeignKey('tg_user_lists.id',
onupdate="CASCADE", ondelete="CASCADE"), primary_key=True)
)
#{ The auth* model itself
class UserLists(DeclarativeBase):
__tablename__ = 'tg_user_lists'
id = Column(Integer, autoincrement=True, primary_key=True)
name = Column(Unicode)
table = Column(Unicode)
notes = Column(Text)
elements = Column(Unicode)
pname = Column(Unicode)
permitusers = relation('User', secondary=permit_users_table, backref='tg_user_lists')
tg_user_id = Column(Integer, ForeignKey('tg_user.user_id'))
class Group(DeclarativeBase):
"""
Group definition for :mod:`repoze.what`.
Only the ``group_name`` column is required by :mod:`repoze.what`.
"""
__tablename__ = 'tg_group'
#{ Columns
group_id = Column(Integer, autoincrement=True, primary_key=True)
group_name = Column(Unicode(16), unique=True, nullable=False)
display_name = Column(Unicode(255))
created = Column(DateTime, default=datetime.now)
#{ Relations
users = relation('User', secondary=user_group_table, backref='groups')
#{ Special methods
def __repr__(self):
return ('<Group: name=%s>' % self.group_name).encode('utf-8')
def __unicode__(self):
return self.group_name
#}
# The 'info' argument we're passing to the email_address and password columns
# contains metadata that Rum (http://python-rum.org/) can use to generate an
# admin interface for your models.
class User(DeclarativeBase):
"""
User definition.
This is the user definition used by :mod:`repoze.who`, which requires at
least the ``user_name`` column.
"""
__tablename__ = 'tg_user'
#{ Columns
user_id = Column(Integer, autoincrement=True, primary_key=True)
user_name = Column(Unicode(36), unique=True, nullable=False)
email_address = Column(Unicode(255), unique=True, nullable=False,
info={'rum': {'field':'Email'}})
display_name = Column(Unicode(255))
_password = Column('password', Unicode(128),
info={'rum': {'field':'Password'}})
created = Column(DateTime, default=datetime.now)
items_per_page = Column(Integer, default=30)
limit_sim = Column(Integer, default=30)
threshold = Column(Integer, default=35)
lists = relationship("UserLists", backref="tg_user", order_by="UserLists.id")
#{ Special methods
def __repr__(self):
return ('<User: name=%s, email=%s, display=%s>' % (
self.user_name, self.email_address, self.display_name)).encode('utf-8')
def __unicode__(self):
return self.display_name or self.user_name
#{ Getters and setters
@property
def permissions(self):
"""Return a set with all permissions granted to the user."""
perms = set()
for g in self.groups:
perms = perms | set(g.permissions)
return perms
@classmethod
def by_email_address(cls, email):
"""Return the user object whose email address is ``email``."""
return DBSession.query(cls).filter_by(email_address=email).first()
@classmethod
def by_user_name(cls, username):
"""Return the user object whose user name is ``username``."""
return DBSession.query(cls).filter_by(user_name=username).first()
@classmethod
def _hash_password(cls, password):
# Make sure password is a str because we cannot hash unicode objects
if isinstance(password, unicode):
password = password.encode('utf-8')
salt = sha256()
salt.update(os.urandom(60))
hash = sha256()
hash.update(password + salt.hexdigest())
password = salt.hexdigest() + hash.hexdigest()
# Make sure the hashed password is a unicode object at the end of the
# process because SQLAlchemy _wants_ unicode objects for Unicode cols
if not isinstance(password, unicode):
password = password.decode('utf-8')
return password
def _set_password(self, password):
"""Hash ``password`` on the fly and store its hashed version."""
self._password = self._hash_password(password)
def _get_password(self):
"""Return the hashed version of the password."""
return self._password
password = synonym('_password', descriptor=property(_get_password,
_set_password))
#}
def validate_password(self, password):
"""
Check the password against existing credentials.
:param password: the password that was provided by the user to
try and authenticate. This is the clear text version that we will
need to match against the hashed one in the database.
:type password: unicode object.
:return: Whether the password is valid.
:rtype: bool
"""
hash = sha256()
if isinstance(password, unicode):
password = password.encode('utf-8')
hash.update(password + str(self.password[:64]))
return self.password[64:] == hash.hexdigest()
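# Illustrative sketch (not part of the model): the stored password is the
# 64-char hex salt followed by the 64-char hex digest, so setting and
# validating a password round-trips like this:
#     u = User(user_name=u'alice', email_address=u'alice@example.com')
#     u.password = u's3cret'          # hashed on the fly by _set_password()
#     assert len(u.password) == 128   # salt hexdigest + hash hexdigest
#     assert u.validate_password(u's3cret')
#     assert not u.validate_password(u'wrong')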
class Permission(DeclarativeBase):
"""
Permission definition for :mod:`repoze.what`.
Only the ``permission_name`` column is required by :mod:`repoze.what`.
"""
__tablename__ = 'tg_permission'
#{ Columns
permission_id = Column(Integer, autoincrement=True, primary_key=True)
permission_name = Column(Unicode(63), unique=True, nullable=False)
description = Column(Unicode(255))
#{ Relations
groups = relation(Group, secondary=group_permission_table,
backref='permissions')
#{ Special methods
def __repr__(self):
return ('<Permission: name=%s>' % self.permission_name).encode('utf-8')
def __unicode__(self):
return self.permission_name
#}
#}
| bsd-3-clause |
ychen820/microblog | flask/lib/python2.7/site-packages/sqlalchemy/orm/descriptor_props.py | 21 | 24455 | # orm/descriptor_props.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Descriptor properties are more "auxiliary" properties
that exist as configurational elements, but don't participate
as actively in the load/persist ORM loop.
"""
from .interfaces import MapperProperty, PropComparator
from .util import _none_set
from . import attributes
from .. import util, sql, exc as sa_exc, event, schema
from ..sql import expression
from . import properties
from . import query
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
def instrument_class(self, mapper):
prop = self
class _ProxyImpl(object):
accepts_scalar_loader = False
expire_missing = True
collection = False
def __init__(self, key):
self.key = key
if hasattr(prop, 'get_history'):
def get_history(self, state, dict_,
passive=attributes.PASSIVE_OFF):
return prop.get_history(state, dict_, passive)
if self.descriptor is None:
desc = getattr(mapper.class_, self.key, None)
if mapper._is_userland_descriptor(desc):
self.descriptor = desc
if self.descriptor is None:
def fset(obj, value):
setattr(obj, self.name, value)
def fdel(obj):
delattr(obj, self.name)
def fget(obj):
return getattr(obj, self.name)
self.descriptor = property(
fget=fget,
fset=fset,
fdel=fdel,
)
proxy_attr = attributes.create_proxied_attribute(
self.descriptor)(
self.parent.class_,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc,
original_property=self
)
proxy_attr.impl = _ProxyImpl(self.key)
mapper.class_manager.instrument_attribute(self.key, proxy_attr)
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class CompositeProperty(DescriptorProperty):
"""Defines a "composite" mapped attribute, representing a collection
of columns as one attribute.
:class:`.CompositeProperty` is constructed using the :func:`.composite`
function.
.. seealso::
:ref:`mapper_composite`
"""
def __init__(self, class_, *attrs, **kwargs):
"""Return a composite column-based property for use with a Mapper.
See the mapping documentation section :ref:`mapper_composite` for a
full usage example.
The :class:`.MapperProperty` returned by :func:`.composite`
is the :class:`.CompositeProperty`.
:param class\_:
The "composite type" class.
:param \*attrs:
List of Column objects, mapped attributes, or attribute names to be mapped.
:param active_history=False:
When ``True``, indicates that the "previous" value for a
scalar attribute should be loaded when replaced, if not
already loaded. See the same flag on :func:`.column_property`.
.. versionchanged:: 0.7
This flag specifically becomes meaningful
- previously it was a placeholder.
:param group:
A group name for this property when marked as deferred.
:param deferred:
When True, the column property is "deferred", meaning that it does
not load immediately, and is instead loaded when the attribute is
first accessed on an instance. See also
:func:`~sqlalchemy.orm.deferred`.
:param comparator_factory: a class which extends
:class:`.CompositeProperty.Comparator` which provides custom SQL
clause generation for comparison operations.
:param doc:
optional string that will be applied as the doc on the
class-bound descriptor.
:param info: Optional data dictionary which will be populated into the
:attr:`.MapperProperty.info` attribute of this object.
.. versionadded:: 0.8
:param extension:
an :class:`.AttributeExtension` instance,
or list of extensions, which will be prepended to the list of
attribute listeners for the resulting descriptor placed on the
class. **Deprecated.** Please see :class:`.AttributeEvents`.
"""
self.attrs = attrs
self.composite_class = class_
self.active_history = kwargs.get('active_history', False)
self.deferred = kwargs.get('deferred', False)
self.group = kwargs.get('group', None)
self.comparator_factory = kwargs.pop('comparator_factory',
self.__class__.Comparator)
if 'info' in kwargs:
self.info = kwargs.pop('info')
util.set_creation_order(self)
self._create_descriptor()
def instrument_class(self, mapper):
super(CompositeProperty, self).instrument_class(mapper)
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
def fget(instance):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
if self.key not in dict_:
# key not present. Iterate through related
# attributes, retrieve their values. This
# ensures they all load.
values = [
getattr(instance, key)
for key in self._attribute_keys
]
# current expected behavior here is that the composite is
# created on access if the object is persistent or if
# col attributes have non-None. This would be better
# if the composite were created unconditionally,
# but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or
not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
state.manager.dispatch.refresh(state, None, [self.key])
return dict_.get(self.key, None)
def fset(instance, value):
dict_ = attributes.instance_dict(instance)
state = attributes.instance_state(instance)
attr = state.manager[self.key]
previous = dict_.get(self.key, attributes.NO_VALUE)
for fn in attr.dispatch.set:
value = fn(state, value, previous, attr.impl)
dict_[self.key] = value
if value is None:
for key in self._attribute_keys:
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys,
value.__composite_values__()):
setattr(instance, key, value)
def fdel(instance):
state = attributes.instance_state(instance)
dict_ = attributes.instance_dict(instance)
previous = dict_.pop(self.key, attributes.NO_VALUE)
attr = state.manager[self.key]
attr.dispatch.remove(state, previous, attr.impl)
for key in self._attribute_keys:
setattr(instance, key, None)
self.descriptor = property(fget, fset, fdel)
@util.memoized_property
def _comparable_elements(self):
return [
getattr(self.parent.class_, prop.key)
for prop in self.props
]
@util.memoized_property
def props(self):
props = []
for attr in self.attrs:
if isinstance(attr, str):
prop = self.parent.get_property(
attr, _configure_mappers=False)
elif isinstance(attr, schema.Column):
prop = self.parent._columntoproperty[attr]
elif isinstance(attr, attributes.InstrumentedAttribute):
prop = attr.property
else:
raise sa_exc.ArgumentError(
"Composite expects Column objects or mapped "
"attributes/attribute names as arguments, got: %r"
% (attr,))
props.append(prop)
return props
@property
def columns(self):
return [a for a in self.attrs if isinstance(a, schema.Column)]
def _setup_arguments_on_columns(self):
"""Propagate configuration arguments made on this composite
to the target columns, for those that apply.
"""
for prop in self.props:
prop.active_history = self.active_history
if self.deferred:
prop.deferred = self.deferred
prop.strategy_class = prop._strategy_lookup(
("deferred", True),
("instrument", True))
prop.group = self.group
def _setup_event_handlers(self):
"""Establish events that populate/expire the composite attribute."""
def load_handler(state, *args):
dict_ = state.dict
if self.key in dict_:
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
return
# assert self.key not in dict_
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in
self._attribute_keys]
)
def expire_handler(state, keys):
if keys is None or set(self._attribute_keys).intersection(keys):
state.dict.pop(self.key, None)
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
defaults. Pop out the composite value here so that it
recreates.
"""
state.dict.pop(self.key, None)
event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
event.listen(self.parent, 'load',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'refresh',
load_handler, raw=True, propagate=True)
event.listen(self.parent, 'expire',
expire_handler, raw=True, propagate=True)
# TODO: need a deserialize hook here
@util.memoized_property
def _attribute_keys(self):
return [
prop.key for prop in self.props
]
def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF):
"""Provided for userland code that uses attributes.get_history()."""
added = []
deleted = []
has_history = False
for prop in self.props:
key = prop.key
hist = state.manager[key].impl.get_history(state, dict_)
if hist.has_changes():
has_history = True
non_deleted = hist.non_deleted()
if non_deleted:
added.extend(non_deleted)
else:
added.append(None)
if hist.deleted:
deleted.extend(hist.deleted)
else:
deleted.append(None)
if has_history:
return attributes.History(
[self.composite_class(*added)],
(),
[self.composite_class(*deleted)]
)
else:
return attributes.History(
(), [self.composite_class(*added)], ()
)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
class CompositeBundle(query.Bundle):
def __init__(self, property, expr):
self.property = property
super(CompositeProperty.CompositeBundle, self).__init__(
property.key, *expr)
def create_row_processor(self, query, procs, labels):
def proc(row, result):
return self.property.composite_class(
*[proc(row, result) for proc in procs])
return proc
class Comparator(PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.CompositeProperty` attributes.
See the example in :ref:`composite_operations` for an overview
of usage, as well as the documentation for :class:`.PropComparator`.
See also:
:class:`.PropComparator`
:class:`.ColumnOperators`
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
"""
__hash__ = None
@property
def clauses(self):
return self.__clause_element__()
def __clause_element__(self):
return expression.ClauseList(
group=False, *self._comparable_elements)
def _query_clause_element(self):
return CompositeProperty.CompositeBundle(
self.prop, self.__clause_element__())
@util.memoized_property
def _comparable_elements(self):
if self._adapt_to_entity:
return [
getattr(
self._adapt_to_entity.entity,
prop.key
) for prop in self.prop._comparable_elements
]
else:
return self.prop._comparable_elements
def __eq__(self, other):
if other is None:
values = [None] * len(self.prop._comparable_elements)
else:
values = other.__composite_values__()
comparisons = [
a == b
for a, b in zip(self.prop._comparable_elements, values)
]
if self._adapt_to_entity:
comparisons = [self.adapter(x) for x in comparisons]
return sql.and_(*comparisons)
def __ne__(self, other):
return sql.not_(self.__eq__(other))
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
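# Hypothetical usage sketch (see :ref:`mapper_composite` for the canonical
# example; the usual declarative imports are assumed): a composite maps
# several columns onto one value object implementing __composite_values__():
#     class Point(object):
#         def __init__(self, x, y):
#             self.x, self.y = x, y
#         def __composite_values__(self):
#             return self.x, self.y
#     class Vertex(Base):
#         __tablename__ = 'vertices'
#         id = Column(Integer, primary_key=True)
#         x1 = Column(Integer)
#         y1 = Column(Integer)
#         start = composite(Point, x1, y1)
# Vertex.start then loads and persists as a Point, and compares against
# Point instances in queries via the Comparator defined above.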
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
"""
def _comparator_factory(self, mapper):
comparator_callable = None
for m in self.parent.iterate_to_root():
p = m._props[self.key]
if not isinstance(p, ConcreteInheritedProperty):
comparator_callable = p.comparator_factory
break
return comparator_callable
def __init__(self):
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add "
"this property explicitly to %s." %
(self.parent, self.key, self.parent))
class NoninheritedConcreteProp(object):
def __set__(s, obj, value):
warn()
def __delete__(s, obj):
warn()
def __get__(s, obj, owner):
if obj is None:
return self.descriptor
warn()
self.descriptor = NoninheritedConcreteProp()
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class SynonymProperty(DescriptorProperty):
def __init__(self, name, map_column=None,
descriptor=None, comparator_factory=None,
doc=None):
"""Denote an attribute name as a synonym to a mapped property,
in that the attribute will mirror the value and expression behavior
of another attribute.
:param name: the name of the existing mapped property. This
can refer to the string name of any :class:`.MapperProperty`
configured on the class, including column-bound attributes
and relationships.
:param descriptor: a Python :term:`descriptor` that will be used
as a getter (and potentially a setter) when this attribute is
accessed at the instance level.
:param map_column: if ``True``, the :func:`.synonym` construct will
locate the existing named :class:`.MapperProperty` based on the
attribute name of this :func:`.synonym`, and assign it to a new
attribute linked to the name of this :func:`.synonym`.
That is, given a mapping like::
class MyClass(Base):
__tablename__ = 'my_table'
id = Column(Integer, primary_key=True)
job_status = Column(String(50))
job_status = synonym("_job_status", map_column=True)
The above class ``MyClass`` will now have the ``job_status``
:class:`.Column` object mapped to the attribute named
``_job_status``, and the attribute named ``job_status`` will refer
to the synonym itself. This feature is typically used in
conjunction with the ``descriptor`` argument in order to link a
user-defined descriptor as a "wrapper" for an existing column.
:param comparator_factory: A subclass of :class:`.PropComparator`
that will provide custom comparison behavior at the SQL expression
level.
.. note::
For the use case of providing an attribute which redefines both
Python-level and SQL-expression level behavior of an attribute,
please refer to the Hybrid attribute introduced at
:ref:`mapper_hybrids` for a more effective technique.
.. seealso::
:ref:`synonyms` - examples of functionality.
:ref:`mapper_hybrids` - Hybrids provide a better approach for
more complicated attribute-wrapping schemes than synonyms.
"""
self.name = name
self.map_column = map_column
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
# TODO: when initialized, check _proxied_property,
# emit a warning if its not a column-based property
@util.memoized_property
def _proxied_property(self):
return getattr(self.parent.class_, self.name).property
def _comparator_factory(self, mapper):
prop = self._proxied_property
if self.comparator_factory:
comp = self.comparator_factory(prop, mapper)
else:
comp = prop.comparator_factory(prop, mapper)
return comp
def set_parent(self, parent, init):
if self.map_column:
# implement the 'map_column' option.
if self.key not in parent.mapped_table.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
% (self.name, parent.mapped_table.description, self.key))
elif parent.mapped_table.c[self.key] in \
parent._columntoproperty and \
parent._columntoproperty[
parent.mapped_table.c[self.key]
].key == self.name:
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" %
(self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(parent.mapped_table.c[self.key])
parent._configure_property(
self.name, p,
init=init,
setparent=True)
p._mapped_by_synonym = self.key
self.parent = parent
@util.langhelpers.dependency_for("sqlalchemy.orm.properties")
class ComparableProperty(DescriptorProperty):
"""Instruments a Python property for use in query expressions."""
def __init__(self, comparator_factory, descriptor=None, doc=None):
"""Provides a method of applying a :class:`.PropComparator`
to any Python descriptor attribute.
.. versionchanged:: 0.7
:func:`.comparable_property` is superseded by
the :mod:`~sqlalchemy.ext.hybrid` extension. See the example
at :ref:`hybrid_custom_comparators`.
Allows any Python descriptor to behave like a SQL-enabled
attribute when used at the class level in queries, allowing
redefinition of expression operator behavior.
In the example below we redefine :meth:`.PropComparator.operate`
to wrap both sides of an expression in ``func.lower()`` to produce
case-insensitive comparison::
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm.interfaces import PropComparator
from sqlalchemy.sql import func
from sqlalchemy import Integer, String, Column
from sqlalchemy.ext.declarative import declarative_base
class CaseInsensitiveComparator(PropComparator):
def __clause_element__(self):
return self.prop
def operate(self, op, other):
return op(
func.lower(self.__clause_element__()),
func.lower(other)
)
Base = declarative_base()
class SearchWord(Base):
__tablename__ = 'search_word'
id = Column(Integer, primary_key=True)
word = Column(String)
word_insensitive = comparable_property(lambda prop, mapper:
CaseInsensitiveComparator(
mapper.c.word, mapper)
)
A mapping like the above allows the ``word_insensitive`` attribute
to render an expression like::
>>> print SearchWord.word_insensitive == "Trucks"
lower(search_word.word) = lower(:lower_1)
:param comparator_factory:
A PropComparator subclass or factory that defines operator behavior
for this property.
:param descriptor:
Optional when used in a ``properties={}`` declaration. The Python
descriptor or property to layer comparison behavior on top of.
The like-named descriptor will be automatically retrieved from the
mapped class if left blank in a ``properties`` declaration.
"""
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
util.set_creation_order(self)
def _comparator_factory(self, mapper):
return self.comparator_factory(self, mapper)
| bsd-3-clause |
noironetworks/group-based-policy | gbpservice/neutron/db/implicitsubnetpool_db.py | 1 | 6324 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sqlalchemy as sa
from sqlalchemy import orm
from sqlalchemy import sql
from neutron.db import models_v2
from neutron_lib.api.definitions import subnetpool as subnetpool_def
from neutron_lib.api import validators
from neutron_lib.db import model_base
from neutron_lib.db import model_query
from neutron_lib.db import resource_extend
from neutron_lib import exceptions as n_exc
from gbpservice._i18n import _
def _subnetpool_model_hook(context, original_model, query):
query = query.outerjoin(ImplicitSubnetpool,
(original_model.id == ImplicitSubnetpool.subnetpool_id))
return query
def _subnetpool_filter_hook(context, original_model, conditions):
return conditions
def _subnetpool_result_filter_hook(query, filters):
vals = filters and filters.get('is_implicit', [])
if not vals:
return query
return query.filter((ImplicitSubnetpool.is_implicit.in_(vals)))
class ImplicitSubnetpool(model_base.BASEV2):
__tablename__ = "implicit_subnetpools"
subnetpool_id = sa.Column(sa.String(36),
sa.ForeignKey('subnetpools.id',
ondelete="CASCADE"),
primary_key=True)
is_implicit = sa.Column(sa.Boolean, nullable=False,
server_default=sql.false())
subnetpool = orm.relationship(
models_v2.SubnetPool,
backref=orm.backref("implicit",
lazy="joined", cascade="delete"))
@resource_extend.has_resource_extenders
class ImplicitSubnetpoolMixin(object):
"""Mixin class for implicit subnetpool."""
def __new__(cls, *args, **kwargs):
model_query.register_hook(
models_v2.SubnetPool,
"implicit_subnetpool",
query_hook=_subnetpool_model_hook,
filter_hook=_subnetpool_filter_hook,
result_filters=_subnetpool_result_filter_hook)
return super(ImplicitSubnetpoolMixin, cls).__new__(
cls, *args, **kwargs)
def get_implicit_subnetpool_id(self, context, tenant=None, ip_version="4"):
pool = self.get_implicit_subnetpool(context, tenant=tenant,
ip_version=ip_version)
return pool['id'] if pool else None
def get_implicit_subnetpool(self, context, tenant=None, ip_version="4"):
pools = self._get_implicit_subnetpools(context, tenant=tenant,
ip_version=ip_version)
return pools[0] if pools else None
def _get_implicit_subnetpools(self, context, tenant=None, ip_version="4"):
admin_context = context.elevated()
filters = {"is_implicit": [True],
"ip_version": ip_version}
if tenant:
filters["tenant_id"] = [tenant]
else:
filters["shared"] = [True]
with context.session.begin(subtransactions=True):
return self.get_subnetpools(admin_context, filters)
def _get_implicit_subnetpool(self, context, subnetpool_id):
return (context.session.query(ImplicitSubnetpool).
filter_by(subnetpool_id=subnetpool_id)).first()
@staticmethod
@resource_extend.extends([subnetpool_def.COLLECTION_NAME])
def _extend_subnetpool_dict_implicit(subnetpool_res, subnetpool_db):
try:
subnetpool_res["is_implicit"] = (
subnetpool_db.implicit[0].is_implicit)
except (IndexError, AttributeError):
# is_implicit is not created yet when subnetpool is first added
# to the database
pass
return subnetpool_res
def update_implicit_subnetpool(self, context, subnetpool):
is_implicit = False
if validators.is_attr_set(subnetpool.get('is_implicit')):
is_implicit = subnetpool['is_implicit']
with context.session.begin(subtransactions=True):
if is_implicit:
# Verify feasibility. Only one implicit SP must exist per
# tenant (or global)
msg = _('There can be at most one implicit '
'subnetpool per address family per tenant.')
self._validate_implicit_subnetpool(
context, subnetpool['id'], tenant=subnetpool['tenant_id'],
msg=msg, ip_version=subnetpool['ip_version'])
if subnetpool['shared']:
# Check globally too
msg = _('There can be at most one global implicit '
'subnetpool per address family.')
self._validate_implicit_subnetpool(
context, subnetpool['id'],
tenant=None,
msg=msg, ip_version=subnetpool['ip_version'])
db_obj = self._get_implicit_subnetpool(
context, subnetpool['id'])
if db_obj:
db_obj.is_implicit = is_implicit
db_obj = db_obj or ImplicitSubnetpool(
subnetpool_id=subnetpool['id'],
is_implicit=is_implicit)
context.session.add(db_obj)
return is_implicit
def _validate_implicit_subnetpool(self, context, subnetpool_id,
tenant=None, msg=None, ip_version="4"):
current_implicit_sp = self._get_implicit_subnetpools(
context, tenant=tenant, ip_version=ip_version)
if len(current_implicit_sp) > 1:
raise n_exc.BadRequest(resource='subnetpools', msg=msg)
if (len(current_implicit_sp) == 1 and
current_implicit_sp[0]['id'] != subnetpool_id):
raise n_exc.BadRequest(resource='subnetpools', msg=msg)
| apache-2.0 |
jwjohns/PyMySQL | pymysql/tests/thirdparty/test_MySQLdb/test_MySQLdb_capabilities.py | 46 | 3248 | #!/usr/bin/env python
from . import capabilities
try:
import unittest2 as unittest
except ImportError:
import unittest
import pymysql
from pymysql.tests import base
import warnings
warnings.filterwarnings('error')
class test_MySQLdb(capabilities.DatabaseTest):
db_module = pymysql
connect_args = ()
connect_kwargs = base.PyMySQLTestCase.databases[0].copy()
connect_kwargs.update(dict(read_default_file='~/.my.cnf',
use_unicode=True,
charset='utf8', sql_mode="ANSI,STRICT_TRANS_TABLES,TRADITIONAL"))
create_table_extra = "ENGINE=INNODB CHARACTER SET UTF8"
leak_test = False
def quote_identifier(self, ident):
return "`%s`" % ident
def test_TIME(self):
from datetime import timedelta
def generator(row,col):
return timedelta(0, row*8000)
self.check_data_integrity(
('col1 TIME',),
generator)
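# The check_data_integrity() harness (defined in the capabilities module)
# creates a table with the given column definitions and fills each cell by
# calling generator(row, col); the tests below only vary that callback.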
def test_TINYINT(self):
# Number data
def generator(row,col):
v = (row*row) % 256
if v > 127:
v = v-256
return v
self.check_data_integrity(
('col1 TINYINT',),
generator)
def test_stored_procedures(self):
db = self.connection
c = self.cursor
try:
self.create_table(('pos INT', 'tree CHAR(20)'))
c.executemany("INSERT INTO %s (pos,tree) VALUES (%%s,%%s)" % self.table,
list(enumerate('ash birch cedar larch pine'.split())))
db.commit()
c.execute("""
CREATE PROCEDURE test_sp(IN t VARCHAR(255))
BEGIN
SELECT pos FROM %s WHERE tree = t;
END
""" % self.table)
db.commit()
c.callproc('test_sp', ('larch',))
rows = c.fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], 3)
c.nextset()
finally:
c.execute("DROP PROCEDURE IF EXISTS test_sp")
c.execute('drop table %s' % (self.table))
def test_small_CHAR(self):
# Character data
def generator(row,col):
i = ((row+1)*(col+1)+62)%256
if i == 62: return ''
if i == 63: return None
return chr(i)
self.check_data_integrity(
('col1 char(1)','col2 char(1)'),
generator)
def test_bug_2671682(self):
from pymysql.constants import ER
try:
self.cursor.execute("describe some_non_existent_table");
except self.connection.ProgrammingError as msg:
self.assertEqual(msg.args[0], ER.NO_SUCH_TABLE)
def test_ping(self):
self.connection.ping()
def test_literal_int(self):
self.assertTrue("2" == self.connection.literal(2))
def test_literal_float(self):
self.assertTrue("3.1415" == self.connection.literal(3.1415))
def test_literal_string(self):
self.assertTrue("'foo'" == self.connection.literal("foo"))
if __name__ == '__main__':
if test_MySQLdb.leak_test:
import gc
gc.enable()
gc.set_debug(gc.DEBUG_LEAK)
unittest.main()
| mit |
TheoRettisch/p2pool-hirocoin | p2pool/util/p2protocol.py | 216 | 4144 | '''
Generic message-based protocol used by Bitcoin and P2Pool for P2P communication
'''
import hashlib
import struct
from twisted.internet import protocol
from twisted.python import log
import p2pool
from p2pool.util import datachunker, variable
class TooLong(Exception):
pass
class Protocol(protocol.Protocol):
def __init__(self, message_prefix, max_payload_length, traffic_happened=variable.Event(), ignore_trailing_payload=False):
self._message_prefix = message_prefix
self._max_payload_length = max_payload_length
self.dataReceived2 = datachunker.DataChunker(self.dataReceiver())
self.traffic_happened = traffic_happened
self.ignore_trailing_payload = ignore_trailing_payload
def dataReceived(self, data):
self.traffic_happened.happened('p2p/in', len(data))
self.dataReceived2(data)
def dataReceiver(self):
while True:
start = ''
while start != self._message_prefix:
start = (start + (yield 1))[-len(self._message_prefix):]
command = (yield 12).rstrip('\0')
length, = struct.unpack('<I', (yield 4))
if length > self._max_payload_length:
print 'length too large'
continue
checksum = yield 4
payload = yield length
if hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] != checksum:
print 'invalid hash for', self.transport.getPeer().host, repr(command), length, checksum.encode('hex')
if p2pool.DEBUG:
print hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4].encode('hex'), payload.encode('hex')
self.badPeerHappened()
continue
type_ = getattr(self, 'message_' + command, None)
if type_ is None:
if p2pool.DEBUG:
print 'no type for', repr(command)
continue
try:
self.packetReceived(command, type_.unpack(payload, self.ignore_trailing_payload))
except:
print 'RECV', command, payload[:100].encode('hex') + ('...' if len(payload) > 100 else '')
log.err(None, 'Error handling message: (see RECV line)')
self.disconnect()
def packetReceived(self, command, payload2):
handler = getattr(self, 'handle_' + command, None)
if handler is None:
if p2pool.DEBUG:
print 'no handler for', repr(command)
return
if getattr(self, 'connected', True) and not getattr(self, 'disconnecting', False):
handler(**payload2)
def disconnect(self):
if hasattr(self.transport, 'abortConnection'):
# Available since Twisted 11.1
self.transport.abortConnection()
else:
# This doesn't always close timed out connections! warned about in main
self.transport.loseConnection()
def badPeerHappened(self):
self.disconnect()
def sendPacket(self, command, payload2):
if len(command) >= 12:
raise ValueError('command too long')
type_ = getattr(self, 'message_' + command, None)
if type_ is None:
raise ValueError('invalid command')
#print 'SEND', command, repr(payload2)[:500]
payload = type_.pack(payload2)
if len(payload) > self._max_payload_length:
raise TooLong('payload too long')
data = self._message_prefix + struct.pack('<12sI', command, len(payload)) + hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4] + payload
self.traffic_happened.happened('p2p/out', len(data))
self.transport.write(data)
def __getattr__(self, attr):
prefix = 'send_'
if attr.startswith(prefix):
command = attr[len(prefix):]
return lambda **payload2: self.sendPacket(command, payload2)
#return protocol.Protocol.__getattr__(self, attr)
raise AttributeError(attr)
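# Wire-format sketch (mirrors sendPacket()/dataReceiver() above): a packet is
#     message_prefix + command (12 bytes, NUL-padded) +
#     length (uint32, little-endian) + checksum (4 bytes) + payload
# where the checksum is the first 4 bytes of sha256(sha256(payload)). For a
# hypothetical prefix and an empty payload it could be built by hand as:
#     payload = ''
#     checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
#     packet = prefix + struct.pack('<12sI', 'ping', len(payload)) + checksum + payload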
| gpl-3.0 |
ryanfobel/microdrop | docs/generate_modules.py | 21 | 9982 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
sphinx-autopackage-script
This script parses a directory tree looking for python modules and packages and
creates ReST files appropriately to create code documentation with Sphinx.
It also creates a modules index (named modules.<suffix>).
"""
# Copyright 2008 Société des arts technologiques (SAT), http://www.sat.qc.ca/
# Copyright 2010 Thomas Waldmann <tw AT waldmann-edv DOT de>
# All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import optparse
# automodule options
OPTIONS = ['members',
'undoc-members',
# 'inherited-members', # disabled because there's a bug in sphinx
'show-inheritance',
]
INIT = '__init__.py'
def makename(package, module):
"""Join package and module with a dot."""
# Both package and module can be None/empty.
if package:
name = package
if module:
name += '.' + module
else:
name = module
return name
def write_file(name, text, opts):
"""Write the output file for module/package <name>."""
if opts.dryrun:
return
fname = os.path.join(opts.destdir, "%s.%s" % (name, opts.suffix))
if not opts.force and os.path.isfile(fname):
print 'File %s already exists, skipping.' % fname
else:
print 'Creating file %s.' % fname
f = open(fname, 'w')
f.write(text)
f.close()
def format_heading(level, text):
"""Create a heading of <level> [1, 2 or 3 supported]."""
underlining = ['=', '-', '~', ][level-1] * len(text)
return '%s\n%s\n\n' % (text, underlining)
def format_directive(module, package=None):
"""Create the automodule directive and add the options."""
directive = '.. automodule:: %s\n' % makename(package, module)
for option in OPTIONS:
directive += ' :%s:\n' % option
return directive
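# Illustrative output (hypothetical names): format_directive('views', 'myproject')
# returns a block along the lines of:
#     .. automodule:: myproject.views
#        :members:
#        :undoc-members:
#        :show-inheritance: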
def create_module_file(package, module, opts):
"""Build the text of the file and write the file."""
text = format_heading(1, '%s Module' % module)
text += format_heading(2, ':mod:`%s` Module' % module)
text += format_directive(module, package)
write_file(makename(package, module), text, opts)
def create_package_file(root, master_package, subroot, py_files, opts, subs):
"""Build the text of the file and write the file."""
package = os.path.split(root)[-1]
text = format_heading(1, '%s Package' % package)
# add each package's module
for py_file in py_files:
if shall_skip(os.path.join(root, py_file)):
continue
is_package = py_file == INIT
py_file = os.path.splitext(py_file)[0]
py_path = makename(subroot, py_file)
if is_package:
heading = ':mod:`%s` Package' % package
else:
heading = ':mod:`%s` Module' % py_file
text += format_heading(2, heading)
text += format_directive(is_package and subroot or py_path, master_package)
text += '\n'
# build a list of directories that are packages (they contain an INIT file)
subs = [sub for sub in subs if os.path.isfile(os.path.join(root, sub, INIT))]
# if there are some package directories, add a TOC for theses subpackages
if subs:
text += format_heading(2, 'Subpackages')
text += '.. toctree::\n\n'
for sub in subs:
text += ' %s.%s\n' % (makename(master_package, subroot), sub)
text += '\n'
write_file(makename(master_package, subroot), text, opts)
def create_modules_toc_file(master_package, modules, opts, name='modules'):
"""
Create the module's index.
"""
text = format_heading(1, '%s Modules' % opts.header)
text += '.. toctree::\n'
text += ' :maxdepth: %s\n\n' % opts.maxdepth
modules.sort()
prev_module = ''
for module in modules:
# look if the module is a subpackage and, if yes, ignore it
if module.startswith(prev_module + '.'):
continue
prev_module = module
text += ' %s\n' % module
write_file(name, text, opts)
def shall_skip(module):
"""
Check if we want to skip this module.
"""
# skip it if there is nothing (or just \n or \r\n) in the file
return os.path.getsize(module) < 3
def recurse_tree(path, excludes, opts):
"""
Look for every file in the directory tree and create the corresponding
ReST files.
"""
# use absolute path for root, as relative paths like '../../foo' cause
# 'if "/." in root ...' to filter out *all* modules otherwise
path = os.path.abspath(path)
# check if the base directory is a package and get is name
if INIT in os.listdir(path):
package_name = path.split(os.path.sep)[-1]
else:
package_name = None
toc = []
tree = os.walk(path, False)
for root, subs, files in tree:
# keep only the Python script files
py_files = sorted([f for f in files if os.path.splitext(f)[1] == '.py'])
if INIT in py_files:
py_files.remove(INIT)
py_files.insert(0, INIT)
# remove hidden ('.') and private ('_') directories
subs = sorted([sub for sub in subs if sub[0] not in ['.', '_']])
# check if there are valid files to process
# TODO: could add check for windows hidden files
if "/." in root or "/_" in root \
or not py_files \
or is_excluded(root, excludes):
continue
if INIT in py_files:
# we are in package ...
if (# ... with subpackage(s)
subs
or
# ... with some module(s)
len(py_files) > 1
or
# ... with a not-to-be-skipped INIT file
not shall_skip(os.path.join(root, INIT))
):
subroot = root[len(path):].lstrip(os.path.sep).replace(os.path.sep, '.')
create_package_file(root, package_name, subroot, py_files, opts, subs)
toc.append(makename(package_name, subroot))
elif root == path:
# if we are at the root level, we don't require it to be a package
for py_file in py_files:
if not shall_skip(os.path.join(path, py_file)):
module = os.path.splitext(py_file)[0]
create_module_file(package_name, module, opts)
toc.append(makename(package_name, module))
# create the module's index
if not opts.notoc:
create_modules_toc_file(package_name, toc, opts)
def normalize_excludes(rootpath, excludes):
"""
Normalize the excluded directory list:
* must be either an absolute path or start with rootpath,
* otherwise it is joined with rootpath
* with trailing slash
"""
sep = os.path.sep
f_excludes = []
for exclude in excludes:
if not os.path.isabs(exclude) and not exclude.startswith(rootpath):
exclude = os.path.join(rootpath, exclude)
if not exclude.endswith(sep):
exclude += sep
f_excludes.append(exclude)
return f_excludes
def is_excluded(root, excludes):
"""
Check if the directory is in the exclude list.
Note: by having trailing slashes, we avoid common prefix issues, e.g. an
exclude "foo" also accidentally excluding "foobar".
"""
sep = os.path.sep
if not root.endswith(sep):
root += sep
for exclude in excludes:
if root.startswith(exclude):
return True
return False
def main():
"""
Parse and check the command line arguments.
"""
parser = optparse.OptionParser(usage="""usage: %prog [options] <package path> [exclude paths, ...]
Note: By default this script will not overwrite already created files.""")
parser.add_option("-n", "--doc-header", action="store", dest="header", help="Documentation Header (default=Project)", default="Project")
parser.add_option("-d", "--dest-dir", action="store", dest="destdir", help="Output destination directory", default="")
parser.add_option("-s", "--suffix", action="store", dest="suffix", help="module suffix (default=txt)", default="txt")
parser.add_option("-m", "--maxdepth", action="store", dest="maxdepth", help="Maximum depth of submodules to show in the TOC (default=4)", type="int", default=4)
parser.add_option("-r", "--dry-run", action="store_true", dest="dryrun", help="Run the script without creating the files")
parser.add_option("-f", "--force", action="store_true", dest="force", help="Overwrite all the files")
parser.add_option("-t", "--no-toc", action="store_true", dest="notoc", help="Don't create the table of content file")
(opts, args) = parser.parse_args()
if not args:
parser.error("package path is required.")
else:
rootpath, excludes = args[0], args[1:]
if os.path.isdir(rootpath):
# check if the output destination is a valid directory
if opts.destdir and os.path.isdir(opts.destdir):
excludes = normalize_excludes(rootpath, excludes)
recurse_tree(rootpath, excludes, opts)
else:
print '%s is not a valid output destination directory.' % opts.destdir
else:
print '%s is not a valid directory.' % rootpath
if __name__ == '__main__':
main()
| gpl-3.0 |
janson123/Like-My-GF | like_my_gf.py | 1 | 3030 | #!/usr/bin/env python
from instagram.client import InstagramAPI
from ConfigParser import SafeConfigParser
import argparse
import datetime
import logging
config_parser = SafeConfigParser()
config_parser.read('config')
arg_parser = argparse.ArgumentParser(description='Auto-like my GF\'s Instagram')
arg_parser.add_argument('-v', '--verbose', action='store_true', help='Increase output verbosity')
args = arg_parser.parse_args()
CONFIG = dict( client_id = config_parser.get('Client', 'client_id'),
client_secret = config_parser.get('Client', 'client_secret'),
redirect_uri = config_parser.get('Client', 'redirect_uri'),
)
OTHER = dict( scope = ['likes'],
access_token = config_parser.get('Access Token', 'access_token'),
target = config_parser.get('Target', 'username'),
log_path = config_parser.get('Path', 'log_path')+'like_my_gf.log',
)
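# The 'config' file read above is expected to look roughly like this
# (values are placeholders):
#     [Client]
#     client_id = <instagram client id>
#     client_secret = <instagram client secret>
#     redirect_uri = <registered redirect uri>
#     [Access Token]
#     access_token = None
#     [Target]
#     username = <target instagram username>
#     [Path]
#     log_path = /var/log/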
if args.verbose:
logging.basicConfig(filename=OTHER['log_path'], level=logging.DEBUG)
else:
logging.basicConfig(filename=OTHER['log_path'], level=logging.INFO)
def get_auth():
unauth_api = InstagramAPI(**CONFIG)
redirect_uri = unauth_api.get_authorize_login_url(scope = OTHER['scope'])
print "Visit this page and authorize access in your browser:\n", redirect_uri
code = raw_input('Please type in the access code you got\n')
OTHER['access_token'], me = unauth_api.exchange_code_for_access_token(code)
with open('config', 'w') as cf:
config_parser.set('Access Token', 'access_token', OTHER['access_token'])
config_parser.write(cf)
def auth_request():
api = InstagramAPI(access_token=OTHER['access_token'])
target_ids = api.user_search(OTHER['target'], 1)
if len(target_ids) > 1:
logging.error('Found multiple users, please check username')
return
target_id = target_ids[0].id
my_name = api.user().username
logging.debug('Starting check recent media')
recent_media, url = api.user_recent_media(user_id=target_id, count = 20)
liked_media = []
for media in recent_media:
logging.debug('Processing media %s' % media.id)
users = api.media_likes(media.id)
will_like = True
for user in users:
if user.username == my_name:
will_like = False
break
if will_like:
logging.debug('Liking media %s' % media.id)
api.like_media(media.id)
liked_media.append(media)
else:
logging.debug('Already liked media %s, aborting like' % media.id)
return liked_media
if __name__ == '__main__':
if OTHER['access_token'] == 'None':  # Not a mistake: the default token is the string 'None', not the None object
get_auth()
liked_media = auth_request()
if len(liked_media) > 0:
logging.info('-'*10+str(datetime.datetime.now())+'-'*10)
logging.info('-'*10+'Liked '+str(len(liked_media))+' medias'+'-'*10)
# TODO
# 1. Send email
| mit |
ehelms/Opus | opus/project/deployment/models.py | 1 | 15917 | ##############################################################################
# Copyright 2010 North Carolina State University #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
##############################################################################
import os.path
import re
import subprocess
import json
import itertools
import opus.lib.deployer
from opus.lib.deployer import DeploymentException
from opus.lib.conf import OpusConfig
import opus.project.deployment.tasks
from opus.project.deployment import database
from opus.lib.log import get_logger
log = get_logger()
from django.conf import settings
from django.db import models
from django.core.validators import RegexValidator, ValidationError
import django.contrib.auth.models
id_re = re.compile(r'^[a-z][a-z0-9_]+$')
validate_identifier = RegexValidator(id_re, u"Enter a valid identifier consisting of letters, numbers, and underscores, not starting with a number.", 'invalid')
class IdentifierField(models.CharField):
default_validators = [validate_identifier]
def __init__(self, *args, **kwargs):
# Set max length to 25 since usernames can only be 30 characters or so
kwargs['max_length'] = kwargs.get('max_length', 25)
models.CharField.__init__(self, *args, **kwargs)
class DeploymentInfo(object):
"""Just a container that holds information about a project deployment"""
def __init__(self):
self.dbengine = None
self.dbname = None
self.dbuser = ""
self.dbpassword = ""
self.dbhost = ""
self.dbport = ""
self.superusername = None
self.superemail = None
self.superpassword = None
def validate(self):
if not (self.dbengine and self.superusername and
self.superemail and self.superpassword):
raise ValidationError("Deployment parameters not specified")
class DeployedProject(models.Model):
"""The actual model for a deployed project. The database doesn't contain
too many fields, but this class contains lots of methods to query information and
edit projects. Most of the state is stored in the filesystem and not the
database. (For example, whether the project is activated is defined by the
presence of an apache config file for the project)
"""
name = IdentifierField(unique=True)
owner = models.ForeignKey(django.contrib.auth.models.User)
def __init__(self, *args, **kwargs):
super(DeployedProject, self).__init__(*args, **kwargs)
self._conf = None
@property
def projectdir(self):
return os.path.join(settings.OPUS_BASE_DIR, self.name)
@property
def apache_conf(self):
return os.path.join(settings.OPUS_APACHE_CONFD, "opus"+self.name+".conf")
@models.permalink
def get_absolute_url(self):
return ("opus.project.deployment.views.edit_or_create",
(),
dict(projectname=self.name))
@property
def serve_http(self):
port = settings.OPUS_HTTP_PORT
if not port:
return
if port != 80:
portline = ":"+str(port)
else:
portline = ""
return "http://{0}{1}{2}/".format(
self.name, settings.OPUS_APACHE_SERVERNAME_SUFFIX,
portline)
@property
def serve_https(self):
port = settings.OPUS_HTTPS_PORT
if not port:
return
if port != 443:
portline = ":"+str(port)
else:
portline = ""
return "https://{0}{1}{2}/".format(
self.name, settings.OPUS_APACHE_SERVERNAME_SUFFIX,
portline)
def get_urls(self):
"""Gets the urls that this project is being served from. This list is
populated even if active is False.
"""
urls = []
http = self.serve_http
if http:
urls.append(http)
https = self.serve_https
if https:
urls.append(https)
return urls
def get_apps(self):
"""Returns an iterator over application names that are currently
installed"""
for app in self.config['INSTALLED_APPS']:
if os.path.exists(os.path.join(self.projectdir, app)):
yield app
@property
def config(self):
"""Returns an opus.lib.conf.OpusConfig object for this project. This is
automatically saved when the model's save() method is called.
This will raise an error if the project doesn't exist (such as before
it's deployed for the first time).
"""
if not self._conf:
self._conf = OpusConfig(os.path.join(self.projectdir, "opussettings.json"))
return self._conf
def save(self, *args, **kwargs):
if self._conf:
self._conf.save()
# Touch wsgi file, indicating to mod_wsgi to re-load modules and
# therefore any changed configuration parameters
wsgifile = os.path.join(self.projectdir, "wsgi", 'django.wsgi')
if os.path.exists(wsgifile):
# It may not exist if the project isn't active
os.utime(wsgifile, None)
super(DeployedProject, self).save(*args, **kwargs)
def is_active(self):
return os.path.exists(self.apache_conf)
active = property(is_active)
def _verify_project(self):
"""Verifies that the given project name corresponds to a real un-deployed
project in the base dir.
Returns False if something went wrong.
"""
fullpath = self.projectdir
if not os.path.isdir(fullpath):
return False
if os.path.exists(os.path.join(fullpath, "wsgi")):
# Already deployed?
return False
if not os.path.exists(os.path.join(fullpath, "__init__.py")):
return False
if not os.path.exists(os.path.join(fullpath, "settings.py")):
return False
return True
def deploy(self, info, active=True):
"""Call this to deploy a project. If successful, the model is saved and
this method returns None. If something went wrong, a
DeploymentException is raised with a description of the error, and the
model is not saved. If something is wrong with the given information, a
ValidationError is raised.
Pass in a DeploymentInfo object with the appropriate attributes set.
That information is used to deploy a project, but is not stored within
the model itself.
If active is not True, the deployment will be created inactive, and the
apache configuration file will not be created.
"""
# This should have been called externally before, but do it again just
# to be sure nothing's changed.
self.full_clean()
# Do some validation checks to see if the given project name points to
# a valid un-deployed django project
if not self._verify_project():
raise DeploymentException("Sanity check failed, will not create project with that name")
d = opus.lib.deployer.ProjectDeployer(self.projectdir)
d.create_environment()
# Do this before setting the sensitive database information
d.secure_project(settings.OPUS_SECUREOPS_COMMAND)
d.configure_database(info.dbengine,
info.dbname,
info.dbuser,
info.dbpassword,
info.dbhost,
info.dbport,
)
# This must go before sync_database, in case some settings that are
# set by set_paths are used by a models.py at import time.
d.set_paths()
d.install_requirements(settings.OPUS_SECUREOPS_COMMAND)
d.sync_database(info.superusername,
info.superemail,
info.superpassword,
settings.OPUS_SECUREOPS_COMMAND
)
d.gen_cert(settings.OPUS_APACHE_SERVERNAME_SUFFIX)
d.setup_celery(settings.OPUS_SECUREOPS_COMMAND,
pythonpath=self._get_path_additions())
if active:
self.activate(d)
self.save()
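# A minimal deployment sketch (values and the request object are hypothetical;
# see DeploymentInfo above for the attributes validate() requires):
#
# info = DeploymentInfo()
# info.dbengine = 'django.db.backends.sqlite3'
# info.superusername = 'admin'
# info.superemail = 'admin@example.com'
# info.superpassword = 'secret'
# info.validate()
# project = DeployedProject(name='myproject', owner=request.user)
# project.full_clean()
# project.deploy(info, active=True)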
def _get_path_additions(self):
return "{0}".format(
os.path.split(opus.__path__[0])[0],
)
def set_debug(self, d):
"""Sets debug mode on or off. Remember to save afterwards"""
self.config['DEBUG'] = bool(d)
self.config['TEMPLATE_DEBUG'] = bool(d)
if d:
self.config['LOG_LEVEL'] = "DEBUG"
else:
self.config['LOG_LEVEL'] = "INFO"
def activate(self, d=None):
"""Activate this project. This writes out the apache config with the
current parameters. Also writes out the wsgi file. Finally, starts the
supervisord process which starts celeryd and celerybeat
This is normally done during deployment, but this is useful to call
after any change that affects the apache config so that the changes
take effect. If you do this, don't forget to save() too.
Pass in a deployer object, otherwise one will be created.
"""
if not self.all_settings_set():
raise DeploymentException("Tried to activate, but some applications still have settings to set")
if not d:
d = opus.lib.deployer.ProjectDeployer(self.projectdir)
# The opus libraries should be in the path for the deployed app. TODO:
# Find a better way to handle this.
path_additions = self._get_path_additions()
d.configure_apache(settings.OPUS_APACHE_CONFD,
settings.OPUS_HTTP_PORT,
settings.OPUS_HTTPS_PORT,
settings.OPUS_APACHE_SERVERNAME_SUFFIX,
secureops=settings.OPUS_SECUREOPS_COMMAND,
pythonpath=path_additions,
ssl_crt=settings.OPUS_SSL_CRT,
ssl_key=settings.OPUS_SSL_KEY,
ssl_chain=settings.OPUS_SSL_CHAIN,
)
# Schedule celery to start supervisord. Somehow if supervisord is
# started directly by mod_wsgi, strange things happen to supervisord's
# signal handlers
opus.project.deployment.tasks.start_supervisord.delay(self.projectdir)
def deactivate(self):
"""Removes the apache configuration file and restarts apache.
"""
destroyer = opus.lib.deployer.ProjectUndeployer(self.projectdir)
destroyer.remove_apache_conf(settings.OPUS_APACHE_CONFD,
secureops=settings.OPUS_SECUREOPS_COMMAND)
destroyer.stop_celery(
secureops=settings.OPUS_SECUREOPS_COMMAND)
# Make sure all processes are stopped
opus.project.deployment.tasks.kill_processes.apply_async(
args=[self.pk],
countdown=5)
def destroy(self):
"""Destroys the project. Deletes it off the drive, removes the system
user, de-configures apache, and finally removes itself from the
database.
This method is idempotent; it can be called on a non-existent project
or a project in an inconsistent or intermediate state.
This method will still error in these cases (not necessarily
exhaustive):
* Apache can't be restarted
* There's an error removing the user other than "user doesn't exist"
* The project dir exists but cannot be removed
"""
destroyer = opus.lib.deployer.ProjectUndeployer(self.projectdir)
destroyer.remove_apache_conf(settings.OPUS_APACHE_CONFD,
secureops=settings.OPUS_SECUREOPS_COMMAND)
destroyer.stop_celery(
secureops=settings.OPUS_SECUREOPS_COMMAND)
destroyer.delete_celery(
secureops=settings.OPUS_SECUREOPS_COMMAND)
# This also kills off any remaining processes owned by that user
destroyer.delete_user(
secureops=settings.OPUS_SECUREOPS_COMMAND)
# Remove database and user if automatically created
try:
if self.config['DATABASES']['default']['ENGINE'].endswith(\
"postgresql_psycopg2") and \
settings.OPUS_AUTO_POSTGRES_CONFIG:
database.delete_postgres(self.name)
except Exception, e:
log.warning("Ignoring this error when trying to delete postgres user: %s", e)
destroyer.remove_projectdir(
secureops=settings.OPUS_SECUREOPS_COMMAND)
if self.id is not None:
self.delete()
def get_app_settings(self):
"""Returns a mapping of app names to a list of settings.
The "Default" values are the values from the metadata here, not the
values from the project config. If sending the current values from
settings to the user, you'll need to modify this data. (this is done in
jsonviews.py for the projectinfo() view)
"""
app_settings = {}
for app in self.get_apps():
# Is this application local to the project? If not skip it, since
# we don't have a good way right now to find where it's installed
md_filename = os.path.join(self.projectdir, app, "metadata.json")
if not os.path.exists(md_filename):
continue
with open(md_filename, 'r') as md_file:
app_metadata = json.load(md_file)
usersettings = app_metadata.get("usersettings", None)
if not usersettings:
continue
# Do some really brief validity checking. Most validity checking is
# done in the constructor of UserSettingsForm though
u = []
for s in usersettings:
if len(s) < 3:
log.warning("usersettings line has wrong number of args: %s", s)
continue
# All values except the last (default) must be a string
if not all(isinstance(x, basestring) for x in s[:3]):
log.warning("usersettings line is bad, one of the first three elements is not a string: %s", s)
continue
if s[2] not in ("int", "char", "str", "string", "float", 'choice', 'bool'):
log.warning("usersettings line has bad type: %s", s)
continue
u.append(s)
if u:
app_settings[app] = u
return app_settings
def all_settings_set(self):
"""Returns true if all the application specific settings are set in the
global config. If false is returned, the project shouldn't be activated
yet.
"""
app_settings = self.get_app_settings()
for setting in itertools.chain.from_iterable(app_settings.itervalues()):
if setting[0] not in self.config:
return False
return True
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/features/v2015_12_01/operations/_features_operations.py | 1 | 17276 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FeaturesOperations(object):
"""FeaturesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.features.v2015_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
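# A minimal usage sketch; building the parent client is an assumption here
# (a FeatureClient normally exposes this operation group as `client.features`):
#
# for feature in client.features.list('Microsoft.Network'):
#     print(feature.name)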
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FeatureOperationsListResult"]
"""Gets all the preview features that are available through AFEC for the subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FeatureOperationsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.features.v2015_12_01.models.FeatureOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FeatureOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FeatureOperationsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/features'} # type: ignore
def list(
self,
resource_provider_namespace, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FeatureOperationsListResult"]
"""Gets all the preview features in a provider namespace that are available through AFEC for the
subscription.
:param resource_provider_namespace: The namespace of the resource provider for getting
features.
:type resource_provider_namespace: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FeatureOperationsListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.resource.features.v2015_12_01.models.FeatureOperationsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FeatureOperationsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-12-01"
accept = "application/json, text/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FeatureOperationsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features'} # type: ignore
def get(
self,
resource_provider_namespace, # type: str
feature_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FeatureResult"
"""Gets the preview feature with the specified name.
:param resource_provider_namespace: The resource provider namespace for the feature.
:type resource_provider_namespace: str
:param feature_name: The name of the feature to get.
:type feature_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FeatureResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.features.v2015_12_01.models.FeatureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FeatureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'featureName': self._serialize.url("feature_name", feature_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FeatureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}'} # type: ignore
def register(
self,
resource_provider_namespace, # type: str
feature_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FeatureResult"
"""Registers the preview feature for the subscription.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param feature_name: The name of the feature to register.
:type feature_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FeatureResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.features.v2015_12_01.models.FeatureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FeatureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.register.metadata['url'] # type: ignore
path_format_arguments = {
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'featureName': self._serialize.url("feature_name", feature_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FeatureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/register'} # type: ignore
def unregister(
self,
resource_provider_namespace, # type: str
feature_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.FeatureResult"
"""Unregisters the preview feature for the subscription.
:param resource_provider_namespace: The namespace of the resource provider.
:type resource_provider_namespace: str
:param feature_name: The name of the feature to unregister.
:type feature_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FeatureResult, or the result of cls(response)
:rtype: ~azure.mgmt.resource.features.v2015_12_01.models.FeatureResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FeatureResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2015-12-01"
accept = "application/json, text/json"
# Construct URL
url = self.unregister.metadata['url'] # type: ignore
path_format_arguments = {
'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
'featureName': self._serialize.url("feature_name", feature_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FeatureResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unregister.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Features/providers/{resourceProviderNamespace}/features/{featureName}/unregister'} # type: ignore
| mit |
shreyasp/erpnext | erpnext/patches/v5_0/index_on_account_and_gl_entry.py | 60 | 1275 | from __future__ import unicode_literals
import frappe
def execute():
index_map = {
"Account": ["parent_account", "lft", "rgt"],
"GL Entry": ["posting_date", "account", 'party', "voucher_no"],
"Sales Invoice": ["posting_date", "debit_to", "customer"],
"Purchase Invoice": ["posting_date", "credit_to", "supplier"]
}
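# For each doctype: drop the obsolete single-column indexes first, then add
# any of the indexes listed above that are still missing. `show index` is
# queried a second time because the drops invalidate the first result set.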
for dt, indexes in index_map.items():
existing_indexes = [(d.Key_name, d.Column_name) for d in frappe.db.sql("""show index from `tab{0}`
where Column_name != 'name'""".format(dt), as_dict=1)]
for old, column in existing_indexes:
if column in ("parent", "group_or_ledger", "is_group", "is_pl_account", "debit_or_credit",
"account_name", "company", "project", "voucher_date", "due_date", "bill_no",
"bill_date", "is_opening", "fiscal_year", "outstanding_amount"):
frappe.db.sql("alter table `tab{0}` drop index {1}".format(dt, old))
existing_indexes = [(d.Key_name, d.Column_name) for d in frappe.db.sql("""show index from `tab{0}`
where Column_name != 'name'""".format(dt), as_dict=1)]
existing_indexed_columns = list(set([x[1] for x in existing_indexes]))
for new in indexes:
if new not in existing_indexed_columns:
frappe.db.sql("alter table `tab{0}` add index ({1})".format(dt, new)) | gpl-3.0 |
andyzsf/django-cms | cms/plugin_rendering.py | 7 | 8554 | # -*- coding: utf-8 -*-
from django.template import Template, Context
from django.template.loader import render_to_string
from django.utils import six
from django.utils.safestring import mark_safe
from cms.models.placeholdermodel import Placeholder
from cms.plugin_processors import (plugin_meta_context_processor, mark_safe_plugin_processor)
from cms.utils import get_language_from_request
from cms.utils.conf import get_cms_setting
from cms.utils.django_load import iterload_objects
from cms.utils.placeholder import get_placeholder_conf, restore_sekizai_context
# these are always called before all other plugin context processors
from sekizai.helpers import Watcher
DEFAULT_PLUGIN_CONTEXT_PROCESSORS = (
plugin_meta_context_processor,
)
# these are always called after all other plugin processors
DEFAULT_PLUGIN_PROCESSORS = (
mark_safe_plugin_processor,
)
class PluginContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in CMS_PLUGIN_CONTEXT_PROCESSORS.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, dict, instance, placeholder, processors=None, current_app=None):
super(PluginContext, self).__init__(dict, current_app=current_app)
if not processors:
processors = []
for processor in DEFAULT_PLUGIN_CONTEXT_PROCESSORS:
self.update(processor(instance, placeholder, self))
for processor in iterload_objects(get_cms_setting('PLUGIN_CONTEXT_PROCESSORS')):
self.update(processor(instance, placeholder, self))
for processor in processors:
self.update(processor(instance, placeholder, self))
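# A plugin context processor is any callable taking (instance, placeholder,
# context) and returning a dict to merge into the context; a hypothetical
# example (not shipped with the CMS):
#
# def plugin_type_processor(instance, placeholder, context):
#     return {'plugin_type': instance.__class__.__name__}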
def render_plugin(context, instance, placeholder, template, processors=None, current_app=None):
"""
Renders a single plugin and applies the post processors to its rendered
content.
"""
if not processors:
processors = []
if isinstance(template, six.string_types):
content = render_to_string(template, context_instance=context)
elif isinstance(template, Template):
content = template.render(context)
else:
content = ''
for processor in iterload_objects(get_cms_setting('PLUGIN_PROCESSORS')):
content = processor(instance, placeholder, content, context)
for processor in processors:
content = processor(instance, placeholder, content, context)
for processor in DEFAULT_PLUGIN_PROCESSORS:
content = processor(instance, placeholder, content, context)
return content
def render_plugins(plugins, context, placeholder, processors=None):
"""
Renders a collection of plugins with the given context, using the appropriate processors
for a given placeholder name, and returns a list containing a "rendered content" string
for each plugin.
This is the main plugin rendering utility function, use this function rather than
Plugin.render_plugin().
"""
out = []
total = len(plugins)
for index, plugin in enumerate(plugins):
plugin._render_meta.total = total
plugin._render_meta.index = index
context.push()
out.append(plugin.render_plugin(context, placeholder, processors=processors))
context.pop()
return out
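# A minimal rendering sketch (request, context and placeholder are assumed to
# come from the surrounding view):
#
# plugins = get_plugins(request, placeholder, template, lang='en')
# html = mark_safe(''.join(render_plugins(plugins, context, placeholder)))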
def render_placeholder(placeholder, context_to_copy,
name_fallback="Placeholder", lang=None, default=None, editable=True,
use_cache=True):
"""
Renders plugins for a placeholder on the given page using shallow copies of the
given context, and returns a string containing the rendered output.
Set editable = False to disable front-end editing for this placeholder
during rendering. This is primarily used for the "as" variant of the
render_placeholder tag.
"""
if not placeholder:
return
from cms.utils.plugins import get_plugins
context = context_to_copy
context.push()
request = context['request']
if not hasattr(request, 'placeholders'):
request.placeholders = []
if placeholder.has_change_permission(request) or not placeholder.cache_placeholder:
request.placeholders.append(placeholder)
if hasattr(placeholder, 'content_cache'):
return mark_safe(placeholder.content_cache)
page = placeholder.page if placeholder else None
# It's somewhat of a duplicate of the similar call in `get_plugins`, but it's required
# to have a valid language in this function for `get_fallback_languages` to work
if lang:
save_language = lang
else:
lang = get_language_from_request(request)
save_language = lang
# Prepend front-end edit toolbar output if applicable
toolbar = getattr(request, 'toolbar', None)
if getattr(toolbar, 'edit_mode', False) and getattr(placeholder, 'is_editable', True) and editable:
from cms.middleware.toolbar import toolbar_plugin_processor
processors = (toolbar_plugin_processor,)
edit = True
else:
processors = None
edit = False
from django.core.cache import cache
if get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
cache_key = placeholder.get_cache_key(lang)
if not edit and placeholder and not hasattr(placeholder, 'cache_checked'):
cached_value = cache.get(cache_key)
if cached_value is not None:
restore_sekizai_context(context, cached_value['sekizai'])
return mark_safe(cached_value['content'])
if page:
template = page.template
else:
template = None
plugins = list(get_plugins(request, placeholder, template, lang=lang))
# Add extra context as defined in settings, but do not overwrite existing context variables,
# since settings are general and database/template are specific
# TODO this should actually happen as a plugin context processor, but these currently overwrite
# existing context -- maybe change this order?
slot = getattr(placeholder, 'slot', None)
extra_context = {}
if slot:
extra_context = get_placeholder_conf("extra_context", slot, template, {})
for key, value in extra_context.items():
if key not in context:
context[key] = value
content = []
watcher = Watcher(context)
content.extend(render_plugins(plugins, context, placeholder, processors))
toolbar_content = ''
if edit and editable:
if not hasattr(request.toolbar, 'placeholders'):
request.toolbar.placeholders = {}
if placeholder.pk not in request.toolbar.placeholders:
request.toolbar.placeholders[placeholder.pk] = placeholder
toolbar_content = mark_safe(render_placeholder_toolbar(placeholder, context, name_fallback, save_language))
if content:
content = mark_safe("".join(content))
elif default:
# should be a NodeList from a template
content = mark_safe(default.render(context_to_copy))
else:
content = ''
context['content'] = content
context['placeholder'] = toolbar_content
context['edit'] = edit
result = render_to_string("cms/toolbar/content.html", context)
changes = watcher.get_changes()
if placeholder and not edit and placeholder.cache_placeholder and get_cms_setting('PLACEHOLDER_CACHE') and use_cache:
cache.set(cache_key, {'content': result, 'sekizai': changes}, get_cms_setting('CACHE_DURATIONS')['content'])
context.pop()
return result
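# Note: the rendered result above is cached per placeholder and language only
# when edit mode is off and placeholder caching is enabled; the sekizai
# changes are stored alongside the HTML so they can be replayed on a cache hit.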
def render_placeholder_toolbar(placeholder, context, name_fallback, save_language):
from cms.plugin_pool import plugin_pool
request = context['request']
page = placeholder.page if placeholder else None
if not page:
page = getattr(request, 'current_page', None)
if page:
if name_fallback and not placeholder:
placeholder = Placeholder.objects.create(slot=name_fallback)
page.placeholders.add(placeholder)
placeholder.page = page
if placeholder:
slot = placeholder.slot
else:
slot = None
context.push()
# to restrict child-only plugins from draggables
context['allowed_plugins'] = [cls.__name__ for cls in plugin_pool.get_all_plugins(slot, page)] + plugin_pool.get_system_plugins()
context['placeholder'] = placeholder
context['language'] = save_language
context['page'] = page
toolbar = render_to_string("cms/toolbar/placeholder.html", context)
context.pop()
return toolbar
| bsd-3-clause |
febert/RoboGym | simulator.py | 1 | 1161 | import mujoco_py
import imp
import argparse
import numpy as np
class Simulator():
"""
Minimal MuJoCo simulator: loads the model named in a config file and steps it in a viewer.
"""
def __init__(self):
parser = argparse.ArgumentParser(description='simulator')
parser.add_argument('conf', type=str, help='config file')
args = parser.parse_args()
config = args.conf
confmod = imp.load_source('conf','configs/' + config + '.py')
self.conf = confmod.config
self.model = mujoco_py.MjModel(self.conf['modelfile'])
gofast = False
self.viewer = mujoco_py.MjViewer(visible=True, init_width=480,
init_height=480, go_fast=gofast)
self.viewer.start()
self.viewer.set_model(self.model)
# self.viewer.cam.camid = 0
def start(self):
for t in range(self.conf['T']):
# self.model.data.ctrl = np.ones_like(self.model.data.ctrl)*10
self.model.data.ctrl = np.zeros_like(self.model.data.ctrl)
self.viewer.loop_once()
self.model.step()
if __name__ == '__main__':
simulator = Simulator()
simulator.start() | mit |
pferreir/indico | indico/modules/rb/schemas.py | 3 | 20951 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from operator import itemgetter
from flask import session
from marshmallow import ValidationError, fields, post_dump, validate, validates, validates_schema
from marshmallow.fields import Boolean, DateTime, Function, Method, Nested, Number, Pluck, String
from marshmallow_enum import EnumField
from sqlalchemy import func
from indico.core.db.sqlalchemy.links import LinkType
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.core.marshmallow import mm
from indico.modules.categories.models.categories import Category
from indico.modules.events.sessions.models.blocks import SessionBlock
from indico.modules.rb.models.blocked_rooms import BlockedRoom, BlockedRoomState
from indico.modules.rb.models.blockings import Blocking
from indico.modules.rb.models.equipment import EquipmentType
from indico.modules.rb.models.locations import Location
from indico.modules.rb.models.map_areas import MapArea
from indico.modules.rb.models.principals import RoomPrincipal
from indico.modules.rb.models.reservation_edit_logs import ReservationEditLog
from indico.modules.rb.models.reservation_occurrences import ReservationOccurrence
from indico.modules.rb.models.reservations import RepeatFrequency, Reservation, ReservationLink, ReservationState
from indico.modules.rb.models.room_attributes import RoomAttribute, RoomAttributeAssociation
from indico.modules.rb.models.room_bookable_hours import BookableHours
from indico.modules.rb.models.room_features import RoomFeature
from indico.modules.rb.models.room_nonbookable_periods import NonBookablePeriod
from indico.modules.rb.models.rooms import Room
from indico.modules.rb.util import rb_is_admin
from indico.modules.users.schemas import UserSchema
from indico.util.i18n import _
from indico.util.marshmallow import (ModelList, NaiveDateTime, Principal, PrincipalList, PrincipalPermissionList,
not_empty)
from indico.util.string import natural_sort_key
class RoomAttributeValuesSchema(mm.SQLAlchemyAutoSchema):
title = String(attribute='attribute.title')
name = String(attribute='attribute.name')
class Meta:
model = RoomAttributeAssociation
fields = ('value', 'title', 'name')
class AttributesSchema(mm.SQLAlchemyAutoSchema):
class Meta:
model = RoomAttribute
fields = ('name', 'title', 'is_required', 'is_hidden')
class RoomSchema(mm.SQLAlchemyAutoSchema):
owner_name = String(attribute='owner.full_name')
class Meta:
model = Room
fields = ('id', 'name', 'capacity', 'building', 'floor', 'number', 'is_public', 'location_name', 'full_name',
'comments', 'division', 'is_reservable', 'reservations_need_confirmation', 'sprite_position',
'surface_area', 'latitude', 'longitude', 'telephone', 'key_location', 'max_advance_days',
'owner_name', 'available_equipment', 'has_photo', 'verbose_name', 'map_url', 'site')
class AdminRoomSchema(mm.SQLAlchemyAutoSchema):
class Meta:
model = Room
fields = ('id', 'location_id', 'name', 'full_name', 'sprite_position', 'owner_name', 'comments')
class RoomUpdateSchema(RoomSchema):
owner = Principal()
acl_entries = PrincipalPermissionList(RoomPrincipal)
protection_mode = EnumField(ProtectionMode)
class Meta(RoomSchema.Meta):
fields = RoomSchema.Meta.fields + ('notification_before_days', 'notification_before_days_weekly', 'owner',
'notification_before_days_monthly', 'notifications_enabled',
'end_notification_daily', 'end_notification_weekly',
'end_notification_monthly', 'end_notifications_enabled',
'verbose_name', 'site', 'notification_emails', 'booking_limit_days',
'acl_entries', 'protection_mode')
class RoomUpdateArgsSchema(mm.Schema):
verbose_name = fields.String(allow_none=True)
site = fields.String(allow_none=True)
building = fields.String(validate=lambda x: x is not None)
floor = fields.String(validate=lambda x: x is not None)
number = fields.String(validate=lambda x: x is not None)
longitude = fields.Float(allow_none=True)
latitude = fields.Float(allow_none=True)
is_reservable = fields.Boolean(allow_none=True)
reservations_need_confirmation = fields.Boolean(allow_none=True)
notification_emails = fields.List(fields.Email())
notification_before_days = fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True)
notification_before_days_weekly = fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True)
notification_before_days_monthly = fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True)
notifications_enabled = fields.Boolean()
end_notification_daily = fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True)
end_notification_weekly = fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True)
end_notification_monthly = fields.Int(validate=lambda x: 1 <= x <= 30, allow_none=True)
end_notifications_enabled = fields.Boolean()
booking_limit_days = fields.Int(validate=lambda x: x >= 1, allow_none=True)
owner = Principal(validate=lambda x: x is not None, allow_none=True)
key_location = fields.String()
telephone = fields.String()
capacity = fields.Int(validate=lambda x: x >= 1)
division = fields.String(allow_none=True)
surface_area = fields.Int(validate=lambda x: x >= 0, allow_none=True)
max_advance_days = fields.Int(validate=lambda x: x >= 1, allow_none=True)
comments = fields.String()
acl_entries = PrincipalPermissionList(RoomPrincipal)
protection_mode = EnumField(ProtectionMode)
class RoomEquipmentSchema(mm.SQLAlchemyAutoSchema):
class Meta:
model = Room
fields = ('available_equipment',)
class MapAreaSchema(mm.SQLAlchemyAutoSchema):
class Meta:
model = MapArea
fields = ('name', 'top_left_latitude', 'top_left_longitude', 'bottom_right_latitude', 'bottom_right_longitude',
'is_default', 'id')
class ReservationSchema(mm.SQLAlchemyAutoSchema):
start_dt = NaiveDateTime()
end_dt = NaiveDateTime()
class Meta:
model = Reservation
fields = ('id', 'booking_reason', 'booked_for_name', 'room_id', 'is_accepted', 'start_dt', 'end_dt')
class ReservationLinkedObjectDataSchema(mm.Schema):
id = Number()
title = Method('_get_title')
event_title = Function(lambda obj: obj.event.title)
event_url = Function(lambda obj: obj.event.url)
own_room_id = Number()
own_room_name = Function(lambda obj: (obj.own_room.name if obj.own_room else obj.own_room_name) or None)
def _get_title(self, obj):
if isinstance(obj, SessionBlock):
return obj.full_title
return obj.title
class ReservationUserEventSchema(mm.Schema):
id = Number()
title = String()
url = String()
start_dt = DateTime()
end_dt = DateTime()
class ReservationOccurrenceSchema(mm.SQLAlchemyAutoSchema):
reservation = Nested(ReservationSchema)
state = EnumField(ReservationState)
start_dt = NaiveDateTime()
end_dt = NaiveDateTime()
class Meta:
model = ReservationOccurrence
fields = ('start_dt', 'end_dt', 'is_valid', 'reservation', 'rejection_reason', 'state')
class ReservationOccurrenceSchemaWithPermissions(ReservationOccurrenceSchema):
permissions = Method('_get_permissions')
class Meta:
fields = ReservationOccurrenceSchema.Meta.fields + ('permissions',)
def _get_permissions(self, occurrence):
methods = ('can_cancel', 'can_reject')
admin_permissions = None
user_permissions = {x: getattr(occurrence, x)(session.user, allow_admin=False) for x in methods}
if rb_is_admin(session.user):
admin_permissions = {x: getattr(occurrence, x)(session.user) for x in methods}
return {'user': user_permissions, 'admin': admin_permissions}
class ReservationConcurrentOccurrenceSchema(ReservationOccurrenceSchema):
reservations = Nested(ReservationSchema, many=True)
class Meta:
fields = ReservationOccurrenceSchema.Meta.fields + ('reservations',)
exclude = ('reservation',)
class ReservationEditLogSchema(UserSchema):
class Meta:
model = ReservationEditLog
fields = ('id', 'timestamp', 'info', 'user_name')
@post_dump(pass_many=True)
def sort_logs(self, data, many, **kwargs):
if many:
data = sorted(data, key=itemgetter('timestamp'), reverse=True)
return data
class ReservationLinkSchema(mm.SQLAlchemyAutoSchema):
type = EnumField(LinkType, attribute='link_type')
id = Function(lambda link: link.object.id)
class Meta:
model = ReservationLink
fields = ('type', 'id')
class ReservationDetailsSchema(mm.SQLAlchemyAutoSchema):
booked_for_user = Nested(UserSchema, only=('id', 'identifier', 'full_name', 'phone', 'email'))
created_by_user = Nested(UserSchema, only=('id', 'identifier', 'full_name', 'email'))
edit_logs = Nested(ReservationEditLogSchema, many=True)
can_accept = Function(lambda booking: booking.can_accept(session.user))
can_cancel = Function(lambda booking: booking.can_cancel(session.user))
can_delete = Function(lambda booking: booking.can_delete(session.user))
can_edit = Function(lambda booking: booking.can_edit(session.user))
can_reject = Function(lambda booking: booking.can_reject(session.user))
permissions = Method('_get_permissions')
state = EnumField(ReservationState)
is_linked_to_object = Function(lambda booking: booking.link is not None)
link = Nested(ReservationLinkSchema)
start_dt = NaiveDateTime()
end_dt = NaiveDateTime()
class Meta:
model = Reservation
fields = ('id', 'start_dt', 'end_dt', 'repetition', 'booking_reason', 'created_dt', 'booked_for_user',
'room_id', 'created_by_user', 'edit_logs', 'permissions',
'is_cancelled', 'is_rejected', 'is_accepted', 'is_pending', 'rejection_reason',
'is_linked_to_object', 'link', 'state', 'external_details_url')
def _get_permissions(self, booking):
methods = ('can_accept', 'can_cancel', 'can_delete', 'can_edit', 'can_reject')
admin_permissions = None
user_permissions = {x: getattr(booking, x)(session.user, allow_admin=False) for x in methods}
if rb_is_admin(session.user):
admin_permissions = {x: getattr(booking, x)(session.user) for x in methods}
return {'user': user_permissions, 'admin': admin_permissions}
class BlockedRoomSchema(mm.SQLAlchemyAutoSchema):
room = Nested(RoomSchema, only=('id', 'name', 'sprite_position', 'full_name'))
state = EnumField(BlockedRoomState)
class Meta:
model = BlockedRoom
fields = ('room', 'state', 'rejection_reason', 'rejected_by')
@post_dump(pass_many=True)
def sort_rooms(self, data, many, **kwargs):
if many:
data = sorted(data, key=lambda x: natural_sort_key(x['room']['full_name']))
return data
class BlockingSchema(mm.SQLAlchemyAutoSchema):
blocked_rooms = Nested(BlockedRoomSchema, many=True)
allowed = PrincipalList()
permissions = Method('_get_permissions')
created_by = Pluck(UserSchema, 'full_name', attribute='created_by_user')
class Meta:
model = Blocking
fields = ('id', 'start_date', 'end_date', 'reason', 'blocked_rooms', 'allowed', 'created_by', 'permissions')
def _get_permissions(self, blocking):
methods = ('can_delete', 'can_edit')
admin_permissions = None
user_permissions = {x: getattr(blocking, x)(session.user, allow_admin=False) for x in methods}
if rb_is_admin(session.user):
admin_permissions = {x: getattr(blocking, x)(session.user) for x in methods}
return {'user': user_permissions, 'admin': admin_permissions}
class NonBookablePeriodSchema(mm.SQLAlchemyAutoSchema):
start_dt = NaiveDateTime()
end_dt = NaiveDateTime()
class Meta:
model = NonBookablePeriod
fields = ('start_dt', 'end_dt')
class BookableHoursSchema(mm.SQLAlchemyAutoSchema):
class Meta:
model = BookableHours
fields = ('start_time', 'end_time')
class LocationsSchema(mm.SQLAlchemyAutoSchema):
rooms = Nested(RoomSchema, many=True, only=('id', 'name', 'full_name', 'sprite_position'))
class Meta:
model = Location
fields = ('id', 'name', 'rooms')
class AdminLocationsSchema(mm.SQLAlchemyAutoSchema):
can_delete = Function(lambda loc: not loc.rooms)
class Meta:
model = Location
fields = ('id', 'name', 'can_delete', 'map_url_template', 'room_name_format')
class RBUserSchema(UserSchema):
has_owned_rooms = mm.Method('has_managed_rooms')
is_rb_admin = mm.Function(lambda user: rb_is_admin(user))
class Meta:
fields = UserSchema.Meta.fields + ('has_owned_rooms', 'is_admin', 'is_rb_admin', 'identifier', 'full_name')
def has_managed_rooms(self, user):
from indico.modules.rb.operations.rooms import has_managed_rooms
return has_managed_rooms(user)
class CreateBookingSchema(mm.Schema):
start_dt = fields.DateTime(required=True)
end_dt = fields.DateTime(required=True)
repeat_frequency = EnumField(RepeatFrequency, required=True)
repeat_interval = fields.Int(missing=0, validate=lambda x: x >= 0)
room_id = fields.Int(required=True)
booked_for_user = Principal(data_key='user', allow_external_users=True)
booking_reason = fields.String(data_key='reason', validate=validate.Length(min=3), required=True)
is_prebooking = fields.Bool(missing=False)
link_type = EnumField(LinkType)
link_id = fields.Int()
link_back = fields.Bool(missing=False)
admin_override_enabled = fields.Bool(missing=False)
@validates_schema(skip_on_field_errors=True)
def validate_dts(self, data, **kwargs):
if data['start_dt'] >= data['end_dt']:
raise ValidationError(_('Booking cannot end before it starts'))
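# A minimal load sketch (payload values are hypothetical; marshmallow raises
# ValidationError on invalid input):
#
# data = CreateBookingSchema().load({
#     'start_dt': '2021-06-01T10:00:00',
#     'end_dt': '2021-06-01T12:00:00',
#     'repeat_frequency': 'NEVER',
#     'room_id': 1,
#     'reason': 'Team meeting',
# })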
class RoomFeatureSchema(mm.SQLAlchemyAutoSchema):
class Meta:
model = RoomFeature
fields = ('id', 'name', 'title', 'icon')
class EquipmentTypeSchema(mm.SQLAlchemyAutoSchema):
features = Nested(RoomFeatureSchema, many=True)
used = Function(lambda eq, ctx: eq.id in ctx['used_ids'])
class Meta:
model = EquipmentType
fields = ('id', 'name', 'features', 'used')
class AdminEquipmentTypeSchema(mm.SQLAlchemyAutoSchema):
class Meta:
model = EquipmentType
fields = ('id', 'name', 'features')
class RoomAttributeSchema(mm.SQLAlchemyAutoSchema):
hidden = Boolean(attribute='is_hidden')
class Meta:
model = RoomAttribute
fields = ('id', 'name', 'title', 'hidden')
class LocationArgs(mm.Schema):
class Meta:
rh_context = ('location',)
name = fields.String(required=True)
room_name_format = fields.String(required=True)
map_url_template = fields.URL(schemes={'http', 'https'}, allow_none=True, missing='')
@validates('name')
def _check_name_unique(self, name, **kwargs):
location = self.context['location']
query = Location.query.filter(~Location.is_deleted, func.lower(Location.name) == name.lower())
if location:
query = query.filter(Location.id != location.id)
if query.has_rows():
raise ValidationError(_('Name must be unique'))
@validates('room_name_format')
def _check_room_name_format_placeholders(self, room_name_format, **kwargs):
missing = {x for x in ('{building}', '{floor}', '{number}') if x not in room_name_format}
if missing:
# validated client-side, no i18n needed
raise ValidationError('Missing placeholders: {}'.format(', '.join(missing)))
class FeatureArgs(mm.Schema):
class Meta:
rh_context = ('feature',)
name = fields.String(validate=validate.Length(min=2), required=True)
title = fields.String(validate=validate.Length(min=2), required=True)
icon = fields.String(missing='')
@validates('name')
def _check_name_unique(self, name, **kwargs):
feature = self.context['feature']
query = RoomFeature.query.filter(func.lower(RoomFeature.name) == name.lower())
if feature:
query = query.filter(RoomFeature.id != feature.id)
if query.has_rows():
raise ValidationError(_('Name must be unique'))
class EquipmentTypeArgs(mm.Schema):
class Meta:
rh_context = ('equipment_type',)
name = fields.String(validate=validate.Length(min=2), required=True)
features = ModelList(RoomFeature, missing=[])
@validates('name')
def _check_name_unique(self, name, **kwargs):
equipment_type = self.context['equipment_type']
query = EquipmentType.query.filter(func.lower(EquipmentType.name) == name.lower())
if equipment_type:
query = query.filter(EquipmentType.id != equipment_type.id)
if query.has_rows():
raise ValidationError(_('Name must be unique'))
class RoomAttributeArgs(mm.Schema):
class Meta:
rh_context = ('attribute',)
name = fields.String(validate=validate.Length(min=2), required=True)
title = fields.String(validate=validate.Length(min=2), required=True)
hidden = fields.Bool(missing=False)
@validates('name')
def _check_name_unique(self, name, **kwargs):
attribute = self.context['attribute']
query = RoomAttribute.query.filter(func.lower(RoomAttribute.name) == name.lower())
if attribute:
query = query.filter(RoomAttribute.id != attribute.id)
if query.has_rows():
raise ValidationError(_('Name must be unique'))
class SettingsSchema(mm.Schema):
admin_principals = PrincipalList(allow_groups=True)
authorized_principals = PrincipalList(allow_groups=True)
managers_edit_rooms = fields.Bool()
tileserver_url = fields.String(validate=validate.URL(schemes={'http', 'https'}), allow_none=True)
booking_limit = fields.Int(validate=not_empty)
notifications_enabled = fields.Bool()
notification_before_days = fields.Int(validate=validate.Range(min=1, max=30))
notification_before_days_weekly = fields.Int(validate=validate.Range(min=1, max=30))
notification_before_days_monthly = fields.Int(validate=validate.Range(min=1, max=30))
end_notifications_enabled = fields.Bool()
end_notification_daily = fields.Int(validate=validate.Range(min=1, max=30))
end_notification_weekly = fields.Int(validate=validate.Range(min=1, max=30))
end_notification_monthly = fields.Int(validate=validate.Range(min=1, max=30))
excluded_categories = ModelList(Category)
grace_period = fields.Int(validate=validate.Range(min=0, max=24), allow_none=True)
@validates('tileserver_url')
def _check_tileserver_url_placeholders(self, tileserver_url, **kwargs):
if tileserver_url is None:
return
missing = {x for x in ('{x}', '{y}', '{z}') if x not in tileserver_url}
if missing:
# validated client-side, no i18n needed
raise ValidationError('Missing placeholders: {}'.format(', '.join(missing)))
attributes_schema = AttributesSchema(many=True)
rb_user_schema = RBUserSchema()
rooms_schema = RoomSchema(many=True)
room_attribute_values_schema = RoomAttributeValuesSchema(many=True)
room_update_schema = RoomUpdateSchema()
room_equipment_schema = RoomEquipmentSchema()
map_areas_schema = MapAreaSchema(many=True)
reservation_occurrences_schema = ReservationOccurrenceSchema(many=True)
reservation_occurrences_schema_with_permissions = ReservationOccurrenceSchemaWithPermissions(many=True)
concurrent_pre_bookings_schema = ReservationConcurrentOccurrenceSchema(many=True)
reservation_schema = ReservationSchema()
reservation_details_schema = ReservationDetailsSchema()
reservation_linked_object_data_schema = ReservationLinkedObjectDataSchema()
reservation_user_event_schema = ReservationUserEventSchema(many=True)
blockings_schema = BlockingSchema(many=True)
simple_blockings_schema = BlockingSchema(many=True, only=('id', 'reason'))
nonbookable_periods_schema = NonBookablePeriodSchema(many=True)
bookable_hours_schema = BookableHoursSchema()
locations_schema = LocationsSchema(many=True)
admin_locations_schema = AdminLocationsSchema(many=True)
admin_equipment_type_schema = AdminEquipmentTypeSchema()
room_feature_schema = RoomFeatureSchema()
room_attribute_schema = RoomAttributeSchema()
| mit |
adoosii/edx-platform | common/djangoapps/enrollment/tests/fake_data_api.py | 104 | 3705 | """
A Fake Data API for testing purposes.
"""
import copy
import datetime
_DEFAULT_FAKE_MODE = {
"slug": "honor",
"name": "Honor Code Certificate",
"min_price": 0,
"suggested_prices": "",
"currency": "usd",
"expiration_datetime": None,
"description": None
}
_ENROLLMENTS = []
_COURSES = []
_ENROLLMENT_ATTRIBUTES = []
# pylint: disable=unused-argument
def get_course_enrollments(student_id):
"""Stubbed out Enrollment data request."""
return _ENROLLMENTS
def get_course_enrollment(student_id, course_id):
"""Stubbed out Enrollment data request."""
return _get_fake_enrollment(student_id, course_id)
def create_course_enrollment(student_id, course_id, mode='honor', is_active=True):
"""Stubbed out Enrollment creation request. """
return add_enrollment(student_id, course_id, mode=mode, is_active=is_active)
def update_course_enrollment(student_id, course_id, mode=None, is_active=None):
"""Stubbed out Enrollment data request."""
enrollment = _get_fake_enrollment(student_id, course_id)
if enrollment and mode is not None:
enrollment['mode'] = mode
if enrollment and is_active is not None:
enrollment['is_active'] = is_active
return enrollment
def get_course_enrollment_info(course_id, include_expired=False):
"""Stubbed out Enrollment data request."""
return _get_fake_course_info(course_id)
def _get_fake_enrollment(student_id, course_id):
"""Get an enrollment from the enrollments array."""
for enrollment in _ENROLLMENTS:
if student_id == enrollment['student'] and course_id == enrollment['course']['course_id']:
return enrollment
def _get_fake_course_info(course_id):
"""Get a course from the courses array."""
for course in _COURSES:
if course_id == course['course_id']:
return course
def add_enrollment(student_id, course_id, is_active=True, mode='honor'):
"""Append an enrollment to the enrollments array."""
enrollment = {
"created": datetime.datetime.now(),
"mode": mode,
"is_active": is_active,
"course": _get_fake_course_info(course_id),
"student": student_id
}
_ENROLLMENTS.append(enrollment)
return enrollment
# pylint: disable=unused-argument
def add_or_update_enrollment_attr(user_id, course_id, attributes):
"""Add or update enrollment attribute array"""
for attribute in attributes:
_ENROLLMENT_ATTRIBUTES.append({
'namespace': attribute['namespace'],
'name': attribute['name'],
'value': attribute['value']
})
# pylint: disable=unused-argument
def get_enrollment_attributes(user_id, course_id):
"""Retrieve enrollment attribute array"""
return _ENROLLMENT_ATTRIBUTES
def add_course(course_id, enrollment_start=None, enrollment_end=None, invite_only=False, course_modes=None):
"""Append course to the courses array."""
course_info = {
"course_id": course_id,
"enrollment_end": enrollment_end,
"course_modes": [],
"enrollment_start": enrollment_start,
"invite_only": invite_only,
}
if not course_modes:
course_info['course_modes'].append(_DEFAULT_FAKE_MODE)
else:
for mode in course_modes:
new_mode = copy.deepcopy(_DEFAULT_FAKE_MODE)
new_mode['slug'] = mode
course_info['course_modes'].append(new_mode)
_COURSES.append(course_info)
def reset():
"""Set the enrollments and courses arrays to be empty."""
global _COURSES # pylint: disable=global-statement
_COURSES = []
global _ENROLLMENTS # pylint: disable=global-statement
_ENROLLMENTS = []
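# A minimal usage sketch of this fake API (the course and user ids are
# hypothetical):
#
#   add_course('edX/DemoX/Demo', course_modes=['honor', 'verified'])
#   enrollment = create_course_enrollment('student', 'edX/DemoX/Demo', mode='honor')
#   assert get_course_enrollment('student', 'edX/DemoX/Demo') is enrollment
#   update_course_enrollment('student', 'edX/DemoX/Demo', is_active=False)
#   reset()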
| agpl-3.0 |
lmazuel/azure-sdk-for-python | azure-keyvault/azure/keyvault/models/certificate_update_parameters.py | 4 | 1565 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CertificateUpdateParameters(Model):
"""The certificate update parameters.
:param certificate_policy: The management policy for the certificate.
:type certificate_policy: :class:`CertificatePolicy
<azure.keyvault.models.CertificatePolicy>`
:param certificate_attributes: The attributes of the certificate
(optional).
:type certificate_attributes: :class:`CertificateAttributes
<azure.keyvault.models.CertificateAttributes>`
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict
"""
_attribute_map = {
'certificate_policy': {'key': 'policy', 'type': 'CertificatePolicy'},
'certificate_attributes': {'key': 'attributes', 'type': 'CertificateAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, certificate_policy=None, certificate_attributes=None, tags=None):
self.certificate_policy = certificate_policy
self.certificate_attributes = certificate_attributes
self.tags = tags
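# A short instantiation sketch (field values are illustrative):
#
#   params = CertificateUpdateParameters(
#       tags={'team': 'security', 'env': 'prod'})
#   # 'tags' is serialized under the 'tags' key per the _attribute_map above;
#   # certificate_policy and certificate_attributes are left unset.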
| mit |
JamesMilnerUK/github-map | scraping/github_api.py | 1 | 2743 | __author__ = 'James'
import json
import urllib2
import csv
import base64
import time
import StringIO
from urllib2 import urlopen, Request, HTTPError
from urllib import quote
#https://github.com/settings/tokens
TOKEN = ""
def write_github_csv(cities_csv, output_csv):
with open(cities_csv + '.csv', 'rb') as citiesCsv:
citiesCsv = csv.reader(citiesCsv)
with open(output_csv + '.csv', 'wb') as outputCsv:
outputCsv = csv.writer(outputCsv)
# Write headers
headers = True
for row in citiesCsv:
if headers and row:
row.append("Total")
row.append("Rate")
outputCsv.writerow(row)
headers = False
continue
# city, country, latitude, longitude, population
city = row[0]
country = row[1]
population = int(row[4].replace(",", "").replace('"', '')) * 1000 # Remove any fluff
try:
total = get_city_github_users(city, country, TOKEN)
except HTTPError as err:
print "There was an HTTP error: " + str(err.code)
print err.read()
return
row.append(total)
rate = round(float(total) / float(population) * 100.0, 2)
print total, population, rate
print
row.append(rate) # Rate
outputCsv.writerow(row)
time.sleep(3)
def request(url, token):
request = Request(url)
request.add_header('Authorization', 'token %s' % token)
response = urlopen(request)
return json.load(response)
def variations(variation, country, token):
github_url = "https://api.github.com/search/users?q=+location:"
cityAddress = quote('"' + variation + ", " + country + '"')
url = github_url + cityAddress
print "Getting...", variation + ", " + country
print "URL: " + url
total = int(request(url, token)["total_count"])
time.sleep(1)
url = github_url + quote('"' + variation + '"')
print "URL: " + url
print ""
total += int(request(url, token)["total_count"])
return total
def get_city_github_users(city, country, token):
total = 0
# Hacky as the cities sometimes have alternative names in brackets
if "(" in city:
cityVariations = city.split("(")
for c in cityVariations:
c = c.replace(")", "").strip()
total += variations(c, country, token)
else:
total += variations(city, country, token)
return total
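# A minimal usage sketch (the token and place names are hypothetical):
#
#   TOKEN = '<personal access token from https://github.com/settings/tokens>'
#   total = get_city_github_users('London (Greater London)', 'United Kingdom', TOKEN)
#   # sums the "total_count" of the searches for 'London, United Kingdom',
#   # 'London', 'Greater London, United Kingdom' and 'Greater London'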
if __name__ == "__main__":
write_github_csv("cities", "github-cities")
| mit |
AHelper/python-scsi | pyscsi/pyscsi/scsi_cdb_inquiry.py | 1 | 16921 | # coding: utf-8
# Copyright (C) 2014 by Ronnie Sahlberg<ronniesahlberg@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
from scsi_command import SCSICommand
from pyscsi.utils.converter import scsi_int_to_ba, scsi_ba_to_int, encode_dict, decode_bits
import scsi_enum_inquiry as inquiry_enums
#
# SCSI Inquiry command and definitions
#
class Inquiry(SCSICommand):
"""
A class to hold information from an inquiry command to a scsi device
"""
_cdb_bits = {
'opcode': [0xff, 0],
'evpd': [0x01, 1],
'page_code': [0xff, 2],
'alloc_len': [0xffff, 3]
}
_datain_bits = {
'peripheral_qualifier': [0xe0, 0],
'peripheral_device_type': [0x1f, 0]
}
_standard_bits = {
'rmb': [0x80, 1],
'version': [0xff, 2],
'normaca': [0x20, 3],
'hisup': [0x10, 3],
'response_data_format': [0x0f, 3],
'additional_length': [0xff, 4],
'sccs': [0x80, 5],
'acc': [0x40, 5],
'tpgs': [0x30, 5],
'3pc': [0x08, 5],
'protect': [0x01, 5],
'encserv': [0x40, 6],
'vs': [0x20, 6],
'multip': [0x10, 6],
'addr16': [0x01, 6],
'wbus16': [0x20, 7],
'sync': [0x10, 7],
'cmdque': [0x02, 7],
'vs2': [0x01, 7],
'clocking': [0x0c, 56],
'qas': [0x02, 56],
'ius': [0x01, 56]
}
_pagecode_bits = {
'page_code': [0xff, 1],
}
_block_limits_bits = {
'wsnz': [0x01, 4],
'ugavalid': [0x80, 32],
'max_caw_len': [0xff, 5],
'opt_xfer_len_gran': [0xffff, 6],
'max_xfer_len': [0xffffffff, 8],
'opt_xfer_len': [0xffffffff, 12],
'max_pfetch_len': [0xffffffff, 16],
'max_unmap_lba_count': [0xffffffff, 20],
'max_unmap_bd_count': [0xffffffff, 24],
'opt_unmap_gran': [0xffffffff, 28],
'unmap_gran_alignment': [0xffffffff, 32],
'max_ws_len': [0xffffffff, 36]
}
_block_dev_char_bits = {
'medium_rotation_rate': [0xffff, 4],
'product_type': [0xff, 6],
'wabereq': [0xc0, 7],
'wacereq': [0x30, 7],
'nominal_form_factor': [0x0f, 7],
'fuab': [0x02, 8],
'vbuls': [0x01, 8]
}
_logical_block_provisioning_bits = {
'threshold_exponent': [0xff, 4],
'lbpu': [0x80, 5],
'lpbws': [0x40, 5],
'lbpws10': [0x20, 5],
'lbprz': [0x04, 5],
'anc_sup': [0x02, 5],
'dp': [0x01, 5],
'provisioning_type': [0x07, 6]
}
_referrals_bits = {
'user_data_segment_size': [0xffffffff, 8],
'user_data_segment_multiplier': [0xffffffff, 12]
}
_extended_bits = {
'activate_microcode': [0xc0, 4],
'spt': [0x38, 4],
'grd_chk': [0x04, 4],
'app_chk': [0x02, 4],
'ref_chk': [0x01, 4],
'uask_sup': [0x20, 5],
'group_sup': [0x10, 5],
'prior_sup': [0x08, 5],
'headsup': [0x04, 5],
'ordsup': [0x02, 5],
'simpsup': [0x01, 5],
'wu_sup': [0x08, 6],
'crd_sup': [0x04, 6],
'nv_sup': [0x02, 6],
'v_sup': [0x01, 6],
'p_i_i_sup': [0x10, 7],
'luiclr': [0x01, 7],
'r_sup': [0x10, 8],
'cbcs': [0x01, 8],
'multi_it_nexus_microcode_download': [0x0f, 9],
'extended_self_test_completion_minutes': [0xffff, 10],
'poa_sup': [0x80, 12],
'hra_sup': [0x40, 12],
'vsa_sup': [0x20, 12],
'maximum_supported_sense_data_length': [0xff, 13]
}
_designator_bits = {
'protocol_identifier': [0xf0, 0],
'code_set': [0x0f, 0],
'piv': [0x80, 1],
'association': [0x30, 1],
'designator_type': [0x0f, 1],
'designator_length': [0xff, 3]
}
_naa_type_bits = {
'naa': [0xf0, 0]
}
_naa_ieee_extended_bits = {
'vendor_specific_identifier_a': [0x0fff, 0],
'ieee_company_id': [0xffffff, 2],
'vendor_specific_identifier_b': [0xffffff, 5]
}
_naa_locally_assigned_bits = {
'locally_administered_value': [0x0fffffffffffffff, 0]
}
_naa_ieee_registered_bits = {
'ieee_company_id': [0x0ffffff0, 0],
'vendor_specific_identifier': [0x0fffffffff, 3]
}
_naa_ieee_registered_extended_bits = {
'ieee_company_id': [0x0ffffff0, 0],
'vendor_specific_identifier': [0x0fffffffff, 3],
'vendor_specific_identifier_extension': [0xffffffffffffffff, 8]
}
_relative_port_bits = {
'relative_port': [0xffff, 2]
}
_target_portal_group_bits = {
'target_portal_group': [0xffff, 2]
}
_logical_unit_group_bits = {
'logical_unit_group': [0xffff, 2]
}
_pci_express_routing_id_bits = {
'pci_express_routing_id': [0xffff, 0],
}
def __init__(self, scsi, evpd=0, page_code=0, alloclen=96):
"""
initialize a new instance
:param scsi: a SCSI instance
:param evpd: the byte to enable or disable vital product data
:param page_code: the page code for the vpd page
:param alloclen: the max number of bytes allocated for the data_in buffer
"""
SCSICommand.__init__(self, scsi, 0, alloclen)
self._evpd = evpd
self.cdb = self.build_cdb(evpd, page_code, alloclen)
self.execute()
def build_cdb(self, evpd, page_code, alloclen):
"""
method to create a byte array for a Command Descriptor Block with a proper length
build_cdb returns a byte array of 6, 10, 12 or 16 bytes depending on the operation code and if
vital product data is enabled
:param evpd: the byte to enable or disable vital product data
:param page_code: the page code for the vpd page
:param alloclen: the max number of bytes allocated for the data_in buffer
:return: a byte array representing a command descriptor block
"""
cdb = {
'opcode': self.scsi.device.opcodes.INQUIRY.value,
'evpd': evpd,
'page_code': page_code,
'alloc_len': alloclen
}
return self.marshall_cdb(cdb)
@staticmethod
def marshall_designator(type, data):
if type == inquiry_enums.DESIGNATOR.VENDOR_SPECIFIC:
return data['vendor_specific']
if type == inquiry_enums.DESIGNATOR.T10_VENDOR_ID:
return data['t10_vendor_id'] + data['vendor_specific_id']
if type == inquiry_enums.DESIGNATOR.EUI_64:
if 'identifier_extension' in data:
return data['identifier_extension'] + \
scsi_int_to_ba(data['ieee_company_id'], 3) + \
data['vendor_specific_extension_id']
if 'directory_id' in data:
return scsi_int_to_ba(data['ieee_company_id'], 3) + \
data['vendor_specific_extension_id'] + \
data['directory_id']
return scsi_int_to_ba(data['ieee_company_id'], 3) + \
data['vendor_specific_extension_id']
if type == inquiry_enums.DESIGNATOR.NAA:
_r = bytearray(16)
encode_dict(data, Inquiry._naa_type_bits, _r)
if data['naa'] == inquiry_enums.NAA.IEEE_EXTENDED:
encode_dict(data, Inquiry._naa_ieee_extended_bits, _r)
return _r[:8]
if data['naa'] == inquiry_enums.NAA.LOCALLY_ASSIGNED:
encode_dict(data, Inquiry._naa_locally_assigned_bits, _r)
return _r[:8]
if data['naa'] == inquiry_enums.NAA.IEEE_REGISTERED:
encode_dict(data, Inquiry._naa_ieee_registered_bits, _r)
return _r[:8]
if data['naa'] == inquiry_enums.NAA.IEEE_REGISTERED_EXTENDED:
encode_dict(data, Inquiry._naa_ieee_registered_extended_bits, _r)
return _r[:16]
if type == inquiry_enums.DESIGNATOR.RELATIVE_TARGET_PORT_IDENTIFIER:
_r = bytearray(4)
encode_dict(data, Inquiry._relative_port_bits, _r)
return _r
if type == inquiry_enums.DESIGNATOR.TARGET_PORTAL_GROUP:
_r = bytearray(4)
encode_dict(data, Inquiry._target_portal_group_bits, _r)
return _r
if type == inquiry_enums.DESIGNATOR.LOGICAL_UNIT_GROUP:
_r = bytearray(4)
encode_dict(data, Inquiry._logical_unit_group_bits, _r)
return _r
if type == inquiry_enums.DESIGNATOR.MD5_LOGICAL_IDENTIFIER:
return data['md5_logical_identifier']
if type == inquiry_enums.DESIGNATOR.SCSI_NAME_STRING:
return data['scsi_name_string']
if type == inquiry_enums.DESIGNATOR.PCI_EXPRESS_ROUTING_ID:
_r = bytearray(8)
encode_dict(data, Inquiry._pci_express_routing_id_bits, _r)
return _r
@staticmethod
def marshall_designation_descriptor(data):
_r = bytearray(4)
encode_dict(data, Inquiry._designator_bits, _r)
_r += Inquiry.marshall_designator(data['designator_type'], data['designator'])
_r[3] = len(_r) - 4
return _r
@staticmethod
def unmarshall_designator(type, data):
_d = {}
if type == inquiry_enums.DESIGNATOR.VENDOR_SPECIFIC:
_d['vendor_specific'] = data
if type == inquiry_enums.DESIGNATOR.T10_VENDOR_ID:
_d['t10_vendor_id'] = data[:8]
_d['vendor_specific_id'] = data[8:]
if type == inquiry_enums.DESIGNATOR.EUI_64:
if len(data) == 8:
_d['ieee_company_id'] = scsi_ba_to_int(data[:3])
_d['vendor_specific_extension_id'] = data[3:8]
if len(data) == 12:
_d['ieee_company_id'] = scsi_ba_to_int(data[:3])
_d['vendor_specific_extension_id'] = data[3:8]
_d['directory_id'] = data[8:]
if len(data) == 16:
_d['identifier_extension'] = data[:8]
_d['ieee_company_id'] = scsi_ba_to_int(data[8:11])
_d['vendor_specific_extension_id'] = data[11:]
if type == inquiry_enums.DESIGNATOR.NAA:
decode_bits(data, Inquiry._naa_type_bits, _d)
if _d['naa'] == inquiry_enums.NAA.IEEE_EXTENDED:
decode_bits(data, Inquiry._naa_ieee_extended_bits, _d)
if _d['naa'] == inquiry_enums.NAA.LOCALLY_ASSIGNED:
decode_bits(data, Inquiry._naa_locally_assigned_bits, _d)
if _d['naa'] == inquiry_enums.NAA.IEEE_REGISTERED:
decode_bits(data, Inquiry._naa_ieee_registered_bits, _d)
if _d['naa'] == inquiry_enums.NAA.IEEE_REGISTERED_EXTENDED:
decode_bits(data, Inquiry._naa_ieee_registered_extended_bits, _d)
if type == inquiry_enums.DESIGNATOR.RELATIVE_TARGET_PORT_IDENTIFIER:
decode_bits(data, Inquiry._relative_port_bits, _d)
if type == inquiry_enums.DESIGNATOR.TARGET_PORTAL_GROUP:
decode_bits(data, Inquiry._target_portal_group_bits, _d)
if type == inquiry_enums.DESIGNATOR.LOGICAL_UNIT_GROUP:
decode_bits(data, Inquiry._logical_unit_group_bits, _d)
if type == inquiry_enums.DESIGNATOR.MD5_LOGICAL_IDENTIFIER:
_d['md5_logical_identifier'] = data[0:16]
if type == inquiry_enums.DESIGNATOR.SCSI_NAME_STRING:
_d['scsi_name_string'] = data
if type == inquiry_enums.DESIGNATOR.PCI_EXPRESS_ROUTING_ID:
decode_bits(data, Inquiry._pci_express_routing_id_bits, _d)
return _d
def unmarshall(self):
"""
Unmarshall the Inquiry data.
"""
self.result = self.unmarshall_datain(self.datain, self._evpd)
@staticmethod
def unmarshall_datain(data, evpd=0):
"""
Unmarshall the Inquiry datain
"""
result = {}
decode_bits(data, Inquiry._datain_bits, result)
if evpd == 0:
decode_bits(data, Inquiry._standard_bits, result)
result.update({'t10_vendor_identification': data[8:16]})
result.update({'product_identification': data[16:32]})
result.update({'product_revision_level': data[32:36]})
return result
decode_bits(data, Inquiry._pagecode_bits, result)
data = data[:4 + scsi_ba_to_int(data[2:4])]
if result['page_code'] == inquiry_enums.VPD.SUPPORTED_VPD_PAGES:
vpd_pages = []
for i in data[4:]:
vpd_pages.append(i)
result.update({'vpd_pages': vpd_pages})
return result
if result['page_code'] == inquiry_enums.VPD.BLOCK_LIMITS:
decode_bits(data, Inquiry._block_limits_bits, result)
return result
if result['page_code'] == inquiry_enums.VPD.BLOCK_DEVICE_CHARACTERISTICS:
decode_bits(data, Inquiry._block_dev_char_bits, result)
return result
if result['page_code'] == inquiry_enums.VPD.LOGICAL_BLOCK_PROVISIONING:
decode_bits(data, Inquiry._logical_block_provisioning_bits, result)
return result
if result['page_code'] == inquiry_enums.VPD.REFERRALS:
decode_bits(data, Inquiry._referrals_bits, result)
return result
if result['page_code'] == inquiry_enums.VPD.UNIT_SERIAL_NUMBER:
result.update({'unit_serial_number': data[4:]})
return result
if result['page_code'] == inquiry_enums.VPD.EXTENDED_INQUIRY_DATA:
decode_bits(data, Inquiry._extended_bits, result)
return result
if result['page_code'] == inquiry_enums.VPD.DEVICE_IDENTIFICATION:
data = data[4:]
_d = []
while len(data):
_bc = data[3] + 4
_dd = {}
decode_bits(data, Inquiry._designator_bits, _dd)
if _dd['piv'] == 0 or (_dd['association'] != 1 and _dd['association'] != 2):
del _dd['protocol_identifier']
_dd['designator'] = Inquiry.unmarshall_designator(_dd['designator_type'], data[4:4 + data[3]])
_d.append(_dd)
data = data[_bc:]
result.update({'designator_descriptors': _d})
return result
@staticmethod
def marshall_datain(data):
"""
Marshall the Inquiry datain.
"""
if 'page_code' not in data:
result = bytearray(96)
encode_dict(data, Inquiry._datain_bits, result)
encode_dict(data, Inquiry._standard_bits, result)
result[8:16] = data['t10_vendor_identification']
result[16:32] = data['product_identification']
result[32:36] = data['product_revision_level']
return result
result = bytearray(4)
encode_dict(data, Inquiry._datain_bits, result)
encode_dict(data, Inquiry._pagecode_bits, result)
if data['page_code'] == inquiry_enums.VPD.LOGICAL_BLOCK_PROVISIONING:
result += bytearray(4)
encode_dict(data, Inquiry._logical_block_provisioning_bits, result)
if data['page_code'] == inquiry_enums.VPD.UNIT_SERIAL_NUMBER:
result += data['unit_serial_number']
if data['page_code'] == inquiry_enums.VPD.REFERRALS:
result += bytearray(12)
encode_dict(data, Inquiry._referrals_bits, result)
if data['page_code'] == inquiry_enums.VPD.EXTENDED_INQUIRY_DATA:
result += bytearray(60)
encode_dict(data, Inquiry._extended_bits, result)
if data['page_code'] == inquiry_enums.VPD.DEVICE_IDENTIFICATION:
for _dd in data['designator_descriptors']:
_r = Inquiry.marshall_designation_descriptor(_dd)
result += _r
result[2:4] = scsi_int_to_ba(len(result) - 4, 2)
return result
@staticmethod
def unmarshall_cdb(cdb):
"""
Unmarshall an Inquiry cdb
"""
result = {}
decode_bits(cdb, Inquiry._cdb_bits, result)
return result
@staticmethod
def marshall_cdb(cdb):
"""
Marshall an Inquiry cdb
"""
result = bytearray(12)
encode_dict(cdb, Inquiry._cdb_bits, result)
return result
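# A rough sketch of decoding a standard INQUIRY data-in buffer with the static
# helpers above (the buffer contents are fabricated for illustration):
#
#   data = bytearray(96)
#   data[8:16] = b'FAKEVEND' # t10 vendor identification
#   data[16:32] = b'FAKE PRODUCT'.ljust(16) # product identification
#   result = Inquiry.unmarshall_datain(data, evpd=0)
#   result['t10_vendor_identification'] # -> bytearray(b'FAKEVEND')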
| lgpl-2.1 |
CTSRD-SOAAP/chromium-42.0.2311.135 | third_party/mesa/src/src/mapi/glapi/gen/gl_table.py | 11 | 7823 | #!/usr/bin/python2
# (C) Copyright IBM Corporation 2004
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# on the rights to use, copy, modify, merge, publish, distribute, sub
# license, and/or sell copies of the Software, and to permit persons to whom
# the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
# IBM AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Ian Romanick <idr@us.ibm.com>
import gl_XML
import license
import sys, getopt
class PrintGlTable(gl_XML.gl_print_base):
def __init__(self, es=False):
gl_XML.gl_print_base.__init__(self)
self.es = es
self.header_tag = '_GLAPI_TABLE_H_'
self.name = "gl_table.py (from Mesa)"
self.license = license.bsd_license_template % ( \
"""Copyright (C) 1999-2003 Brian Paul All Rights Reserved.
(C) Copyright IBM Corporation 2004""", "BRIAN PAUL, IBM")
self.ifdef_emitted = False;
return
def printBody(self, api):
for f in api.functionIterateByOffset():
if not f.is_abi() and not self.ifdef_emitted:
print '#if !defined HAVE_SHARED_GLAPI'
self.ifdef_emitted = True
arg_string = f.get_parameter_string()
print ' %s (GLAPIENTRYP %s)(%s); /* %d */' % (f.return_type, f.name, arg_string, f.offset)
print '#endif /* !defined HAVE_SHARED_GLAPI */'
def printRealHeader(self):
print '#ifndef GLAPIENTRYP'
print '# ifndef GLAPIENTRY'
print '# define GLAPIENTRY'
print '# endif'
print ''
print '# define GLAPIENTRYP GLAPIENTRY *'
print '#endif'
print ''
print ''
print 'struct _glapi_table'
print '{'
return
def printRealFooter(self):
print '};'
return
class PrintRemapTable(gl_XML.gl_print_base):
def __init__(self, es=False):
gl_XML.gl_print_base.__init__(self)
self.es = es
self.header_tag = '_DISPATCH_H_'
self.name = "gl_table.py (from Mesa)"
self.license = license.bsd_license_template % ("(C) Copyright IBM Corporation 2005", "IBM")
return
def printRealHeader(self):
print """
/**
* \\file main/dispatch.h
* Macros for handling GL dispatch tables.
*
* For each known GL function, there are 3 macros in this file. The first
* macro is named CALL_FuncName and is used to call that GL function using
* the specified dispatch table. The other 2 macros, called GET_FuncName
* and SET_FuncName, are used to get and set the dispatch pointer for the
* named function in the specified dispatch table.
*/
/* GLXEXT is defined when building the GLX extension in the xserver.
*/
#if !defined(GLXEXT)
#include "main/mfeatures.h"
#endif
"""
return
def printBody(self, api):
print '#define CALL_by_offset(disp, cast, offset, parameters) \\'
print ' (*(cast (GET_by_offset(disp, offset)))) parameters'
print '#define GET_by_offset(disp, offset) \\'
print ' (offset >= 0) ? (((_glapi_proc *)(disp))[offset]) : NULL'
print '#define SET_by_offset(disp, offset, fn) \\'
print ' do { \\'
print ' if ( (offset) < 0 ) { \\'
print ' /* fprintf( stderr, "[%s:%u] SET_by_offset(%p, %d, %s)!\\n", */ \\'
print ' /* __func__, __LINE__, disp, offset, # fn); */ \\'
print ' /* abort(); */ \\'
print ' } \\'
print ' else { \\'
print ' ( (_glapi_proc *) (disp) )[offset] = (_glapi_proc) fn; \\'
print ' } \\'
print ' } while(0)'
print ''
functions = []
abi_functions = []
alias_functions = []
count = 0
for f in api.functionIterateByOffset():
if not f.is_abi():
functions.append( [f, count] )
count += 1
else:
abi_functions.append( [f, -1] )
if self.es:
# remember functions with aliases
if len(f.entry_points) > 1:
alias_functions.append(f)
print '/* total number of offsets below */'
print '#define _gloffset_COUNT %d' % (len(abi_functions + functions))
print ''
for f, index in abi_functions:
print '#define _gloffset_%s %d' % (f.name, f.offset)
print ''
print '#if !FEATURE_remap_table'
print ''
for f, index in functions:
print '#define _gloffset_%s %d' % (f.name, f.offset)
print ''
print '#else /* !FEATURE_remap_table */'
print ''
if self.es:
remap_table = "esLocalRemapTable"
print '#define %s_size %u' % (remap_table, count)
print 'static int %s[ %s_size ];' % (remap_table, remap_table)
print ''
else:
remap_table = "driDispatchRemapTable"
print '#define %s_size %u' % (remap_table, count)
print 'extern int %s[ %s_size ];' % (remap_table, remap_table)
print ''
for f, index in functions:
print '#define %s_remap_index %u' % (f.name, index)
print ''
for f, index in functions:
print '#define _gloffset_%s %s[%s_remap_index]' % (f.name, remap_table, f.name)
print ''
print '#endif /* !FEATURE_remap_table */'
print ''
for f, index in abi_functions + functions:
arg_string = gl_XML.create_parameter_string( f.parameters, 0 )
print 'typedef %s (GLAPIENTRYP _glptr_%s)(%s);' % (f.return_type, f.name, arg_string)
print '#define CALL_%s(disp, parameters) \\' % (f.name)
print ' (* GET_%s(disp)) parameters' % (f.name)
print 'static inline _glptr_%s GET_%s(struct _glapi_table *disp) {' % (f.name, f.name)
print ' return (_glptr_%s) (GET_by_offset(disp, _gloffset_%s));' % (f.name, f.name)
print '}'
print
print 'static inline void SET_%s(struct _glapi_table *disp, %s (GLAPIENTRYP fn)(%s)) {' % (f.name, f.return_type, arg_string)
print ' SET_by_offset(disp, _gloffset_%s, fn);' % (f.name)
print '}'
print
if alias_functions:
print ''
print '/* define aliases for compatibility */'
for f in alias_functions:
for name in f.entry_points:
if name != f.name:
print '#define CALL_%s(disp, parameters) CALL_%s(disp, parameters)' % (name, f.name)
print '#define GET_%s(disp) GET_%s(disp)' % (name, f.name)
print '#define SET_%s(disp, fn) SET_%s(disp, fn)' % (name, f.name)
print ''
print '#if FEATURE_remap_table'
for f in alias_functions:
for name in f.entry_points:
if name != f.name:
print '#define %s_remap_index %s_remap_index' % (name, f.name)
print '#endif /* FEATURE_remap_table */'
print ''
return
def show_usage():
print "Usage: %s [-f input_file_name] [-m mode] [-c ver]" % sys.argv[0]
print " -m mode Mode can be 'table' or 'remap_table'."
print " -c ver Version can be 'es1' or 'es2'."
sys.exit(1)
if __name__ == '__main__':
file_name = "gl_API.xml"
try:
(args, trail) = getopt.getopt(sys.argv[1:], "f:m:c:")
except Exception,e:
show_usage()
mode = "table"
es = None
for (arg,val) in args:
if arg == "-f":
file_name = val
elif arg == "-m":
mode = val
elif arg == "-c":
es = val
if mode == "table":
printer = PrintGlTable(es)
elif mode == "remap_table":
printer = PrintRemapTable(es)
else:
show_usage()
api = gl_XML.parse_GL_API( file_name )
if es is not None:
import gles_api
api_map = {
'es1': gles_api.es1_api,
'es2': gles_api.es2_api,
}
api.filter_functions(api_map[es])
printer.Print( api )
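# Example invocations (file names are illustrative; output goes to stdout):
#
#   python2 gl_table.py -f gl_API.xml -m table
#   python2 gl_table.py -f gl_API.xml -m remap_table -c es2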
| bsd-3-clause |
grahamsellers/Emerald | Emerald/gtest-1.6.0/test/gtest_test_utils.py | 397 | 10437 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest:
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
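# A short usage sketch (the command is illustrative):
#
#   p = Subprocess(['python', '--version'], capture_stderr=True)
#   if p.exited and p.exit_code == 0:
#       print p.output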
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO(vladl@google.com): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| mit |
cristianquaglio/odoo | addons/auth_oauth/auth_oauth.py | 321 | 1135 | from openerp.osv import osv, fields
class auth_oauth_provider(osv.osv):
"""Class defining the configuration values of an OAuth2 provider"""
_name = 'auth.oauth.provider'
_description = 'OAuth2 provider'
_order = 'name'
_columns = {
'name' : fields.char('Provider name', required=True), # Name of the OAuth2 entity, Google, LinkedIn, etc
'client_id' : fields.char('Client ID'), # Our identifier
'auth_endpoint' : fields.char('Authentication URL', required=True), # OAuth provider URL to authenticate users
'scope' : fields.char('Scope'), # OAUth user data desired to access
'validation_endpoint' : fields.char('Validation URL', required=True),# OAuth provider URL to validate tokens
'data_endpoint' : fields.char('Data URL'),
'enabled' : fields.boolean('Allowed'),
'css_class' : fields.char('CSS class'),
'body' : fields.char('Body', required=True),
'sequence' : fields.integer(),
}
_defaults = {
'enabled' : False,
'css_class' : "zocial",
}
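# A hypothetical sketch of creating a provider record through the old osv API
# (cr/uid and every field value below are illustrative only):
#
#   self.pool['auth.oauth.provider'].create(cr, uid, {
#       'name': 'Example OAuth2',
#       'auth_endpoint': 'https://auth.example.com/oauth2/authorize',
#       'validation_endpoint': 'https://auth.example.com/oauth2/tokeninfo',
#       'body': 'Log in with Example',
#       'enabled': True,
#   })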
| apache-2.0 |
luceatnobis/youtube-dl | youtube_dl/extractor/yesjapan.py | 64 | 2196 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
HEADRequest,
get_element_by_attribute,
parse_iso8601,
)
class YesJapanIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?yesjapan\.com/video/(?P<slug>[A-Za-z0-9\-]*)_(?P<id>[A-Za-z0-9]+)\.html'
_TEST = {
'url': 'http://www.yesjapan.com/video/japanese-in-5-20-wa-and-ga-particle-usages_726497834.html',
'md5': 'f0be416314e5be21a12b499b330c21cf',
'info_dict': {
'id': '726497834',
'title': 'Japanese in 5! #20 - WA And GA Particle Usages',
'description': 'This should clear up some issues most students of Japanese encounter with WA and GA....',
'ext': 'mp4',
'timestamp': 1416391590,
'upload_date': '20141119',
'thumbnail': r're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
video_url = self._og_search_video_url(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
timestamp = None
submit_info = get_element_by_attribute('class', 'pm-submit-data', webpage)
if submit_info:
timestamp = parse_iso8601(self._search_regex(
r'datetime="([^"]+)"', submit_info, 'upload date', fatal=False, default=None))
# attempt to resolve the final URL in order to get a proper extension
redirect_req = HEADRequest(video_url)
req = self._request_webpage(
redirect_req, video_id, note='Resolving final URL', errnote='Could not resolve final URL', fatal=False)
if req:
video_url = req.geturl()
formats = [{
'format_id': 'sd',
'url': video_url,
}]
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'timestamp': timestamp,
'thumbnail': thumbnail,
}
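# A minimal sketch of driving this extractor through the public API (the
# options dict is illustrative):
#
#   from youtube_dl import YoutubeDL
#   with YoutubeDL({'quiet': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.yesjapan.com/video/japanese-in-5-20-wa-and-ga-particle-usages_726497834.html',
#           download=False)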
| unlicense |
rossgoodwin/musapaedia | musapaedia/muse/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/specifiers.py | 258 | 28472 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import abc
import functools
import itertools
import re
from ._compat import string_types, with_metaclass
from .version import Version, LegacyVersion, parse
class InvalidSpecifier(ValueError):
"""
An invalid specifier was found, users should refer to PEP 440.
"""
class BaseSpecifier(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def __str__(self):
"""
Returns the str representation of this Specifier like object. This
should be representative of the Specifier itself.
"""
@abc.abstractmethod
def __hash__(self):
"""
Returns a hash value for this Specifier like object.
"""
@abc.abstractmethod
def __eq__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are equal.
"""
@abc.abstractmethod
def __ne__(self, other):
"""
Returns a boolean representing whether or not the two Specifier like
objects are not equal.
"""
@abc.abstractproperty
def prereleases(self):
"""
Returns whether or not pre-releases as a whole are allowed by this
specifier.
"""
@prereleases.setter
def prereleases(self, value):
"""
Sets whether or not pre-releases as a whole are allowed by this
specifier.
"""
@abc.abstractmethod
def contains(self, item, prereleases=None):
"""
Determines if the given item is contained within this specifier.
"""
@abc.abstractmethod
def filter(self, iterable, prereleases=None):
"""
Takes an iterable of items and filters them so that only items which
are contained within this specifier are allowed in it.
"""
class _IndividualSpecifier(BaseSpecifier):
_operators = {}
def __init__(self, spec="", prereleases=None):
match = self._regex.search(spec)
if not match:
raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec))
self._spec = (
match.group("operator").strip(),
match.group("version").strip(),
)
# Store whether or not this Specifier should accept prereleases
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<{0}({1!r}{2})>".format(
self.__class__.__name__,
str(self),
pre,
)
def __str__(self):
return "{0}{1}".format(*self._spec)
def __hash__(self):
return hash(self._spec)
def __eq__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec == other._spec
def __ne__(self, other):
if isinstance(other, string_types):
try:
other = self.__class__(other)
except InvalidSpecifier:
return NotImplemented
elif not isinstance(other, self.__class__):
return NotImplemented
return self._spec != other._spec
def _get_operator(self, op):
return getattr(self, "_compare_{0}".format(self._operators[op]))
def _coerce_version(self, version):
if not isinstance(version, (LegacyVersion, Version)):
version = parse(version)
return version
@property
def prereleases(self):
return self._prereleases
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def contains(self, item, prereleases=None):
# Determine if prereleases are to be allowed or not.
if prereleases is None:
prereleases = self.prereleases
# Normalize item to a Version or LegacyVersion, this allows us to have
# a shortcut for ``"2.0" in Specifier(">=2")``
item = self._coerce_version(item)
# Determine if we should be supporting prereleases in this specifier
# or not, if we do not support prereleases than we can short circuit
# logic if this version is a prereleases.
if item.is_prerelease and not prereleases:
return False
# Actually do the comparison to determine if this item is contained
# within this Specifier or not.
return self._get_operator(self._spec[0])(item, self._spec[1])
def filter(self, iterable, prereleases=None):
yielded = False
found_prereleases = []
kw = {"prereleases": prereleases if prereleases is not None else True}
# Attempt to iterate over all the values in the iterable and if any of
# them match, yield them.
for version in iterable:
parsed_version = self._coerce_version(version)
if self.contains(parsed_version, **kw):
# If our version is a prerelease, and we were not set to allow
# prereleases, then we'll store it for later in case nothing
# else matches this specifier.
if (parsed_version.is_prerelease
and not (prereleases or self.prereleases)):
found_prereleases.append(version)
# Either this is not a prerelease, or we should have been
# accepting prereleases from the beginning.
else:
yielded = True
yield version
# Now that we've iterated over everything, determine if we've yielded
# any values, and if we have not and we have any prereleases stored up
# then we will go ahead and yield the prereleases.
if not yielded and found_prereleases:
for version in found_prereleases:
yield version
class LegacySpecifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(==|!=|<=|>=|<|>))
\s*
(?P<version>
[^\s]* # We just match everything, except for whitespace since this
# is a "legacy" specifier and the version string can be just
# about anything.
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
}
def _coerce_version(self, version):
if not isinstance(version, LegacyVersion):
version = LegacyVersion(str(version))
return version
def _compare_equal(self, prospective, spec):
return prospective == self._coerce_version(spec)
def _compare_not_equal(self, prospective, spec):
return prospective != self._coerce_version(spec)
def _compare_less_than_equal(self, prospective, spec):
return prospective <= self._coerce_version(spec)
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= self._coerce_version(spec)
def _compare_less_than(self, prospective, spec):
return prospective < self._coerce_version(spec)
def _compare_greater_than(self, prospective, spec):
return prospective > self._coerce_version(spec)
def _require_version_compare(fn):
@functools.wraps(fn)
def wrapped(self, prospective, spec):
if not isinstance(prospective, Version):
return False
return fn(self, prospective, spec)
return wrapped
class Specifier(_IndividualSpecifier):
_regex = re.compile(
r"""
^
\s*
(?P<operator>(~=|==|!=|<=|>=|<|>|===))
(?P<version>
(?:
# The identity operators allow for an escape hatch that will
# do an exact string match of the version you wish to install.
# This will not be parsed by PEP 440 and we cannot determine
# any semantic meaning from it. This operator is discouraged
# but included entirely as an escape hatch.
(?<====) # Only match for the identity operator
\s*
[^\s]* # We just match everything, except for whitespace
# since we are only testing for strict identity.
)
|
(?:
# The (non)equality operators allow for wild card and local
# versions to be specified so we have to define these two
# operators separately to enable that.
(?<===|!=) # Only match for equals and not equals
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
# You cannot use a wild card and a dev or local version
# together so group them with a | and make them optional.
(?:
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
(?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local
|
\.\* # Wild card syntax of .*
)?
)
|
(?:
# The compatible operator requires at least two digits in the
# release segment.
(?<=~=) # Only match for the compatible operator
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *)
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
|
(?:
# All other operators only allow a sub set of what the
# (non)equality operators do. Specifically they do not allow
# local versions to be specified nor do they allow the prefix
# matching wild cards.
(?<!==|!=|~=) # We have special cases for these
# operators so we want to make sure they
# don't match here.
\s*
v?
(?:[0-9]+!)? # epoch
[0-9]+(?:\.[0-9]+)* # release
(?: # pre release
[-_\.]?
(a|b|c|rc|alpha|beta|pre|preview)
[-_\.]?
[0-9]*
)?
(?: # post release
(?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*)
)?
(?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release
)
)
\s*
$
""",
re.VERBOSE | re.IGNORECASE,
)
_operators = {
"~=": "compatible",
"==": "equal",
"!=": "not_equal",
"<=": "less_than_equal",
">=": "greater_than_equal",
"<": "less_than",
">": "greater_than",
"===": "arbitrary",
}
@_require_version_compare
def _compare_compatible(self, prospective, spec):
# Compatible releases have an equivalent combination of >= and ==. That
# is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to
# implement this in terms of the other specifiers instead of
# implementing it ourselves. The only thing we need to do is construct
# the other specifiers.
# We want everything but the last item in the version, but we want to
# ignore post and dev releases and we want to treat the pre-release as
# it's own separate segment.
prefix = ".".join(
list(
itertools.takewhile(
lambda x: (not x.startswith("post")
and not x.startswith("dev")),
_version_split(spec),
)
)[:-1]
)
# Add the prefix notation to the end of our string
prefix += ".*"
return (self._get_operator(">=")(prospective, spec)
and self._get_operator("==")(prospective, prefix))
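# For instance, the compatible-release clause ``~=2.2`` behaves like
# ``>=2.2, ==2.*``:
#
#   Specifier('~=2.2').contains('2.5') # True
#   Specifier('~=2.2').contains('3.0') # False (fails the ==2.* prefix match)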
@_require_version_compare
def _compare_equal(self, prospective, spec):
# We need special logic to handle prefix matching
if spec.endswith(".*"):
# Split the spec out by dots, and pretend that there is an implicit
# dot in between a release segment and a pre-release segment.
spec = _version_split(spec[:-2]) # Remove the trailing .*
# Split the prospective version out by dots, and pretend that there
# is an implicit dot in between a release segment and a pre-release
# segment.
prospective = _version_split(str(prospective))
# Shorten the prospective version to be the same length as the spec
# so that we can determine if the specifier is a prefix of the
# prospective version or not.
prospective = prospective[:len(spec)]
# Pad out our two sides with zeros so that they both equal the same
# length.
spec, prospective = _pad_version(spec, prospective)
else:
# Convert our spec string into a Version
spec = Version(spec)
# If the specifier does not have a local segment, then we want to
# act as if the prospective version also does not have a local
# segment.
if not spec.local:
prospective = Version(prospective.public)
return prospective == spec
@_require_version_compare
def _compare_not_equal(self, prospective, spec):
return not self._compare_equal(prospective, spec)
@_require_version_compare
def _compare_less_than_equal(self, prospective, spec):
return prospective <= Version(spec)
@_require_version_compare
def _compare_greater_than_equal(self, prospective, spec):
return prospective >= Version(spec)
@_require_version_compare
def _compare_less_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is less than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective < spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a pre-release version, that we do not accept pre-release
# versions for the version mentioned in the specifier (e.g. <3.1 should
# not match 3.1.dev0, but should match 3.0.dev0).
if not spec.is_prerelease and prospective.is_prerelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# less than the spec version *and* it's not a pre-release of the same
# version in the spec.
return True
@_require_version_compare
def _compare_greater_than(self, prospective, spec):
# Convert our spec to a Version instance, since we'll want to work with
# it as a version.
spec = Version(spec)
# Check to see if the prospective version is greater than the spec
# version. If it's not we can short circuit and just return False now
# instead of doing extra unneeded work.
if not prospective > spec:
return False
# This special case is here so that, unless the specifier itself
# includes is a post-release version, that we do not accept
# post-release versions for the version mentioned in the specifier
# (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0).
if not spec.is_postrelease and prospective.is_postrelease:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# Ensure that we do not allow a local version of the version mentioned
# in the specifier, which is technically greater than, to match.
if prospective.local is not None:
if Version(prospective.base_version) == Version(spec.base_version):
return False
# If we've gotten to here, it means that prospective version is both
# greater than the spec version *and* it's not a pre-release of the
# same version in the spec.
return True
def _compare_arbitrary(self, prospective, spec):
return str(prospective).lower() == str(spec).lower()
@property
def prereleases(self):
# If there is an explicit prereleases set for this, then we'll just
# blindly use that.
if self._prereleases is not None:
return self._prereleases
# Look at all of our specifiers and determine if they are inclusive
# operators, and if they are if they are including an explicit
# prerelease.
operator, version = self._spec
if operator in ["==", ">=", "<=", "~="]:
# The == specifier can include a trailing .*, if it does we
# want to remove before parsing.
if operator == "==" and version.endswith(".*"):
version = version[:-2]
# Parse the version, and if it is a pre-release than this
# specifier allows pre-releases.
if parse(version).is_prerelease:
return True
return False
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$")
def _version_split(version):
result = []
for item in version.split("."):
match = _prefix_regex.search(item)
if match:
result.extend(match.groups())
else:
result.append(item)
return result
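# For example, a trailing pre-release marker is split off the release digits:
#
#   _version_split('1.4.2rc1') # -> ['1', '4', '2', 'rc1']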
def _pad_version(left, right):
left_split, right_split = [], []
# Get the release segment of our versions
left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left)))
right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right)))
# Get the rest of our versions
left_split.append(left[len(left_split[0]):])
right_split.append(right[len(right_split[0]):])
# Insert our padding
left_split.insert(
1,
["0"] * max(0, len(right_split[0]) - len(left_split[0])),
)
right_split.insert(
1,
["0"] * max(0, len(left_split[0]) - len(right_split[0])),
)
return (
list(itertools.chain(*left_split)),
list(itertools.chain(*right_split)),
)
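# Example of the zero padding (release segments already split into parts):
#
#   _pad_version(['1', '2'], ['1', '10', '0'])
#   # -> (['1', '2', '0'], ['1', '10', '0'])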
class SpecifierSet(BaseSpecifier):
def __init__(self, specifiers="", prereleases=None):
# Split on , to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Parse each individual specifier, attempting first to make it a
# Specifier and falling back to a LegacySpecifier.
parsed = set()
for specifier in specifiers:
try:
parsed.add(Specifier(specifier))
except InvalidSpecifier:
parsed.add(LegacySpecifier(specifier))
# Turn our parsed specifiers into a frozen set and save them for later.
self._specs = frozenset(parsed)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
def __repr__(self):
pre = (
", prereleases={0!r}".format(self.prereleases)
if self._prereleases is not None
else ""
)
return "<SpecifierSet({0!r}{1})>".format(str(self), pre)
def __str__(self):
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self):
return hash(self._specs)
def __and__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif self._prereleases is not None and other._prereleases is None:
specifier._prereleases = self._prereleases
elif self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease "
"overrides."
)
return specifier
def __eq__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __ne__(self, other):
if isinstance(other, string_types):
other = SpecifierSet(other)
elif isinstance(other, _IndividualSpecifier):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs != other._specs
@property
def prereleases(self):
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
# Note: The use of any() here means that an empty set of specifiers
# will always return False, this is an explicit design decision.
return any(s.prereleases for s in self._specs)
@prereleases.setter
def prereleases(self, value):
self._prereleases = value
def contains(self, item, prereleases=None):
# Ensure that our item is a Version or LegacyVersion instance.
if not isinstance(item, (LegacyVersion, Version)):
item = parse(item)
# We can determine if we're going to allow pre-releases by looking to
# see if any of the underlying items supports them. If none of them do
# and this item is a pre-release then we do not allow it and we can
# short circuit that here.
# Note: This means that 1.0.dev1 would not be contained in something
# like >=1.0.devabc, however it would be in >=1.0.devabc,>0.0.dev0
if (not (self.prereleases or prereleases)) and item.is_prerelease:
return False
# Determine if we're forcing a prerelease or not, we bypass
# self.prereleases here and use self._prereleases because we want to
# only take into consideration actual *forced* values. The underlying
# specifiers will handle the other logic.
# The logic here is: If prereleases is anything but None, we'll just
# go ahead and continue to use that. However if
# prereleases is None, then we'll use whatever the
# value of self._prereleases is as long as it is not
# None itself.
if prereleases is None and self._prereleases is not None:
prereleases = self._prereleases
# We simply dispatch to the underlying specs here to make sure that the
# given version is contained within all of them.
# Note: This use of all() here means that an empty set of specifiers
# will always return True, this is an explicit design decision.
return all(
s.contains(item, prereleases=prereleases)
for s in self._specs
)
def filter(self, iterable, prereleases=None):
# Determine if we're forcing a prerelease or not, we bypass
# self.prereleases here and use self._prereleases because we want to
# only take into consideration actual *forced* values. The underlying
# specifiers will handle the other logic.
# The logic here is: If prereleases is anything but None, we'll just
# go ahead and continue to use that. However if
# prereleases is None, then we'll use whatever the
# value of self._prereleases is as long as it is not
# None itself.
if prereleases is None and self._prereleases is not None:
prereleases = self._prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
for spec in self._specs:
iterable = spec.filter(iterable, prereleases=prereleases)
return iterable
# If we do not have any specifiers, then we need to have a rough filter
# which will filter out any pre-releases, unless there are no final
# releases, and which will filter out LegacyVersion in general.
else:
filtered = []
found_prereleases = []
for item in iterable:
# Ensure that we have some kind of Version class for this item.
if not isinstance(item, (LegacyVersion, Version)):
parsed_version = parse(item)
else:
parsed_version = item
# Filter out any item which is parsed as a LegacyVersion
if isinstance(parsed_version, LegacyVersion):
continue
# Store any item which is a pre-release for later unless we've
# already found a final version or we are accepting prereleases
if parsed_version.is_prerelease and not prereleases:
if not filtered:
found_prereleases.append(item)
else:
filtered.append(item)
# If we've found no items except for pre-releases, then we'll go
# ahead and use the pre-releases
if not filtered and found_prereleases and prereleases is None:
return found_prereleases
return filtered
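# A short usage sketch for the class above (illustrative, not part of the
# original module):
#
#   >>> spec = SpecifierSet(">=1.0,<2.0")
#   >>> spec.contains("1.4")
#   True
#   >>> list(spec.filter(["0.9", "1.2", "1.5a1", "2.0"]))
#   ['1.2']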
| mit |
noslenfa/tdjangorest | uw/lib/python2.7/site-packages/django/contrib/gis/tests/layermap/tests.py | 100 | 14089 | # coding: utf-8
from __future__ import absolute_import, unicode_literals
import os
from copy import copy
from decimal import Decimal
from django.contrib.gis.gdal import DataSource
from django.contrib.gis.tests.utils import mysql
from django.contrib.gis.utils.layermapping import (LayerMapping, LayerMapError,
InvalidDecimal, MissingForeignKey)
from django.db import router
from django.conf import settings
from django.test import TestCase
from django.utils import unittest
from django.utils._os import upath
from .models import (
City, County, CountyFeat, Interstate, ICity1, ICity2, Invalid, State,
city_mapping, co_mapping, cofeat_mapping, inter_mapping)
shp_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), os.pardir, 'data'))
city_shp = os.path.join(shp_path, 'cities', 'cities.shp')
co_shp = os.path.join(shp_path, 'counties', 'counties.shp')
inter_shp = os.path.join(shp_path, 'interstates', 'interstates.shp')
invalid_shp = os.path.join(shp_path, 'invalid', 'emptypoints.shp')
# Dictionaries to hold what's expected in the county shapefile.
NAMES = ['Bexar', 'Galveston', 'Harris', 'Honolulu', 'Pueblo']
NUMS = [1, 2, 1, 19, 1] # Number of polygons for each.
STATES = ['Texas', 'Texas', 'Texas', 'Hawaii', 'Colorado']
class LayerMapTest(TestCase):
def test_init(self):
"Testing LayerMapping initialization."
# Model field that does not exist.
bad1 = copy(city_mapping)
bad1['foobar'] = 'FooField'
# Shapefile field that does not exist.
bad2 = copy(city_mapping)
bad2['name'] = 'Nombre'
# Nonexistent geographic field type.
bad3 = copy(city_mapping)
bad3['point'] = 'CURVE'
# Incrementing through the bad mapping dictionaries and
# ensuring that a LayerMapError is raised.
for bad_map in (bad1, bad2, bad3):
with self.assertRaises(LayerMapError):
lm = LayerMapping(City, city_shp, bad_map)
# A LookupError should be thrown for bogus encodings.
with self.assertRaises(LookupError):
lm = LayerMapping(City, city_shp, city_mapping, encoding='foobar')
def test_simple_layermap(self):
"Test LayerMapping import of a simple point shapefile."
# Setting up for the LayerMapping.
lm = LayerMapping(City, city_shp, city_mapping)
lm.save()
# There should be three cities in the shape file.
self.assertEqual(3, City.objects.count())
# Opening up the shapefile, and verifying the values in each
# of the features made it to the model.
ds = DataSource(city_shp)
layer = ds[0]
for feat in layer:
city = City.objects.get(name=feat['Name'].value)
self.assertEqual(feat['Population'].value, city.population)
self.assertEqual(Decimal(str(feat['Density'])), city.density)
self.assertEqual(feat['Created'].value, city.dt)
# Comparing the geometries.
pnt1, pnt2 = feat.geom, city.point
self.assertAlmostEqual(pnt1.x, pnt2.x, 5)
self.assertAlmostEqual(pnt1.y, pnt2.y, 5)
def test_layermap_strict(self):
"Testing the `strict` keyword, and import of a LineString shapefile."
# When the `strict` keyword is set an error encountered will force
# the importation to stop.
with self.assertRaises(InvalidDecimal):
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True, strict=True)
Interstate.objects.all().delete()
# This LayerMapping should work b/c `strict` is not set.
lm = LayerMapping(Interstate, inter_shp, inter_mapping)
lm.save(silent=True)
# Two interstates should have been imported correctly.
self.assertEqual(2, Interstate.objects.count())
# Verifying the values in the layer w/the model.
ds = DataSource(inter_shp)
# Only the first two features of this shapefile are valid.
valid_feats = ds[0][:2]
for feat in valid_feats:
istate = Interstate.objects.get(name=feat['Name'].value)
if feat.fid == 0:
self.assertEqual(Decimal(str(feat['Length'])), istate.length)
elif feat.fid == 1:
# Everything but the first two decimal digits were truncated,
# because the Interstate model's `length` field has decimal_places=2.
self.assertAlmostEqual(feat.get('Length'), float(istate.length), 2)
for p1, p2 in zip(feat.geom, istate.path):
self.assertAlmostEqual(p1[0], p2[0], 6)
self.assertAlmostEqual(p1[1], p2[1], 6)
def county_helper(self, county_feat=True):
"Helper function for ensuring the integrity of the mapped County models."
for name, n, st in zip(NAMES, NUMS, STATES):
# Should only be one record b/c of `unique` keyword.
c = County.objects.get(name=name)
self.assertEqual(n, len(c.mpoly))
self.assertEqual(st, c.state.name) # Checking ForeignKey mapping.
# Multiple records because `unique` was not set.
if county_feat:
qs = CountyFeat.objects.filter(name=name)
self.assertEqual(n, qs.count())
def test_layermap_unique_multigeometry_fk(self):
"Testing the `unique`, and `transform`, geometry collection conversion, and ForeignKey mappings."
# All the following should work.
try:
# Telling LayerMapping that we want no transformations performed on the data.
lm = LayerMapping(County, co_shp, co_mapping, transform=False)
# Specifying the source spatial reference system via the `source_srs` keyword.
lm = LayerMapping(County, co_shp, co_mapping, source_srs=4269)
lm = LayerMapping(County, co_shp, co_mapping, source_srs='NAD83')
# Unique may take tuple or string parameters.
for arg in ('name', ('name', 'mpoly')):
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique=arg)
except:
self.fail('No exception should be raised for proper use of keywords.')
# Testing invalid params for the `unique` keyword.
for e, arg in ((TypeError, 5.0), (ValueError, 'foobar'), (ValueError, ('name', 'mpolygon'))):
self.assertRaises(e, LayerMapping, County, co_shp, co_mapping, transform=False, unique=arg)
# No source reference system defined in the shapefile, should raise an error.
if not mysql:
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, co_mapping)
# Passing in invalid ForeignKey mapping parameters -- must be a dictionary
# mapping for the model the ForeignKey points to.
bad_fk_map1 = copy(co_mapping); bad_fk_map1['state'] = 'name'
bad_fk_map2 = copy(co_mapping); bad_fk_map2['state'] = {'nombre' : 'State'}
self.assertRaises(TypeError, LayerMapping, County, co_shp, bad_fk_map1, transform=False)
self.assertRaises(LayerMapError, LayerMapping, County, co_shp, bad_fk_map2, transform=False)
# There exist no State models for the ForeignKey mapping to work -- should raise
# a MissingForeignKey exception (this error would be ignored if the `strict`
# keyword is not set).
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
self.assertRaises(MissingForeignKey, lm.save, silent=True, strict=True)
# Now creating the state models so the ForeignKey mapping may work.
State.objects.bulk_create([
State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
])
# If a mapping is specified as a collection, all OGR fields that
# are not collections will be converted into them. For example,
# a Point column would be converted to MultiPoint. Other things being done
# w/the keyword args:
# `transform=False`: Specifies that no transform is to be done; this
# has the effect of ignoring the spatial reference check (because the
# county shapefile does not have implicit spatial reference info).
#
# `unique='name'`: Creates models on the condition that they have
# unique county names; geometries from each feature however will be
# appended to the geometry collection of the unique model. Thus,
# all of the various islands in Honolulu county will be in one
# database record with a MULTIPOLYGON type.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
lm.save(silent=True, strict=True)
# A reference that doesn't use the unique keyword; a new database record will
# be created for each polygon.
lm = LayerMapping(CountyFeat, co_shp, cofeat_mapping, transform=False)
lm.save(silent=True, strict=True)
# The county helper is called to ensure integrity of County models.
self.county_helper()
def test_test_fid_range_step(self):
"Tests the `fid_range` keyword and the `step` keyword of .save()."
# Function for clearing out all the counties before testing.
def clear_counties(): County.objects.all().delete()
State.objects.bulk_create([
State(name='Colorado'), State(name='Hawaii'), State(name='Texas')
])
# Initializing the LayerMapping object to use in these tests.
lm = LayerMapping(County, co_shp, co_mapping, transform=False, unique='name')
# Bad feature id ranges should raise a type error.
bad_ranges = (5.0, 'foo', co_shp)
for bad in bad_ranges:
self.assertRaises(TypeError, lm.save, fid_range=bad)
# Step keyword should not be allowed w/`fid_range`.
fr = (3, 5) # layer[3:5]
self.assertRaises(LayerMapError, lm.save, fid_range=fr, step=10)
lm.save(fid_range=fr)
# Features IDs 3 & 4 are for Galveston County, Texas -- only
# one model is returned because the `unique` keyword was set.
qs = County.objects.all()
self.assertEqual(1, qs.count())
self.assertEqual('Galveston', qs[0].name)
# Features IDs 5 and beyond for Honolulu County, Hawaii, and
# FID 0 is for Pueblo County, Colorado.
clear_counties()
lm.save(fid_range=slice(5, None), silent=True, strict=True) # layer[5:]
lm.save(fid_range=slice(None, 1), silent=True, strict=True) # layer[:1]
# Only Pueblo & Honolulu counties should be present because of
# the `unique` keyword. Have to set `order_by` on this QuerySet
# or else MySQL will return a different ordering than the other dbs.
qs = County.objects.order_by('name')
self.assertEqual(2, qs.count())
hi, co = tuple(qs)
hi_idx, co_idx = tuple(map(NAMES.index, ('Honolulu', 'Pueblo')))
self.assertEqual('Pueblo', co.name)
self.assertEqual(NUMS[co_idx], len(co.mpoly))
self.assertEqual('Honolulu', hi.name)
self.assertEqual(NUMS[hi_idx], len(hi.mpoly))
# Testing the `step` keyword -- should get the same counties
# regardless of we use a step that divides equally, that is odd,
# or that is larger than the dataset.
for st in (4,7,1000):
clear_counties()
lm.save(step=st, strict=True)
self.county_helper(county_feat=False)
def test_model_inheritance(self):
"Tests LayerMapping on inherited models. See #12093."
icity_mapping = {'name' : 'Name',
'population' : 'Population',
'density' : 'Density',
'point' : 'POINT',
'dt' : 'Created',
}
# Parent model has geometry field.
lm1 = LayerMapping(ICity1, city_shp, icity_mapping)
lm1.save()
# Grandparent has geometry field.
lm2 = LayerMapping(ICity2, city_shp, icity_mapping)
lm2.save()
self.assertEqual(6, ICity1.objects.count())
self.assertEqual(3, ICity2.objects.count())
def test_invalid_layer(self):
"Tests LayerMapping on invalid geometries. See #15378."
invalid_mapping = {'point': 'POINT'}
lm = LayerMapping(Invalid, invalid_shp, invalid_mapping,
source_srs=4326)
lm.save(silent=True)
def test_textfield(self):
"Tests that String content fits also in a TextField"
mapping = copy(city_mapping)
mapping['name_txt'] = 'Name'
lm = LayerMapping(City, city_shp, mapping)
lm.save(silent=True, strict=True)
self.assertEqual(City.objects.count(), 3)
self.assertEqual(City.objects.all().order_by('name_txt')[0].name_txt, "Houston")
def test_encoded_name(self):
""" Test a layer containing utf-8-encoded name """
city_shp = os.path.join(shp_path, 'ch-city', 'ch-city.shp')
lm = LayerMapping(City, city_shp, city_mapping)
lm.save(silent=True, strict=True)
self.assertEqual(City.objects.count(), 1)
self.assertEqual(City.objects.all()[0].name, "Zürich")
class OtherRouter(object):
def db_for_read(self, model, **hints):
return 'other'
def db_for_write(self, model, **hints):
return self.db_for_read(model, **hints)
def allow_relation(self, obj1, obj2, **hints):
return None
def allow_syncdb(self, db, model):
return True
class LayerMapRouterTest(TestCase):
def setUp(self):
self.old_routers = router.routers
router.routers = [OtherRouter()]
def tearDown(self):
router.routers = self.old_routers
@unittest.skipUnless(len(settings.DATABASES) > 1, 'multiple databases required')
def test_layermapping_default_db(self):
lm = LayerMapping(City, city_shp, city_mapping)
self.assertEqual(lm.using, 'other')
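# A minimal LayerMapping usage sketch mirroring the pattern exercised in the
# tests above (the model, shapefile path and field names are hypothetical):
#
#   mapping = {'name': 'Name', 'mpoly': 'MULTIPOLYGON'}
#   lm = LayerMapping(MyModel, '/path/to/data.shp', mapping, transform=False)
#   lm.save(silent=True, strict=True)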
| apache-2.0 |
metadirective/GPicSync | src/kmlGen.py | 1 | 10726 |
###############################################################################
#
# (c) francois.schnell francois.schnell@gmail.com
# http://francois.schnell.free.fr
#
# This script is released under the GPL v2 license
#
###############################################################################
from geoexif import *
from gpx import *
import os
import sys
import time
from _thread import start_new_thread
class KML(object):
"""
A quick and dirty kml generator in progress for gpicsync
(for live viewing in Google Earth)
"""
def __init__(self,fileName,name,url="",timeStampOrder=False,utc="0",eleMode=0,iconsStyle=0,gmaps=False):
self.f=open(fileName+".kml","w")
self.url=url
self.timeStampOrder=timeStampOrder
self.eleMode=eleMode # Elevation mode
self.iconsStyle=iconsStyle
#self.utcOffset=utc
if float(utc)>=0: sign="+"
if float(utc)<0: sign="-"
self.utcOffset=sign+str(abs(int(float(utc))))+":00"
#print ">>> self.utcOffest in kml for time stamps: ", self.utcOffset
kmlHead_p1="""<?xml version="1.0" encoding="UTF-8"?>
<kml xmlns="http://earth.google.com/kml/2.1">
<Document>
<name>"""+name+"""</name>"""
if (self.iconsStyle==0) and (gmaps==False):
kmlHead_p2="""
<Style id="hoverIcon0">
<IconStyle>
<scale>5.0</scale>
</IconStyle>
</Style>
<Style id="defaultIcon0">
<LabelStyle>
<scale>0</scale>
</LabelStyle>
<IconStyle>
<scale>1.0</scale>
</IconStyle>
</Style>
<StyleMap id="defaultStyle1">
<Pair>
<key>normal</key>
<styleUrl>#defaultIcon0</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#hoverIcon0</styleUrl>
</Pair>
</StyleMap>"""
else: kmlHead_p2=""
kmlHead_p3="""
<Style id="lineStyle">
<PolyStyle>
<color>3feeee17</color>
</PolyStyle>
<LineStyle>
<color>99eeee17</color>
<width>6</width>
</LineStyle>
</Style>
<Style id="camera">
<scale>1.1</scale>
<IconStyle>
<color>ffffffff</color>
<Icon>
<href>http://maps.google.com/mapfiles/kml/pal4/icon38.png</href>
<x>192</x>
<y>96</y>
<w>32</w>
<h>32</h>
</Icon>
<hotSpot x="20" y="2" xunits="pixels" yunits="pixels"/>
</IconStyle>
</Style>
"""
#<href>root://icons/palette-4.png</href>
self.f.write(kmlHead_p1+kmlHead_p2+kmlHead_p3)
def writeInKml(self,text):
"""
Write the given string to the kml file
"""
self.f.write(text)
def footerPlacemark(self,picName,type="GE"):
"""
Return a footer for the description of a placemark
"""
pmDescriptionFooter=""
mediaFile=picName.split(".")[0]
for ext in [".mp3",".wma",".ogg",".wav"]:
if os.path.exists(mediaFile+ext):
print ("Found mediaFile= ",mediaFile+ext)
if type=="GE":
pmDescriptionFooter="<br><br><a href='"+\
mediaFile+ext+"'>Play Audio</a>"
elif type=="GM":
pmDescriptionFooter="<br><br><a href='"+\
self.url+os.path.basename(picName.split(".")[0])+ext+"'>Play Audio</a>"
for ext in [".wmv",".mov",".avi"]:
if os.path.exists(mediaFile+ext):
print ("Found mediaFile= ",mediaFile+ext)
if type=="GE":
pmDescriptionFooter="<br><br><a href='"+\
mediaFile+ext+"'>Play Video</a>"
elif type=="GM":
pmDescriptionFooter="<br><br><a href='"+\
self.url+os.path.basename(picName.split(".")[0])+ext+"'>Play Video</a>"
if os.path.exists(mediaFile+".txt"):
print ("Found .txt file to add= ",mediaFile+".txt")
fileHandle = open (mediaFile+".txt")
pmDescriptionFooter=pmDescriptionFooter+"<br><br>"+fileHandle.read()
fileHandle.close()
return pmDescriptionFooter
def placemark(self,picName="",lat="",long="",ele="",width="800",height="600",
timeStamp="",elevation=""):
"""
Creates a placemark tag for the given picture in the kml file.
If only a picture path is given, latitude and longitude will
be read from the picture's EXIF data.
It's also possible to pass the values as arguments
(a string representing decimal degrees, - sign ok)
"""
if elevation=="" or elevation=="None": elevation="0"
if self.timeStampOrder==True:
timeStamp1="<TimeStamp><when>"+timeStamp+self.utcOffset+"</when> </TimeStamp>\n"
timeStamp2="<TimeSpan><begin>"+timeStamp+self.utcOffset+"</begin></TimeSpan>\n"
timeStamp=timeStamp1
else:
timeStamp=""
print ("timeStamp=",timeStamp)
w=float(width)
h=float(height)
if w>h:
print ("width > height")
width=(600./w)*w
height=(600./w)*h
if h>w:
print ("height > width")
height=(400./h)*h
width=(400./h)*w
width=str(int(width))
height=str(int(height))
if lat and long == "":
mypicture=GeoExif(picName)
lat=mypicture.readLatitude()
long=mypicture.readLongitude()
pmHead="\n\n<Placemark>\n<name>"+\
os.path.basename(picName)+"</name>\n"
if self.eleMode==1 or self.eleMode==2:
eleAdd="\n<altitudeMode>absolute</altitudeMode>"
else: eleAdd=""
if self.iconsStyle==1:
iconLook="<styleUrl>#camera</styleUrl>"
elif self.iconsStyle==0:
iconLook="<styleUrl>#defaultStyle1</styleUrl><Style><IconStyle><Icon><href>thumbs/thumb_"+\
os.path.basename(picName)+"</href></Icon></IconStyle></Style>"\
#Adding a footer to the description
pmDescriptionFooter=self.footerPlacemark(picName,type="GE")
pictureName=os.path.basename(picName)
if sys.platform == 'win32':
pictureName=os.path.basename(picName).lower()
pmDescription="<description><![CDATA["+\
"<img src='"+self.url+pictureName+"' width='"+width+"' height='"+height+"'/>"+\
pmDescriptionFooter+\
"]]>\n</description>\n"+iconLook+\
"\n<Point>"+eleAdd+\
"\n<coordinates>"+str(long)+","+str(lat)+","+elevation+\
"</coordinates>\n</Point>\n"+timeStamp
pmTail="</Placemark>"
self.f.write(pmHead)
self.f.write(pmDescription)
self.f.write(pmTail)
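# Sizing illustration for the scaling above (hypothetical dimensions): an
# 800x600 photo is shown at 600x450 in the balloon (landscape capped at a
# width of 600), while a 600x800 photo is shown at 300x400 (portrait capped
# at a height of 400).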
def placemark4Gmaps(self,picName="",lat="",long="",width="400",height="300",elevation=""):
"""
The same as placemark but with special values and features for G maps.
Creates a placemark tag for the given picture in the kml file.
If only a picture path is given, latitude and longitude will
be read from the picture's EXIF data.
It's also possible to pass the values as arguments
(a string representing decimal degrees, - sign ok)
"""
w=float(width)
h=float(height)
if w>h:
print ("width > height")
width=(200./w)*w
height=(200./w)*h
if h>w:
print ("height > width")
height=(200./h)*h
width=(200./h)*w
width=str(int(width))
height=str(int(height))
if lat and long == "":
mypicture=GeoExif(picName)
lat=mypicture.readLatitude()
long=mypicture.readLongitude()
pmHead="\n\n<Placemark>\n<name>"+\
os.path.basename(picName)+"</name>\n"
iconLook="<styleUrl>#camera</styleUrl>"
#Adding a footer to the description
pmDescriptionFooter=self.footerPlacemark(picName,type="GM")
pmDescription="<description><![CDATA["+\
"<a href='"+self.url+os.path.basename(picName)+"' target='_blank'> <img src='"+\
self.url+"thumbs/thumb_"+os.path.basename(picName)+"'/></a>"+\
pmDescriptionFooter+\
"]]>"+\
"</description>\n"+iconLook+\
"\n<Point>"+\
"\n<coordinates>"+str(long)+","+str(lat)+","+elevation+\
"</coordinates>\n</Point>\n"
pmTail="</Placemark>"
self.f.write(pmHead)
self.f.write(pmDescription)
self.f.write(pmTail)
def path(self,gpxFile,cut=500):
""" Creates the path of the GPX file in the kml"""
self.f.write("\n<Folder>\n<name>Track</name>")
i=1 # an iterator for the gpx file
part=cut # cut the gpx file into parts (to be sure it displays in GM)
j=1 #Path j (a number for each section)
if self.eleMode==1:
pathAdd="\n<altitudeMode>absolute</altitudeMode>"
elif self.eleMode==2:
pathAdd="\n<altitudeMode>absolute</altitudeMode>\n<extrude>1</extrude>"
else: pathAdd=""
def makeHeadPath(j):
headPath="\n<Placemark>\n<name>Path "+str(j)+"</name>\n"\
+"<styleUrl>#lineStyle</styleUrl>\n<LineString>\n<tessellate>1</tessellate>"\
+pathAdd+"\n<coordinates>\n"
return headPath
endPath="\n</coordinates>\n</LineString>\n</Placemark>\n\n"
bodyPath=""
myGpx=Gpx(gpxFile)
track=myGpx.extract()
for rec in track:
if rec['ele']=="None" or rec['ele']=="": rec['ele']="0"
if i<part:
bodyPath=bodyPath+rec['lon']+','+rec['lat']+','+rec['ele']+" "
i=i+1
if i==part:
self.f.write(makeHeadPath(j))
self.f.write(bodyPath)
self.f.write(endPath)
i=1
j=j+1
bodyPath=""
self.f.write(makeHeadPath(j))
self.f.write(bodyPath)
self.f.write(endPath)
self.f.write("</Folder>\n")
def close(self):
"""Ending of the kml file"""
print ("close kml!")
kmlTail="\n</Document>\n</kml>"
self.f.write(kmlTail)
self.f.close()
if __name__=="__main__":
import os,sys,fnmatch
folder="C:/Documents and Settings/franz/Bureau/gpicsync.googlecode.com/trunk/GE-test"
myKml=KML(folder+"/test") # deprecated see __init__
for fileName in os.listdir ( folder ):
if fnmatch.fnmatch (fileName, '*.JPG') or fnmatch.fnmatch (fileName, '*.jpg'):
myKml.placemark(folder+"/"+fileName)
myKml.close()
| gpl-2.0 |
audreyr/opencomparison | package/repos/sourceforge.py | 4 | 2752 | import re
from urllib import urlopen
try:
import simplejson as json
except ImportError:
import json
from warnings import warn
from .base_handler import BaseHandler
API_TARGET = "https://sourceforge.net/api"
class SourceforgeError(Exception):
"""An error occurred when making a request to the Sourceforge API"""
class SourceforgeHandler(BaseHandler):
"""
The Sourceforge API has some tricky stuff in it - some sections are fed
via xml/rss, some are via json. As of 03/16/2011, the xml API is the most
up-to-date, but a bug has been opened to fix the json side. This API is
on hold until it is fixed.
"""
title = "Sourceforge"
url_regex = "https://sourceforge.net/"
url = "https://sourceforge.net"
repo_regex = r'https://sourceforge.com/[\w\-\_]+/([\w\-\_]+)/{0,1}'
slug_regex = r'https://sourceforge.com/[\w\-\_]+/([\w\-\_]+)/{0,1}'
def fetch_metadata(self, package):
repo_name = package.repo_name()
target = API_TARGET + "/projects/name/" + repo_name
if not target.endswith("/"):
target += "/"
# sourceforge project API requires ending with /json/
target += "json/"
# open the target and read the content
response = urlopen(target)
response_text = response.read()
# dejson the results
try:
data = json.loads(response_text)
except json.decoder.JSONDecodeError:
raise SourceforgeError("unexpected response from sourceforge.net %d: %r" % (
response.status, response_text))
# sourceforge has both developers and maintainers in a list
participants = data.get("developers").append(data.get("maintainers"))
package.participants = [p['name'] for p in participants]
package.repo_description = data.get("description")
project_name = _name_from_pypi_home_page(package.pypi_home_page)
# dejsonify the results
try:
sf_package_data = _get_project_data(project_name)
except json.decoder.JSONDecodeError:
message = "%s had a JSONDecodeError while loading %s" % (package.title,
package_json_path)
warn(message)
return package
package.repo_watchers = len(sf_package_data.get('maintainers', [])) + len(sf_package_data.get('developers', []))
package.repo_description = sf_package_data.get('description', '')
# TODO - remove the line below and use repo_url as your foundation
package.repo_url = _get_repo_url(sf_package_data)
package.repo_forks = None
return package
repo_handler = SourceforgeHandler()
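# Hedged usage sketch (the package object and its Package model come from the
# surrounding opencomparison app; the slug value is hypothetical):
#
#   package = Package.objects.get(slug='some-project')
#   package = repo_handler.fetch_metadata(package)
#   print package.repo_description, package.repo_watchers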
| mit |
HyechurnJang/archon | application/aci/service/device.py | 2 | 10347 | # -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
from archon import *
from common import *
def device_all(R, M, V):
#===========================================================================
# Get Data
#===========================================================================
if len(R.Path) > 3: node_data = M.Node.list(role=R.Path[3], detail=True, sort='id')
else: node_data = M.Node.list(detail=True, sort='id')
tsys_data = M.System.list(detail=True)
frm_data = M.Class('firmwareARunning').list(detail=True)
#===========================================================================
# Logic
#===========================================================================
for domain_name in sorted(M.keys()):
table = TABLE.BASIC(V('ID'), V('Type'), V('Name'), V('Model'), V('Serial'), V('Version'), V('Management IP'), V('State'), V('Fabric State'), V('Uptime'))
cnt_ctrl = 0
cnt_spne = 0
cnt_leaf = 0
for node in node_data[domain_name]:
id = node['id']
for tsys in tsys_data[domain_name]:
if node['dn'] + '/' in tsys['dn']:
mgmt = '<p>' + tsys['oobMgmtAddr'] + ' <small>[' + tsys['inbMgmtAddr'] + ']</small></p>'
state = tsys['state']
uptime = tsys['systemUpTime'][:-4]
break
else:
mgmt = ' '
state = ' '
uptime = ' '
for frm in frm_data[domain_name]:
if node['dn'] + '/' in frm['dn']: version = frm['version']; break
else: version = ' '
if node['role'] == 'leaf':
cnt_leaf += 1
role = 'Leaf'
elif node['role'] == 'spine':
cnt_spne += 1
role = 'Spine'
elif node['role'] == 'controller':
cnt_ctrl += 1
role = 'Controller'
table.Record(id,
role,
GET('/aci/show/device/%s/%s' % (domain_name, node['dn'])).html(node['name']),
node['model'],
node['serial'],
version,
mgmt,
state,
node['fabricSt'],
uptime)
#===========================================================================
# View
#===========================================================================
V.Page.html(HEAD(1).html('%s %s' % (domain_name, V('Domain'))))
if len(R.Path) < 4:
V.Page.html(
ROW().html(
COL(4).html(
COUNTER(V('Controller'), 'map-signs', cnt_ctrl, CLASS='panel-dgrey').click('/aci/show/device/controller')
),
COL(4).html(
COUNTER(V('Spine'), 'tree', cnt_spne, CLASS='panel-dgrey').click('/aci/show/device/spine')
),
COL(4).html(
COUNTER(V('Leaf'), 'leaf', cnt_leaf, CLASS='panel-dgrey').click('/aci/show/device/leaf')
)
)
)
V.Page.html(table)
V.Menu.html(BUTTON(CLASS='btn-primary').click('/'.join(R.Path)).html(V('Refresh')))
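# Routing illustration (hypothetical request): a GET for /aci/show/device/leaf
# reaches device_all() with R.Path == ['aci', 'show', 'device', 'leaf'], so
# len(R.Path) > 3 and only nodes with role 'leaf' are listed, while a plain
# /aci/show/device lists every node.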
def device_one(R, M, V):
#===========================================================================
# Get Data
#===========================================================================
domain_name = R.Path[3]
dn = '/'.join(R.Path[4:])
node_data = M[domain_name](dn, detail=True)
#===========================================================================
# Logic
#===========================================================================
nav = NAV()
health = None
active_intf = None
# Detail
kv = KEYVAL()
for key in node_data.keys(): kv.Data(key, node_data[key])
nav.Tab(V('Details'), kv)
# Topology
topo = TOPO()
set_topo(topo, dn, color='red', dot=True)
nav.Tab(V('Topology'), DIV(STYLE='text-align:center;padding-top:10px;').html(topo))
if hasattr(node_data, 'System'):
if node_data['role'] != 'controller':
data = M.getHealth()
try: health = CHART.LINE(*data['_tstamp'], **CHART.THEME_HEALTH).Data(dn, *data[domain_name + '/' + dn])
except: pass
kv = KEYVAL()
for key in node_data.System.keys(): kv.Data(key, node_data.System[key])
nav.Tab(V('System'), kv)
physif = node_data.System.PhysIf.list(detail=True, sort='id')
if physif:
phys_health = node_data.System.PhysIf.health()
active_intf = ROW(STYLE='margin-bottom:20px;')
sort_phys_health = {}
key = node_data.System.PhysIf.keys()
table = TABLE.FLIP(*['+' + k if k != 'id' else V('ID') for k in key])
for pi in physif:
table.Record(*[pi[k] for k in key])
sort_phys_health[pi['id']] = None
nav.Tab(V('Physical Interface'), table)
for ph in phys_health: sort_phys_health[ph['name']] = ph['score']
for ph_name in sorted(sort_phys_health, key=lambda name: int(name.split('/')[1])):
ph_val = sort_phys_health[ph_name]
if ph_val != None:
if ph_val > 50:
active_intf.html(
COL(2, STYLE='padding:0px 5px 0px 5px').html(
DIV(STYLE='float:left;').html(FIGURE.DONUT(ph_val, 100 - ph_val, height=20, **FIGURE.THEME_HEALTH)),
DIV(STYLE='padding-left:22px;').html(ph_name)
)
)
else:
active_intf.html(
COL(2, STYLE='padding:0px 5px 0px 5px').html(
DIV(STYLE='float:left;').html(FIGURE.DONUT(100 - ph_val, ph_val, height=20, **FIGURE.THEME_UTIL)),
DIV(STYLE='padding-left:22px;').html(ph_name)
)
)
else:
active_intf.html(
COL(2, STYLE='padding:0px 5px 0px 5px').html(
DIV(STYLE='float:left;').html(FIGURE.DONUT(0, 100, height=20, **FIGURE.THEME_HEALTH)),
DIV(STYLE='padding-left:22px;').html(ph_name)
)
)
#===========================================================================
# View
#===========================================================================
V.Page.html(HEAD(1).html(node_data['name']))
if health != None: V.Page.html(ROW().html(health))
V.Page.html(
HEAD(3).html(V('Model')),
HEAD(4).html(node_data['vendor'] + ' ' + node_data['model']),
HEAD(4).html(node_data['serial'])
)
if active_intf != None:
V.Page.html(
HEAD(3).html(V('Active Interfaces')),
active_intf
)
V.Page.html(nav)
V.Menu.html(BUTTON(CLASS='btn-primary').click('/'.join(R.Path)).html(V('Refresh')))
| apache-2.0 |
j-rivero/ign-transport-git | tools/cpplint.py | 1 | 215811 | #!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It also does not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import copy
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed] [--root=subdir]
[--linelength=digits]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the
extensions with the --extensions flag.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
root=subdir
The root directory used for deriving header guard CPP variable.
By default, the header guard CPP variable is calculated as the relative
path to the directory that contains .git, .hg, or .svn. When this flag
is specified, the relative path is calculated from the specified
directory. If the specified directory does not exist, this flag is
ignored.
Examples:
Assuming that src/.git exists, the header guard CPP variables for
src/chrome/browser/ui/browser.h are:
No flag => CHROME_BROWSER_UI_BROWSER_H_
--root=chrome => BROWSER_UI_BROWSER_H_
--root=chrome/browser => UI_BROWSER_H_
linelength=digits
This is the allowed line length for the project. The default value is
80 characters.
Examples:
--linelength=120
extensions=extension,extension,...
The allowed file extensions that cpplint will check
Examples:
--extensions=hpp,cpp
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
'build/class',
'build/c++11',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/alt_tokens',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/namespace',
'readability/nolint',
'readability/nul',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/vlog',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/empty_conditional_body',
'whitespace/empty_loop_body',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/forcolon',
'whitespace/indent',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo'
]
# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# C++ headers
_CPP_HEADERS = frozenset([
# Legacy
'algobase.h',
'algo.h',
'alloc.h',
'builtinbuf.h',
'bvector.h',
'complex.h',
'defalloc.h',
'deque.h',
'editbuf.h',
'fstream.h',
'function.h',
'hash_map',
'hash_map.h',
'hash_set',
'hash_set.h',
'hashtable.h',
'heap.h',
'indstream.h',
'iomanip.h',
'iostream.h',
'istream.h',
'iterator.h',
'list.h',
'map.h',
'multimap.h',
'multiset.h',
'ostream.h',
'pair.h',
'parsestream.h',
'pfstream.h',
'procbuf.h',
'pthread_alloc',
'pthread_alloc.h',
'rope',
'rope.h',
'ropeimpl.h',
'set.h',
'slist',
'slist.h',
'stack.h',
'stdiostream.h',
'stl_alloc.h',
'stl_relops.h',
'streambuf.h',
'stream.h',
'strfile.h',
'strstream.h',
'tempbuf.h',
'tree.h',
'type_traits.h',
'vector.h',
# 17.6.1.2 C++ library headers
'algorithm',
'array',
'atomic',
'bitset',
'chrono',
'codecvt',
'complex',
'condition_variable',
'deque',
'exception',
'forward_list',
'fstream',
'functional',
'future',
'initializer_list',
'iomanip',
'ios',
'iosfwd',
'iostream',
'istream',
'iterator',
'limits',
'list',
'locale',
'map',
'memory',
'mutex',
'new',
'numeric',
'ostream',
'queue',
'random',
'ratio',
'regex',
'set',
'sstream',
'stack',
'stdexcept',
'streambuf',
'string',
'strstream',
'system_error',
'thread',
'tuple',
'typeindex',
'typeinfo',
'type_traits',
'unordered_map',
'unordered_set',
'utility',
'valarray',
'vector',
# 17.6.1.2 C++ headers for C library facilities
'cassert',
'ccomplex',
'cctype',
'cerrno',
'cfenv',
'cfloat',
'cinttypes',
'ciso646',
'climits',
'clocale',
'cmath',
'csetjmp',
'csignal',
'cstdalign',
'cstdarg',
'cstdbool',
'cstddef',
'cstdint',
'cstdio',
'cstdlib',
'cstring',
'ctgmath',
'ctime',
'cuchar',
'cwchar',
'cwctype',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
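# Illustration of the lookup tables built above (values follow directly from
# the two loops):
#
#   _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] == 'EXPECT_NE' # inverted sense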
# Alternative tokens and their replacements. For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
'and': '&&',
'bitor': '|',
'or': '||',
'xor': '^',
'compl': '~',
'bitand': '&',
'and_eq': '&=',
'or_eq': '|=',
'xor_eq': '^=',
'not': '!',
'not_eq': '!='
}
# Compile regular expression that matches all the above keywords. The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
# These constants define the current inline assembly state
_NO_ASM = 0 # Outside of inline assembly block
_INSIDE_ASM = 1 # Inside inline assembly block
_END_ASM = 2 # Last line of inline assembly block
_BLOCK_ASM = 3 # The whole block is an inline assembly block
# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
r'(?:\s+(volatile|__volatile__))?'
r'\s*[{(]')
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None
# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80
# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'hh', 'h', 'cpp', 'cu', 'cuh'])
def ParseNolintSuppressions(filename, raw_line, linenum, error):
"""Updates the global list of error-suppressions.
Parses any NOLINT comments on the current line, updating the global
error_suppressions store. Reports an error if the NOLINT comment
was malformed.
Args:
filename: str, the name of the input file.
raw_line: str, the line of input text, with comments.
linenum: int, the number of the current line.
error: function, an error handler.
"""
# FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
matched = _RE_SUPPRESSION.search(raw_line)
if matched:
category = matched.group(1)
if category in (None, '(*)'): # => "suppress all"
_error_suppressions.setdefault(None, set()).add(linenum)
else:
if category.startswith('(') and category.endswith(')'):
category = category[1:-1]
if category in _ERROR_CATEGORIES:
_error_suppressions.setdefault(category, set()).add(linenum)
else:
error(filename, linenum, 'readability/nolint', 5,
'Unknown NOLINT error category: %s' % category)
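# Examples of the suppression comments parsed above (illustrative lines):
#
#   some_very_long_statement(); // NOLINT(whitespace/line_length) one category
#   legacy_code_here(); // NOLINT suppresses every category on this line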
def ResetNolintSuppressions():
"Resets the set of NOLINT suppressions to empty."
_error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
"""Returns true if the specified error category is suppressed on this line.
Consults the global error_suppressions map populated by
ParseNolintSuppressions/ResetNolintSuppressions.
Args:
category: str, the category of the error.
linenum: int, the current line number.
Returns:
bool, True iff the error should be suppressed due to a NOLINT comment.
"""
return (linenum in _error_suppressions.get(category, set()) or
linenum in _error_suppressions.get(None, set()))
def Match(pattern, s):
"""Matches the string with the pattern, caching the compiled regexp."""
# The regexp compilation caching is inlined in both Match and Search for
# performance reasons; factoring it out into a separate function turns out
# to be noticeably expensive.
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].match(s)
def ReplaceAll(pattern, rep, s):
"""Replaces instances of pattern in a string with a replacement.
The compiled regex is kept in a cache shared by Match and Search.
Args:
pattern: regex pattern
rep: replacement text
s: search string
Returns:
string with replacements made (or original string if no replacements)
"""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].sub(rep, s)
def Search(pattern, s):
"""Searches the string for the pattern, caching the compiled regexp."""
if pattern not in _regexp_compile_cache:
_regexp_compile_cache[pattern] = sre_compile.compile(pattern)
return _regexp_compile_cache[pattern].search(s)
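# Usage illustration for the cached regex helpers above: Match() anchors at
# the start of the string, Search() matches anywhere.
#
#   Match(r'\s*#\s*include', '  #include <map>') # truthy
#   Search(r'\bNOLINT\b', 'foo(); // NOLINT') # truthy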
class _IncludeState(dict):
"""Tracks line numbers for includes, and the order in which includes appear.
As a dict, an _IncludeState object serves as a mapping between include
filename and line number on which that file was included.
Call CheckNextIncludeOrder() once for each header in the file, passing
in the type constants defined above. Calls in an illegal order will
raise an _IncludeError with an appropriate error message.
"""
# self._section will move monotonically through this set. If it ever
# needs to move backwards, CheckNextIncludeOrder will raise an error.
_INITIAL_SECTION = 0
_MY_H_SECTION = 1
_C_SECTION = 2
_CPP_SECTION = 3
_OTHER_H_SECTION = 4
_TYPE_NAMES = {
_C_SYS_HEADER: 'C system header',
_CPP_SYS_HEADER: 'C++ system header',
_LIKELY_MY_HEADER: 'header this file implements',
_POSSIBLE_MY_HEADER: 'header this file may implement',
_OTHER_HEADER: 'other header',
}
_SECTION_NAMES = {
_INITIAL_SECTION: "... nothing. (This can't be an error.)",
_MY_H_SECTION: 'a header this file implements',
_C_SECTION: 'C system header',
_CPP_SECTION: 'C++ system header',
_OTHER_H_SECTION: 'other header',
}
def __init__(self):
dict.__init__(self)
self.ResetSection()
def ResetSection(self):
# The name of the current section.
self._section = self._INITIAL_SECTION
# The path of last found header.
self._last_header = ''
def SetLastHeader(self, header_path):
self._last_header = header_path
def CanonicalizeAlphabeticalOrder(self, header_path):
"""Returns a path canonicalized for alphabetical comparison.
- replaces "-" with "_" so they both cmp the same.
- removes '-inl' since we don't require them to be after the main header.
- lowercase everything, just in case.
Args:
header_path: Path to be canonicalized.
Returns:
Canonicalized path.
"""
return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
"""Check if a header is in alphabetical order with the previous header.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
header_path: Canonicalized header to be checked.
Returns:
Returns true if the header is in alphabetical order.
"""
# If previous section is different from current section, _last_header will
# be reset to empty string, so it's always less than current header.
#
# If previous line was a blank line, assume that the headers are
# intentionally sorted the way they are.
if (self._last_header > header_path and
not Match(r'^\s*$', clean_lines.elided[linenum - 1])):
return False
return True
def CheckNextIncludeOrder(self, header_type):
"""Returns a non-empty error message if the next header is out of order.
This function also updates the internal state to be ready to check
the next include.
Args:
header_type: One of the _XXX_HEADER constants defined above.
Returns:
The empty string if the header is in the right order, or an
error message describing what's wrong.
"""
error_message = ('Found %s after %s' %
(self._TYPE_NAMES[header_type],
self._SECTION_NAMES[self._section]))
last_section = self._section
if header_type == _C_SYS_HEADER:
if self._section <= self._C_SECTION:
self._section = self._C_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _CPP_SYS_HEADER:
if self._section <= self._CPP_SECTION:
self._section = self._CPP_SECTION
else:
self._last_header = ''
return error_message
elif header_type == _LIKELY_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
self._section = self._OTHER_H_SECTION
elif header_type == _POSSIBLE_MY_HEADER:
if self._section <= self._MY_H_SECTION:
self._section = self._MY_H_SECTION
else:
# This will always be the fallback because we're not sure
# enough that the header is associated with this file.
self._section = self._OTHER_H_SECTION
else:
assert header_type == _OTHER_HEADER
self._section = self._OTHER_H_SECTION
if last_section != self._section:
self._last_header = ''
return ''
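# Ordering illustration: sections must advance monotonically through
# my header -> C system headers -> C++ system headers -> other headers,
# so reporting <stdio.h> (_C_SYS_HEADER) after <vector> (_CPP_SYS_HEADER)
# returns 'Found C system header after C++ system header'.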
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
"""Gets the module's output format."""
return _cpplint_state.output_format
def _SetOutputFormat(output_format):
"""Sets the module's output format."""
_cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
"""Returns the module's verbosity setting."""
return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
"""Sets the module's verbosity, and returns the previous setting."""
return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
"""Sets the module's counting options."""
_cpplint_state.SetCountingStyle(level)
def _Filters():
"""Returns the module's list of output filters, as a list."""
return _cpplint_state.filters
def _SetFilters(filters):
"""Sets the module's error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "whitespace/indent").
Each filter should start with + or -; else we die.
"""
_cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# The error level grows by one each time the function body doubles past
# base_trigger (2x => 1, 4x => 2, ...); it is capped at 5 below.
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
"""Indicates a problem with the include order in a file."""
pass
class FileInfo:
"""Provides utility functions for filenames.
FileInfo provides easy access to the components of a file's path
relative to the project root.
"""
def __init__(self, filename):
self._filename = filename
def FullName(self):
"""Make Windows paths like Unix."""
return os.path.abspath(self._filename).replace('\\', '/')
def RepositoryName(self):
"""FullName after removing the local path to the repository.
If we have a real absolute path name here we can try to do something smart:
detecting the root of the checkout and truncating /path/to/checkout from
the name so that we get header guards that don't include things like
"C:\Documents and Settings\..." or "/home/username/..." in them and thus
people on different computers who have checked the source out to different
locations won't see bogus errors.
"""
fullname = self.FullName()
if os.path.exists(fullname):
project_dir = os.path.dirname(fullname)
if os.path.exists(os.path.join(project_dir, ".svn")):
# If there's a .svn file in the current directory, we recursively look
# up the directory tree for the top of the SVN checkout
root_dir = project_dir
one_up_dir = os.path.dirname(root_dir)
while os.path.exists(os.path.join(one_up_dir, ".svn")):
root_dir = os.path.dirname(root_dir)
one_up_dir = os.path.dirname(one_up_dir)
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
# searching up from the current path.
root_dir = os.path.dirname(fullname)
while (root_dir != os.path.dirname(root_dir) and
not os.path.exists(os.path.join(root_dir, ".git")) and
not os.path.exists(os.path.join(root_dir, ".hg")) and
not os.path.exists(os.path.join(root_dir, ".svn"))):
root_dir = os.path.dirname(root_dir)
if (os.path.exists(os.path.join(root_dir, ".git")) or
os.path.exists(os.path.join(root_dir, ".hg")) or
os.path.exists(os.path.join(root_dir, ".svn"))):
prefix = os.path.commonprefix([root_dir, project_dir])
return fullname[len(prefix) + 1:]
# Don't know what to do; header guard warnings may be wrong...
return fullname
def Split(self):
"""Splits the file into the directory, basename, and extension.
For 'chrome/browser/browser.cc', Split() would
return ('chrome/browser', 'browser', '.cc')
Returns:
A tuple of (directory, basename, extension).
"""
googlename = self.RepositoryName()
project, rest = os.path.split(googlename)
return (project,) + os.path.splitext(rest)
def BaseName(self):
"""File base name - text after the final slash, before the final period."""
return self.Split()[1]
def Extension(self):
"""File extension - text following the final period."""
return self.Split()[2]
def NoExtension(self):
"""File has no source file extension."""
return '/'.join(self.Split()[0:2])
def IsSource(self):
"""File has a source file extension."""
return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
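# Illustrative FileInfo usage, echoing the Split() docstring above (hypothetical
# path; RepositoryName() may shorten it if a .git/.hg/.svn root is found):
#   info = FileInfo('chrome/browser/browser.cc')
#   info.Split()     -> ('chrome/browser', 'browser', '.cc')
#   info.BaseName()  -> 'browser'
#   info.IsSource()  -> True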
def _ShouldPrintError(category, confidence, linenum):
"""If confidence >= verbose, category passes filter and is not suppressed."""
# There are three ways we might decide not to print an error message:
# a "NOLINT(category)" comment appears in the source,
# the verbosity level isn't high enough, or the filters filter it out.
if IsErrorSuppressedByNolint(category, linenum):
return False
if confidence < _cpplint_state.verbose_level:
return False
is_filtered = False
for one_filter in _Filters():
if one_filter.startswith('-'):
if category.startswith(one_filter[1:]):
is_filtered = True
elif one_filter.startswith('+'):
if category.startswith(one_filter[1:]):
is_filtered = False
else:
assert False # should have been checked for in SetFilter.
if is_filtered:
return False
return True
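# Example of the filter semantics above: filters apply in order, so the last
# matching filter wins. With _Filters() == ['-whitespace', '+whitespace/braces'],
# the category 'whitespace/braces' is printed (the '+' filter matches last),
# while 'whitespace/indent' is suppressed (only '-whitespace' matches).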
def Error(filename, linenum, category, confidence, message):
"""Logs the fact we've found a lint error.
We log where the error was found, and also our confidence in the error,
that is, how certain we are this is a legitimate style regression, and
not a misidentification or a use that's sometimes justified.
False positives can be suppressed by the use of
"cpplint(category)" comments on the offending line. These are
parsed into _error_suppressions.
Args:
filename: The name of the file containing the error.
linenum: The number of the line containing the error.
category: A string used to describe the "category" this bug
falls under: "whitespace", say, or "runtime". Categories
may have a hierarchy separated by slashes: "whitespace/indent".
confidence: A number from 1-5 representing a confidence score for
the error, with 5 meaning that we are certain of the problem,
and 1 meaning that it could be a legitimate construct.
message: The error message.
"""
if _ShouldPrintError(category, confidence, linenum):
_cpplint_state.IncrementErrorCount(category)
if _cpplint_state.output_format == 'vs7':
sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
elif _cpplint_state.output_format == 'eclipse':
sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
else:
sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
filename, linenum, message, category, confidence))
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little more complicated than one might expect, because we
# have to take care when removing spaces so that comments inside statements
# are handled well.
# The current rule is: we only strip spaces from both sides when the comment
# sits at the end of the line. Otherwise we try to remove the spaces to the
# right of the comment; if that doesn't apply, we remove the spaces to its
# left, but only when a non-word character follows.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
_RE_PATTERN_C_COMMENTS + r'\s+|' +
r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
_RE_PATTERN_C_COMMENTS + r')')
def IsCppString(line):
"""Does line terminate so, that the next symbol is in string constant.
This function does not consider single-line nor multi-line comments.
Args:
line: A partial line of code, from the start of the line up to some position n.
Returns:
True, if next character appended to 'line' is inside a
string constant.
"""
line = line.replace(r'\\', 'XX') # after this, \\" does not match to \"
return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
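# Illustrative behavior (derived from the docstring above):
#   IsCppString('x = "abc')    -> True   (the string is still open)
#   IsCppString('x = "abc"')   -> False  (the string was closed)
#   IsCppString('x = "a\\""')  -> False  (the escaped quote does not close it)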
def CleanseRawStrings(raw_lines):
"""Removes C++11 raw strings from lines.
Before:
static const char kData[] = R"(
multi-line string
)";
After:
static const char kData[] = ""
(replaced by blank line)
"";
Args:
raw_lines: list of raw lines.
Returns:
list of lines with C++11 raw strings replaced by empty strings.
"""
delimiter = None
lines_without_raw_strings = []
for line in raw_lines:
if delimiter:
# Inside a raw string, look for the end
end = line.find(delimiter)
if end >= 0:
# Found the end of the string, match leading space for this
# line and resume copying the original lines, and also insert
# a "" on the last line.
leading_space = Match(r'^(\s*)\S', line)
line = leading_space.group(1) + '""' + line[end + len(delimiter):]
delimiter = None
else:
# Haven't found the end yet, append a blank line.
line = '""'
# Look for beginning of a raw string, and replace them with
# empty strings. This is done in a loop to handle multiple raw
# strings on the same line.
while delimiter is None:
# Look for beginning of a raw string.
# See 2.14.15 [lex.string] for syntax.
matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
if matched:
delimiter = ')' + matched.group(2) + '"'
end = matched.group(3).find(delimiter)
if end >= 0:
# Raw string ended on same line
line = (matched.group(1) + '""' +
matched.group(3)[end + len(delimiter):])
delimiter = None
else:
# Start of a multi-line raw string
line = matched.group(1) + '""'
else:
break
lines_without_raw_strings.append(line)
# TODO(unknown): if delimiter is not None here, we might want to
# emit a warning for unterminated string.
return lines_without_raw_strings
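# Illustrative single-line case:
#   CleanseRawStrings(['auto s = R"(hi)";'])  ->  ['auto s = "";']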
def FindNextMultiLineCommentStart(lines, lineix):
"""Find the beginning marker for a multiline comment."""
while lineix < len(lines):
if lines[lineix].strip().startswith('/*'):
# Only return this marker if the comment goes beyond this line
if lines[lineix].strip().find('*/', 2) < 0:
return lineix
lineix += 1
return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
"""We are inside a comment, find the end marker."""
while lineix < len(lines):
if lines[lineix].strip().endswith('*/'):
return lineix
lineix += 1
return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
"""Clears a range of lines for multi-line comments."""
# Having // dummy comments makes the lines non-empty, so we will not get
# unnecessary blank line warnings later in the code.
for i in range(begin, end):
lines[i] = '// dummy'
def RemoveMultiLineComments(filename, lines, error):
"""Removes multiline (c-style) comments from lines."""
lineix = 0
while lineix < len(lines):
lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
if lineix_begin >= len(lines):
return
lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
if lineix_end >= len(lines):
error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
'Could not find end of multi-line comment')
return
RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
lineix = lineix_end + 1
def CleanseComments(line):
"""Removes //-comments and single-line C-style /* */ comments.
Args:
line: A line of C++ source.
Returns:
The line with single-line comments removed.
"""
commentpos = line.find('//')
if commentpos != -1 and not IsCppString(line[:commentpos]):
line = line[:commentpos].rstrip()
# get rid of /* ... */
return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
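# Illustrative behavior:
#   CleanseComments('int x;  // trailing comment')  ->  'int x;'
#   CleanseComments('f(/* arg */ 1, 2);')           ->  'f(1, 2);'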
class CleansedLines(object):
"""Holds 3 copies of all lines with different preprocessing applied to them.
1) elided member contains lines without strings and comments,
2) lines member contains lines without comments, and
3) raw_lines member contains all the lines without processing.
All three members are lists of the same length.
"""
def __init__(self, lines):
self.elided = []
self.lines = []
self.raw_lines = lines
self.num_lines = len(lines)
self.lines_without_raw_strings = CleanseRawStrings(lines)
for linenum in range(len(self.lines_without_raw_strings)):
self.lines.append(CleanseComments(
self.lines_without_raw_strings[linenum]))
elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
self.elided.append(CleanseComments(elided))
def NumLines(self):
"""Returns the number of lines represented."""
return self.num_lines
@staticmethod
def _CollapseStrings(elided):
"""Collapses strings and chars on a line to simple "" or '' blocks.
We nix strings first so we're not fooled by text like '"http://"'
Args:
elided: The line being processed.
Returns:
The line with collapsed strings.
"""
if _RE_PATTERN_INCLUDE.match(elided):
return elided
# Remove escaped characters first to make quote/single quote collapsing
# basic. Things that look like escaped characters shouldn't occur
# outside of strings and chars.
elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
# Replace quoted strings and digit separators. Both single quotes
# and double quotes are processed in the same loop, otherwise
# nested quotes wouldn't work.
collapsed = ''
while True:
# Find the first quote character
match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
if not match:
collapsed += elided
break
head, quote, tail = match.groups()
if quote == '"':
# Collapse double quoted strings
second_quote = tail.find('"')
if second_quote >= 0:
collapsed += head + '""'
elided = tail[second_quote + 1:]
else:
# Unmatched double quote, don't bother processing the rest
# of the line since this is probably a multiline string.
collapsed += elided
break
else:
# Found single quote, check nearby text to eliminate digit separators.
#
# There is no special handling for floating point here, because
# the integer/fractional/exponent parts would all be parsed
# correctly as long as there are digits on both sides of the
# separator. So we are fine as long as we don't see something
# like "0.'3" (gcc 4.9.0 will not allow this literal).
if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
collapsed += head + match_literal.group(1).replace("'", '')
elided = match_literal.group(2)
else:
second_quote = tail.find('\'')
if second_quote >= 0:
collapsed += head + "''"
elided = tail[second_quote + 1:]
else:
# Unmatched single quote
collapsed += elided
break
return collapsed
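# Illustrative before/after for _CollapseStrings (inputs shown as raw text):
#   printf("%d", 'a');   ->   printf("", '');
#   x = 1'000'000;       ->   x = 1000000;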
def FindEndOfExpressionInLine(line, startpos, stack):
"""Find the position just after the end of current parenthesized expression.
Args:
line: a CleansedLines line.
startpos: start searching at this position.
stack: nesting stack at startpos.
Returns:
On finding matching end: (index just after matching end, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at end of this line)
"""
for i in xrange(startpos, len(line)):
char = line[i]
if char in '([{':
# Found start of parenthesized expression, push to expression stack
stack.append(char)
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
if stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
# operator<, don't add to stack
continue
else:
# Tentative start of template argument list
stack.append('<')
elif char in ')]}':
# Found end of parenthesized expression.
#
# If we are currently expecting a matching '>', the pending '<'
# must have been an operator. Remove them from expression stack.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
if ((stack[-1] == '(' and char == ')') or
(stack[-1] == '[' and char == ']') or
(stack[-1] == '{' and char == '}')):
stack.pop()
if not stack:
return (i + 1, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == '>':
# Found potential end of template argument list.
# Ignore "->" and operator functions
if (i > 0 and
(line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
continue
# Pop the stack if there is a matching '<'. Otherwise, ignore
# this '>' since it must be an operator.
if stack:
if stack[-1] == '<':
stack.pop()
if not stack:
return (i + 1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '>', the matching '<' must have been an operator,
# since a template argument list cannot contain statements.
while stack and stack[-1] == '<':
stack.pop()
if not stack:
return (-1, None)
# Did not find end of expression or unbalanced parentheses on this line
return (-1, stack)
def CloseExpression(clean_lines, linenum, pos):
"""If input points to ( or { or [ or <, finds the position that closes it.
If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
linenum/pos that correspond to the closing of the expression.
TODO(unknown): cpplint spends a fair bit of time matching parentheses.
Ideally we would want to index all opening and closing parentheses once
and have CloseExpression be just a simple lookup, but due to preprocessor
tricks, this is not so easy.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *past* the closing brace, or
(line, len(lines), -1) if we never find a close. Note we ignore
strings and comments when matching; and the line we return is the
'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
return (line, clean_lines.NumLines(), -1)
# Check first line
(end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
if end_pos > -1:
return (line, linenum, end_pos)
# Continue scanning forward
while stack and linenum < clean_lines.NumLines() - 1:
linenum += 1
line = clean_lines.elided[linenum]
(end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
if end_pos > -1:
return (line, linenum, end_pos)
# Did not find end of expression before end of file, give up
return (line, clean_lines.NumLines(), -1)
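# Minimal usage sketch (hypothetical two-line input):
#   clean = CleansedLines(['f(a,', '  b);'])
#   CloseExpression(clean, 0, 1)  ->  ('  b);', 1, 4)
# i.e. the '(' at line 0, column 1 closes at line 1, just past the ')' at column 3.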
def FindStartOfExpressionInLine(line, endpos, stack):
"""Find position at the matching start of current expression.
This is almost the reverse of FindEndOfExpressionInLine, but note
that the input position and returned position differs by 1.
Args:
line: a CleansedLines line.
endpos: start searching at this position.
stack: nesting stack at endpos.
Returns:
On finding matching start: (index at matching start, None)
On finding an unclosed expression: (-1, None)
Otherwise: (-1, new stack at beginning of this line)
"""
i = endpos
while i >= 0:
char = line[i]
if char in ')]}':
# Found end of expression, push to expression stack
stack.append(char)
elif char == '>':
# Found potential end of template argument list.
#
# Ignore it if it's a "->" or ">=" or "operator>"
if (i > 0 and
(line[i - 1] == '-' or
Match(r'\s>=\s', line[i - 1:]) or
Search(r'\boperator\s*$', line[0:i]))):
i -= 1
else:
stack.append('>')
elif char == '<':
# Found potential start of template argument list
if i > 0 and line[i - 1] == '<':
# Left shift operator
i -= 1
else:
# If there is a matching '>', we can pop the expression stack.
# Otherwise, ignore this '<' since it must be an operator.
if stack and stack[-1] == '>':
stack.pop()
if not stack:
return (i, None)
elif char in '([{':
# Found start of expression.
#
# If there are any unmatched '>' on the stack, they must be
# operators. Remove those.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
if ((char == '(' and stack[-1] == ')') or
(char == '[' and stack[-1] == ']') or
(char == '{' and stack[-1] == '}')):
stack.pop()
if not stack:
return (i, None)
else:
# Mismatched parentheses
return (-1, None)
elif char == ';':
# Found something that looks like the end of a statement. If we are
# currently expecting a '<', the matching '>' must have been an operator,
# since a template argument list cannot contain statements.
while stack and stack[-1] == '>':
stack.pop()
if not stack:
return (-1, None)
i -= 1
return (-1, stack)
def ReverseCloseExpression(clean_lines, linenum, pos):
"""If input points to ) or } or ] or >, finds the position that opens it.
If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
linenum/pos that correspond to the opening of the expression.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: A position on the line.
Returns:
A tuple (line, linenum, pos) pointer *at* the opening brace, or
(line, 0, -1) if we never find the matching opening brace. Note
we ignore strings and comments when matching; and the line we
return is the 'cleansed' line at linenum.
"""
line = clean_lines.elided[linenum]
if line[pos] not in ')}]>':
return (line, 0, -1)
# Check last line
(start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
if start_pos > -1:
return (line, linenum, start_pos)
# Continue scanning backward
while stack and linenum > 0:
linenum -= 1
line = clean_lines.elided[linenum]
(start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
if start_pos > -1:
return (line, linenum, start_pos)
# Did not find start of expression before beginning of file, give up
return (line, 0, -1)
def CheckForCopyright(filename, lines, error):
"""Logs an error if no Copyright message appears at the top of the file."""
# We'll say it should occur by line 10. Don't forget there's a
# dummy line at the front.
for line in xrange(1, min(len(lines), 11)):
if re.search(r'Copyright', lines[line], re.I): break
else: # means no copyright line was found
error(filename, 0, 'legal/copyright', 5,
'No copyright message found. '
'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetIndentLevel(line):
"""Return the number of leading spaces in line.
Args:
line: A string to check.
Returns:
An integer count of leading spaces, possibly zero.
"""
indent = Match(r'^( *)\S', line)
if indent:
return len(indent.group(1))
else:
return 0
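# Illustrative behavior (only leading spaces count, not tabs):
#   GetIndentLevel('    int x;')  -> 4
#   GetIndentLevel('\tint x;')    -> 0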
def GetHeaderGuardCPPVariable(filename):
"""Returns the CPP variable that should be used as a header guard.
Args:
filename: The name of a C++ header file.
Returns:
The CPP variable that should be used as a header guard in the
named file.
"""
# Restores original filename in case that cpplint is invoked from Emacs's
# flymake.
filename = re.sub(r'_flymake\.h$', '.h', filename)
filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
fileinfo = FileInfo(filename)
file_path_from_root = fileinfo.RepositoryName()
if _root:
file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
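# Illustrative result (hypothetical header, assuming _root is unset and
# RepositoryName() returns the path unchanged):
#   GetHeaderGuardCPPVariable('chrome/browser/ui/browser.h')
#     -> 'CHROME_BROWSER_UI_BROWSER_H_'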
def CheckForHeaderGuard(filename, lines, error):
"""Checks that the file contains a header guard.
Logs an error if no #ifndef header guard is present. For other
headers, checks that the full pathname is used.
Args:
filename: The name of the C++ header file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
cppvar = GetHeaderGuardCPPVariable(filename)
ifndef = None
ifndef_linenum = 0
define = None
endif = None
endif_linenum = 0
for linenum, line in enumerate(lines):
linesplit = line.split()
if len(linesplit) >= 2:
# find the first occurrence of #ifndef and #define, save arg
if not ifndef and linesplit[0] == '#ifndef':
# set ifndef to the header guard presented on the #ifndef line.
ifndef = linesplit[1]
ifndef_linenum = linenum
if not define and linesplit[0] == '#define':
define = linesplit[1]
# find the last occurrence of #endif, save entire line
if line.startswith('#endif'):
endif = line
endif_linenum = linenum
if not ifndef:
error(filename, 0, 'build/header_guard', 5,
'No #ifndef header guard found, suggested CPP variable is: %s' %
cppvar)
return
if not define:
error(filename, 0, 'build/header_guard', 5,
'No #define header guard found, suggested CPP variable is: %s' %
cppvar)
return
# The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
# for backward compatibility.
if ifndef != cppvar:
error_level = 0
if ifndef != cppvar + '_':
error_level = 5
ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
error)
error(filename, ifndef_linenum, 'build/header_guard', error_level,
'#ifndef header guard has wrong style, please use: %s' % cppvar)
if define != ifndef:
error(filename, 0, 'build/header_guard', 5,
'#ifndef and #define don\'t match, suggested CPP variable is: %s' %
cppvar)
return
if endif != ('#endif // %s' % cppvar):
error_level = 0
if endif != ('#endif // %s' % (cppvar + '_')):
error_level = 5
ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
error)
error(filename, endif_linenum, 'build/header_guard', error_level,
'#endif line should be "#endif // %s"' % cppvar)
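# For a hypothetical header foo/bar.h, the guard layout accepted above is:
#   #ifndef FOO_BAR_H_
#   #define FOO_BAR_H_
#   ...
#   #endif  // FOO_BAR_H_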
def CheckForBadCharacters(filename, lines, error):
"""Logs an error for each line containing bad characters.
Two kinds of bad characters:
1. Unicode replacement characters: These indicate that either the file
contained invalid UTF-8 (likely) or Unicode replacement characters (which
it shouldn't). Note that it's possible for this to throw off line
numbering if the invalid UTF-8 occurred adjacent to a newline.
2. NUL bytes. These are problematic for some tools.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
for linenum, line in enumerate(lines):
if u'\ufffd' in line:
error(filename, linenum, 'readability/utf8', 5,
'Line contains invalid UTF-8 (or Unicode replacement character).')
if '\0' in line:
error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
def CheckForNewlineAtEOF(filename, lines, error):
"""Logs an error if there is no newline char at the end of the file.
Args:
filename: The name of the current file.
lines: An array of strings, each representing a line of the file.
error: The function to call with any errors found.
"""
# The array lines() was created by adding two newlines to the
# original file (go figure), then splitting on \n.
# To verify that the file ends in \n, we just have to make sure the
# last-but-two element of lines() exists and is empty.
if len(lines) < 3 or lines[-2]:
error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
"""Logs an error if we see /* ... */ or "..." that extend past one line.
/* ... */ comments are legit inside macros, for one line.
Otherwise, we prefer // comments, so it's ok to warn about the
other. Likewise, it's ok for strings to extend across multiple
lines, as long as a line continuation character (backslash)
terminates each line. Although not currently prohibited by the C++
style guide, it's ugly and unnecessary. We don't do well with either
in this lint program, so we warn about both.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remove all \\ (escaped backslashes) from the line. They are OK, and the
# second (escaped) slash may trigger later \" detection erroneously.
line = line.replace('\\\\', '')
if line.count('/*') > line.count('*/'):
error(filename, linenum, 'readability/multiline_comment', 5,
'Complex multi-line /*...*/-style comment found. '
'Lint may give bogus warnings. '
'Consider replacing these with //-style comments, '
'with #if 0...#endif, '
'or with more clearly structured multi-line comments.')
if (line.count('"') - line.count('\\"')) % 2:
error(filename, linenum, 'readability/multiline_string', 5,
'Multi-line string ("...") found. This lint script doesn\'t '
'do well with such strings, and may give bogus warnings. '
'Use C++11 raw strings or concatenation instead.')
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
# _rand(); // false positive due to substring match.
# ->rand(); // some member function rand().
# ACMRandom rand(seed); // some variable named rand.
# ISAACRandom rand(); // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name. This eliminates constructors and
# member function calls.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
_THREADING_LIST = (
('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
('strtok(', 'strtok_r(',
_UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
)
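# Example lines against the patterns above (illustrative, not exhaustive):
#   'int r = rand();'        matches '= rand()'  -> flagged, suggests rand_r(
#   'ACMRandom rand(seed);'  has no operator before the call -> not flagged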
def CheckPosixThreading(filename, clean_lines, linenum, error):
"""Checks for calls to thread-unsafe functions.
Much code was originally written without considering multi-threading, and
engineers often rely on experience gained before the POSIX threading
extensions were added. These tests guide engineers toward the thread-safe
functions (when using POSIX directly).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
# Additional pattern matching check to confirm that this is the
# function we are looking for
if Search(pattern, line):
error(filename, linenum, 'runtime/threadsafe_fn', 2,
'Consider using ' + multithread_safe_func +
'...) instead of ' + single_thread_func +
'...) for improved thread safety.')
def CheckVlogArguments(filename, clean_lines, linenum, error):
"""Checks that VLOG() is only used for defining a logging level.
For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
VLOG(FATAL) are not.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
error(filename, linenum, 'runtime/vlog', 5,
'VLOG() should be used with numeric verbosity level. '
'Use LOG() if you want symbolic severity levels.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
r'^\s*\*\w+(\+\+|--);')
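# Example: matches '*count++;' but not '(*count)++;' or '*count += 1;'.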
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
"""Checks for invalid increment *count++.
For example following function:
void increment_counter(int* count) {
*count++;
}
is invalid, because it effectively does count++ (moving the pointer) and
should be replaced with ++*count, (*count)++ or *count += 1.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
if _RE_PATTERN_INVALID_INCREMENT.match(line):
error(filename, linenum, 'runtime/invalid_increment', 5,
'Changing pointer instead of value (or unused value of operator*).')
class _BlockInfo(object):
"""Stores information about a generic block of code."""
def __init__(self, seen_open_brace):
self.seen_open_brace = seen_open_brace
self.open_parentheses = 0
self.inline_asm = _NO_ASM
def CheckBegin(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text up to the opening brace.
This is mostly for checking the text after the class identifier
and the "{", usually where the base class is specified. For other
blocks, there isn't much to check, so we always pass.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Run checks that applies to text after the closing brace.
This is mostly used for checking end of namespace comments.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
pass
def IsBlockInfo(self):
"""Returns true if this block is a _BlockInfo.
This is convenient for verifying that an object is an instance of
a _BlockInfo, but not an instance of any of the derived classes.
Returns:
True for this class, False for derived classes.
"""
return self.__class__ == _BlockInfo
class _ExternCInfo(_BlockInfo):
"""Stores information about an 'extern "C"' block."""
def __init__(self):
_BlockInfo.__init__(self, True)
class _ClassInfo(_BlockInfo):
"""Stores information about a class."""
def __init__(self, name, class_or_struct, clean_lines, linenum):
_BlockInfo.__init__(self, False)
self.name = name
self.starting_linenum = linenum
self.is_derived = False
if class_or_struct == 'struct':
self.access = 'public'
self.is_struct = True
else:
self.access = 'private'
self.is_struct = False
# Remember initial indentation level for this class. Using raw_lines here
# instead of elided to account for leading comments.
self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.elided[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
def CheckBegin(self, filename, clean_lines, linenum, error):
# Look for a bare ':'
if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
self.is_derived = True
def CheckEnd(self, filename, clean_lines, linenum, error):
# Check that closing brace is aligned with beginning of the class.
# Only do this if the closing brace is indented by only whitespaces.
# This means we will not check single-line class definitions.
indent = Match(r'^( *)\}', clean_lines.elided[linenum])
if indent and len(indent.group(1)) != self.class_indent:
if self.is_struct:
parent = 'struct ' + self.name
else:
parent = 'class ' + self.name
error(filename, linenum, 'whitespace/indent', 3,
'Closing brace should be aligned with beginning of %s' % parent)
class _NamespaceInfo(_BlockInfo):
"""Stores information about a namespace."""
def __init__(self, name, linenum):
_BlockInfo.__init__(self, False)
self.name = name or ''
self.starting_linenum = linenum
def CheckEnd(self, filename, clean_lines, linenum, error):
"""Check end of namespace comments."""
line = clean_lines.raw_lines[linenum]
# Check how many lines are enclosed in this namespace. Don't issue
# a warning for missing namespace comments if there aren't enough
# lines. However, do apply checks if there is already an end of
# namespace comment and it's incorrect.
#
# TODO(unknown): We always want to check end of namespace comments
# if a namespace is large, but sometimes we also want to apply the
# check if a short namespace contained nontrivial things (something
# other than forward declarations). There is currently no logic on
# deciding what these nontrivial things are, so this check is
# triggered by namespace size only, which works most of the time.
if (linenum - self.starting_linenum < 10
and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
return
# Look for matching comment at end of namespace.
#
# Note that we accept C style "/* */" comments for terminating
# namespaces, so that code that terminate namespaces inside
# preprocessor macros can be cpplint clean.
#
# We also accept stuff like "// end of namespace <name>." with the
# period at the end.
#
# Besides these, we don't accept anything else, otherwise we might
# get false negatives when existing comment is a substring of the
# expected namespace.
if self.name:
# Named namespace
if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
r'[\*/\.\\\s]*$'),
line):
error(filename, linenum, 'readability/namespace', 5,
'Namespace should be terminated with "// namespace %s"' %
self.name)
else:
# Anonymous namespace
if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
# If "// namespace anonymous" or "// anonymous namespace (more text)",
# mention "// anonymous namespace" as an acceptable form
if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"'
' or "// anonymous namespace"')
else:
error(filename, linenum, 'readability/namespace', 5,
'Anonymous namespace should be terminated with "// namespace"')
class _PreprocessorInfo(object):
"""Stores checkpoints of nesting stacks when #if/#else is seen."""
def __init__(self, stack_before_if):
# The entire nesting stack before #if
self.stack_before_if = stack_before_if
# The entire nesting stack up to #else
self.stack_before_else = []
# Whether we have already seen #else or #elif
self.seen_else = False
class NestingState(object):
"""Holds states related to parsing braces."""
def __init__(self):
# Stack for tracking all braces. An object is pushed whenever we
# see a "{", and popped when we see a "}". Only 3 types of
# objects are possible:
# - _ClassInfo: a class or struct.
# - _NamespaceInfo: a namespace.
# - _BlockInfo: some other type of block.
self.stack = []
# Top of the previous stack before each Update().
#
# Because the nesting_stack is updated at the end of each line, we
# had to do some convoluted checks to find out what is the current
# scope at the beginning of the line. This check is simplified by
# saving the previous top of nesting stack.
#
# We could save the full stack, but we only need the top. Copying
# the full nesting stack would slow down cpplint by ~10%.
self.previous_stack_top = []
# Stack of _PreprocessorInfo objects.
self.pp_stack = []
def SeenOpenBrace(self):
"""Check if we have seen the opening brace for the innermost block.
Returns:
True if we have seen the opening brace, False if the innermost
block is still expecting an opening brace.
"""
return (not self.stack) or self.stack[-1].seen_open_brace
def InNamespaceBody(self):
"""Check if we are currently one level inside a namespace body.
Returns:
True if top of the stack is a namespace block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
def InExternC(self):
"""Check if we are currently one level inside an 'extern "C"' block.
Returns:
True if top of the stack is an extern block, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ExternCInfo)
def InClassDeclaration(self):
"""Check if we are currently one level inside a class or struct declaration.
Returns:
True if top of the stack is a class/struct, False otherwise.
"""
return self.stack and isinstance(self.stack[-1], _ClassInfo)
def InAsmBlock(self):
"""Check if we are currently one level inside an inline ASM block.
Returns:
True if the top of the stack is a block containing inline ASM.
"""
return self.stack and self.stack[-1].inline_asm != _NO_ASM
def InTemplateArgumentList(self, clean_lines, linenum, pos):
"""Check if current position is inside template argument list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
pos: position just after the suspected template argument.
Returns:
True if (linenum, pos) is inside template arguments.
"""
while linenum < clean_lines.NumLines():
# Find the earliest character that might indicate a template argument
line = clean_lines.elided[linenum]
match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
if not match:
linenum += 1
pos = 0
continue
token = match.group(1)
pos += len(match.group(0))
# These things do not look like template argument list:
# class Suspect {
# class Suspect x; }
if token in ('{', '}', ';'): return False
# These things look like template argument list:
# template <class Suspect>
# template <class Suspect = default_value>
# template <class Suspect[]>
# template <class Suspect...>
if token in ('>', '=', '[', ']', '.'): return True
# Check if token is an unmatched '<'.
# If not, move on to the next character.
if token != '<':
pos += 1
if pos >= len(line):
linenum += 1
pos = 0
continue
# We can't be sure if we just find a single '<', and need to
# find the matching '>'.
(_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
if end_pos < 0:
# Not sure if template argument list or syntax error in file
return False
linenum = end_line
pos = end_pos
return False
def UpdatePreprocessor(self, line):
"""Update preprocessor stack.
We need to handle preprocessors due to classes like this:
#ifdef SWIG
struct ResultDetailsPageElementExtensionPoint {
#else
struct ResultDetailsPageElementExtensionPoint : public Extension {
#endif
We make the following assumptions (good enough for most files):
- Preprocessor condition evaluates to true from #if up to first
#else/#elif/#endif.
- Preprocessor condition evaluates to false from #else/#elif up
to #endif. We still perform lint checks on these lines, but
these do not affect nesting stack.
Args:
line: current line to check.
"""
if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
# Beginning of #if block, save the nesting stack here. The saved
# stack will allow us to restore the parsing state in the #else case.
self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
elif Match(r'^\s*#\s*(else|elif)\b', line):
# Beginning of #else block
if self.pp_stack:
if not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
"""Update nesting state with current line.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Remember top of the previous nesting stack.
#
# The stack is always pushed/popped and not modified in place, so
# we can just do a shallow copy instead of copy.deepcopy. Using
# deepcopy would slow down cpplint by ~28%.
if self.stack:
self.previous_stack_top = self.stack[-1]
else:
self.previous_stack_top = None
# Update pp_stack
self.UpdatePreprocessor(line)
# Count parentheses. This is to avoid adding struct arguments to
# the nesting stack.
if self.stack:
inner_block = self.stack[-1]
depth_change = line.count('(') - line.count(')')
inner_block.open_parentheses += depth_change
# Also check if we are starting or ending an inline assembly block.
if inner_block.inline_asm in (_NO_ASM, _END_ASM):
if (depth_change != 0 and
inner_block.open_parentheses == 1 and
_MATCH_ASM.match(line)):
# Enter assembly block
inner_block.inline_asm = _INSIDE_ASM
else:
# Not entering assembly block. If previous line was _END_ASM,
# we will now shift to _NO_ASM state.
inner_block.inline_asm = _NO_ASM
elif (inner_block.inline_asm == _INSIDE_ASM and
inner_block.open_parentheses == 0):
# Exit assembly block
inner_block.inline_asm = _END_ASM
# Consume namespace declaration at the beginning of the line. Do
# this in a loop so that we catch same line declarations like this:
# namespace proto2 { namespace bridge { class MessageSet; } }
while True:
# Match start of namespace. The "\b\s*" below catches namespace
# declarations even if they aren't followed by whitespace, so that
# we don't confuse our namespace checker. The
# missing spaces will be flagged by CheckSpacing.
namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
if not namespace_decl_match:
break
new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
self.stack.append(new_namespace)
line = namespace_decl_match.group(2)
if line.find('{') != -1:
new_namespace.seen_open_brace = True
line = line[line.find('{') + 1:]
# Look for a class declaration in whatever is left of the line
# after parsing namespaces. The regexp accounts for decorated classes
# such as in:
# class LOCKABLE API Object {
# };
class_decl_match = Match(
r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
r'(.*)$', line)
if (class_decl_match and
(not self.stack or self.stack[-1].open_parentheses == 0)):
# We do not want to accept classes that are actually template arguments:
# template <class Ignore1,
# class Ignore2 = Default<Args>,
# template <Args> class Ignore3>
# void Function() {};
#
# To avoid template argument cases, we scan forward and look for
# an unmatched '>'. If we see one, assume we are inside a
# template argument list.
end_declaration = len(class_decl_match.group(1))
if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
self.stack.append(_ClassInfo(
class_decl_match.group(3), class_decl_match.group(2),
clean_lines, linenum))
line = class_decl_match.group(4)
# If we have not yet seen the opening brace for the innermost block,
# run checks here.
if not self.SeenOpenBrace():
self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
# Update access control if we are inside a class/struct
if self.stack and isinstance(self.stack[-1], _ClassInfo):
classinfo = self.stack[-1]
access_match = Match(
r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
r':(?:[^:]|$)',
line)
if access_match:
classinfo.access = access_match.group(2)
# Check that access keywords are indented +1 space. Skip this
# check if the keywords are not preceded by whitespaces.
indent = access_match.group(1)
if (len(indent) != classinfo.class_indent + 1 and
Match(r'^\s*$', indent)):
if classinfo.is_struct:
parent = 'struct ' + classinfo.name
else:
parent = 'class ' + classinfo.name
slots = ''
if access_match.group(3):
slots = access_match.group(3)
error(filename, linenum, 'whitespace/indent', 3,
'%s%s: should be indented +1 space inside %s' % (
access_match.group(2), slots, parent))
# Consume braces or semicolons from what's left of the line
while True:
# Match first brace, semicolon, or closed parenthesis.
matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
if not matched:
break
token = matched.group(1)
if token == '{':
# If namespace or class hasn't seen an opening brace yet, mark
# namespace/class head as complete. Push a new block onto the
# stack otherwise.
if not self.SeenOpenBrace():
self.stack[-1].seen_open_brace = True
elif Match(r'^extern\s*"[^"]*"\s*\{', line):
self.stack.append(_ExternCInfo())
else:
self.stack.append(_BlockInfo(True))
if _MATCH_ASM.match(line):
self.stack[-1].inline_asm = _BLOCK_ASM
elif token == ';' or token == ')':
# If we haven't seen an opening brace yet, but we already saw
# a semicolon, this is probably a forward declaration. Pop
# the stack for these.
#
# Similarly, if we haven't seen an opening brace yet, but we
# already saw a closing parenthesis, then these are probably
# function arguments with extra "class" or "struct" keywords.
# Pop the stack for these too.
if not self.SeenOpenBrace():
self.stack.pop()
else: # token == '}'
# Perform end of block checks and pop the stack.
if self.stack:
self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
self.stack.pop()
line = matched.group(2)
def InnermostClass(self):
"""Get class info on the top of the stack.
Returns:
A _ClassInfo object if we are inside a class, or None otherwise.
"""
for i in range(len(self.stack), 0, -1):
classinfo = self.stack[i - 1]
if isinstance(classinfo, _ClassInfo):
return classinfo
return None
def CheckCompletedBlocks(self, filename, error):
"""Checks that all classes and namespaces have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
for obj in self.stack:
if isinstance(obj, _ClassInfo):
error(filename, obj.starting_linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
obj.name)
elif isinstance(obj, _NamespaceInfo):
error(filename, obj.starting_linenum, 'build/namespaces', 5,
'Failed to find complete declaration of namespace %s' %
obj.name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage class (static, extern, typedef, etc) should be first.')
if Match(r'\s*#\s*endif\s*[^/\s]+', line):
error(filename, linenum, 'build/endif_comment', 5,
'Uncommented text after #endif is non-standard. Use a comment.')
if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
error(filename, linenum, 'build/forward_decl', 5,
'Inner-style forward declarations are invalid. Remove this line.')
if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
line):
error(filename, linenum, 'build/deprecated', 3,
'>? and <? (max and min) operators are non-standard and deprecated.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is not completed yet.
classinfo = nesting_state.InnermostClass()
if not classinfo or not classinfo.seen_open_brace:
return
# The class may have been declared with namespace or classname qualifiers.
# The constructor and destructor will not have those qualifiers.
base_classname = classinfo.name.split('::')[-1]
# Look for single-argument constructors that aren't marked explicit.
# Technically a valid construct, but against style.
args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
% re.escape(base_classname),
line)
if (args and
args.group(1) != 'void' and
not Search(r'\bstd::initializer_list\b', args.group(1)) and
not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
% re.escape(base_classname), args.group(1).strip())):
error(filename, linenum, 'runtime/explicit', 5,
'Single-argument constructors should be marked explicit.')
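# Illustrative case for the explicit-constructor check above (hypothetical
# class): inside 'class Foo { ... }',
#   '  Foo(int x);'           is flagged runtime/explicit, while
#   '  explicit Foo(int x);'  and the copy constructor 'Foo(const Foo&);' are not.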
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
"""Checks for the correctness of various spacing around function calls.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Since function calls often occur inside if/for/while/switch
# expressions - which have their own, more liberal conventions - we
# first see if we should be looking inside such an expression for a
# function call, to which we can apply more strict standards.
fncall = line # if there's no control flow construct, look at whole line
for pattern in (r'\bif\s*\((.*)\)\s*{',
r'\bfor\s*\((.*)\)\s*{',
r'\bwhile\s*\((.*)\)\s*[{;]',
r'\bswitch\s*\((.*)\)\s*{'):
match = Search(pattern, line)
if match:
fncall = match.group(1) # look inside the parens for function calls
break
# Except in if/for/while/switch, there should never be space
# immediately inside parens (eg "f( 3, 4 )"). We make an exception
# for nested parens ( (a+b) + c ). Likewise, there should never be
# a space before a ( when it's a function argument. I assume it's a
# function argument when the char before the whitespace is legal in
# a function name (alnum + _) and we're not starting a macro. Also ignore
# pointers and references to arrays and functions coz they're too tricky:
# we use a very simple way to recognize these:
# " (something)(maybe-something)" or
# " (something)(maybe-something," or
# " (something)[something]"
# Note that we assume the contents of [] to be short enough that
# they'll never need to wrap.
if ( # Ignore control structures.
not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
fncall) and
# Ignore pointers/references to functions.
not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
# Ignore pointers/references to arrays.
not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
error(filename, linenum, 'whitespace/parens', 4,
'Extra space after ( in function call')
elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Extra space after (')
if (Search(r'\w\s+\(', fncall) and
not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
# TODO(unknown): Space after an operator function seem to be a common
# error, silence those for now by restricting them to highest verbosity.
if Search(r'\boperator_*\b', line):
error(filename, linenum, 'whitespace/parens', 0,
'Extra space before ( in function call')
else:
error(filename, linenum, 'whitespace/parens', 4,
'Extra space before ( in function call')
# If the ) is followed only by a newline or a { + newline, assume it's
# part of a control statement (if/while/etc), and don't complain
if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
# If the closing parenthesis is preceded by only whitespaces,
# try to give a more descriptive error message.
if Search(r'^\s+\)', fncall):
error(filename, linenum, 'whitespace/parens', 2,
'Closing ) should be moved to the previous line')
else:
error(filename, linenum, 'whitespace/parens', 2,
'Extra space before )')
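# A minimal usage sketch (illustrative only; 'collect' is a hypothetical
# error callback, and CleansedLines is the helper class defined earlier
# in this file):
#   >>> msgs = []
#   >>> def collect(fname, num, category, level, msg): msgs.append(msg)
#   >>> CheckSpacingForFunctionCall('t.cc', CleansedLines(['f( x);']),
#   ...                             0, collect)
#   >>> msgs
#   ['Extra space after ( in function call']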
def IsBlankLine(line):
"""Returns true if the given line is blank.
We consider a line to be blank if it is empty or consists only of
whitespace.
Args:
line: The line of text to check.
Returns:
True if the given line is blank.
"""
return not line or line.isspace()
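# For example: IsBlankLine('') and IsBlankLine('  \t') both return True,
# while IsBlankLine('}') returns False.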
def CheckForFunctionLengths(filename, clean_lines, linenum,
function_state, error):
"""Reports for long function bodies.
For an overview why this is done, see:
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
Uses a simplistic algorithm assuming other style guidelines
(especially spacing) are followed.
Only checks unindented functions, so class members are unchecked.
Trivial bodies are unchecked, so constructors with huge initializer lists
may be missed.
Blank/comment lines are not counted so as to avoid encouraging the removal
of vertical space and comments just to get through a lint check.
NOLINT *on the last line of a function* disables this check.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
function_state: Current function name and lines in body so far.
error: The function to call with any errors found.
"""
lines = clean_lines.lines
line = lines[linenum]
joined_line = ''
starting_func = False
regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
match_result = Match(regexp, line)
if match_result:
# If the name is all caps and underscores, figure it's a macro and
# ignore it, unless it's TEST or TEST_F.
function_name = match_result.group(1).split()[-1]
if function_name == 'TEST' or function_name == 'TEST_F' or (
not Match(r'[A-Z_]+$', function_name)):
starting_func = True
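# For example, 'void Foo::Bar(int a) {' matches with group(1) ==
# 'void Foo::Bar'; its last word 'Foo::Bar' is not ALL_CAPS, so this is
# treated as a function start. 'MY_MACRO(x)' is skipped as a macro,
# while 'TEST(Suite, Name) {' is tracked despite being all caps.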
if starting_func:
body_found = False
for start_linenum in xrange(linenum, clean_lines.NumLines()):
start_line = lines[start_linenum]
joined_line += ' ' + start_line.lstrip()
if Search(r'(;|})', start_line): # Declarations and trivial functions
body_found = True
break # ... ignore
elif Search(r'{', start_line):
body_found = True
function = Search(r'((\w|:)*)\(', line).group(1)
if Match(r'TEST', function): # Handle TEST... macros
parameter_regexp = Search(r'(\(.*\))', joined_line)
if parameter_regexp: # Ignore bad syntax
function += parameter_regexp.group(1)
else:
function += '()'
function_state.Begin(function)
break
if not body_found:
# No body for the function (or evidence of a non-function) was found.
error(filename, linenum, 'readability/fn_size', 5,
'Lint failed to find start of function body.')
elif Match(r'^\}\s*$', line): # function end
function_state.Check(error, filename, linenum)
function_state.End()
elif not Match(r'^\s*$', line):
function_state.Count() # Count non-blank/non-comment lines.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
def CheckComment(line, filename, linenum, next_line_start, error):
"""Checks for common mistakes in comments.
Args:
line: The line in question.
filename: The name of the current file.
linenum: The number of the line to check.
next_line_start: The first non-whitespace column of the next line.
error: The function to call with any errors found.
"""
commentpos = line.find('//')
if commentpos != -1:
# Check if the // may be in quotes. If so, ignore it
# Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
if (line.count('"', 0, commentpos) -
line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
# Allow one space for new scopes, two spaces otherwise:
if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
((commentpos >= 1 and
line[commentpos-1] not in string.whitespace) or
(commentpos >= 2 and
line[commentpos-2] not in string.whitespace))):
error(filename, linenum, 'whitespace/comments', 2,
'At least two spaces is best between code and comments')
# Checks for common mistakes in TODO comments.
comment = line[commentpos:]
match = _RE_PATTERN_TODO.match(comment)
if match:
# One whitespace is correct; zero whitespace is handled elsewhere.
leading_whitespace = match.group(1)
if len(leading_whitespace) > 1:
error(filename, linenum, 'whitespace/todo', 2,
'Too many spaces before TODO')
username = match.group(2)
# if not username:
# error(filename, linenum, 'readability/todo', 2,
# 'Missing username in TODO; it should look like '
# '"// TODO(my_username): Stuff."')
middle_whitespace = match.group(3)
# Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
if middle_whitespace != ' ' and middle_whitespace != '':
error(filename, linenum, 'whitespace/todo', 2,
'TODO(my_username) should be followed by a space')
# If the comment contains an alphanumeric character, there
# should be a space somewhere between it and the //.
if Match(r'//[^ ]*\w', comment):
error(filename, linenum, 'whitespace/comments', 4,
'Should have a space between // and comment')
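# A few illustrative inputs and the warnings produced above:
#   'x = 1; // note'         -> 'At least two spaces is best between code
#                                and comments'
#   'x = 1;  //TODO(bob): y' -> 'Should have a space between // and comment'
#   '//  TODO(bob): y'       -> 'Too many spaces before TODO'
#   '// TODO(bob):y'         -> 'TODO(my_username) should be followed by
#                                a space'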
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
"""Checks for improper use of DISALLOW* macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
r'DISALLOW_EVIL_CONSTRUCTORS|'
r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
if not matched:
return
if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
if nesting_state.stack[-1].access != 'private':
error(filename, linenum, 'readability/constructors', 3,
'%s must be in the private: section' % matched.group(1))
else:
# Found DISALLOW* macro outside a class declaration, or perhaps it
# was used inside a function when it should have been part of the
# class declaration. We could issue a warning here, but it
# probably resulted in a compiler error already.
pass
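# For example (illustrative): inside a class whose current access level
# is 'public', a line 'DISALLOW_COPY_AND_ASSIGN(Foo);' is reported with
# 'DISALLOW_COPY_AND_ASSIGN must be in the private: section'; the same
# line under 'private:' passes.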
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
"""Checks for the correctness of various spacing issues in the code.
Things we check for: spaces around operators, spaces after
if/for/while/switch, no spaces around parens in function calls, two
spaces between code and comment, don't start a block with a blank
line, don't end a function with a blank line, don't add a blank line
after public/protected/private, don't have too many blank lines in a row.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw = clean_lines.lines_without_raw_strings
line = raw[linenum]
# Before nixing comments, check if the line is blank for no good
# reason. This includes the first line after a block is opened, and
# blank lines at the end of a function (i.e., right before a line like '}').
#
# Skip all the blank line checks if we are immediately inside a
# namespace body. In other words, don't issue blank line warnings
# for this block:
# namespace {
#
# }
#
# A warning about missing end of namespace comments will be issued instead.
#
# Also skip blank line checks for 'extern "C"' blocks, which are formatted
# like namespaces.
if (IsBlankLine(line) and
not nesting_state.InNamespaceBody() and
not nesting_state.InExternC()):
elided = clean_lines.elided
prev_line = elided[linenum - 1]
prevbrace = prev_line.rfind('{')
# TODO(unknown): Don't complain if line before blank line, and line after,
# both start with alnums and are indented the same amount.
# This ignores whitespace at the start of a namespace block
# because those are not usually indented.
if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
# OK, we have a blank line at the start of a code block. Before we
# complain, we check if it is an exception to the rule: The previous
# non-empty line has the parameters of a function header that are indented
# 4 spaces (because they did not fit in an 80-column line when placed on
# the same line as the function name). We also check for the case where
# the previous line is indented 6 spaces, which may happen when the
# initializers of a constructor do not fit into an 80-column line.
exception = False
if Match(r' {6}\w', prev_line): # Initializer list?
# We are looking for the opening column of initializer list, which
# should be indented 4 spaces to cause 6 space indentation afterwards.
search_position = linenum-2
while (search_position >= 0
and Match(r' {6}\w', elided[search_position])):
search_position -= 1
exception = (search_position >= 0
and elided[search_position][:5] == ' :')
else:
# Search for the function arguments or an initializer list. We use a
# simple heuristic here: if the line is indented 4 spaces and we have a
# closing paren, without the opening paren, followed by an opening brace
# or colon (for initializer lists) we assume that it is the last line of
# a function header. If we have a colon indented 4 spaces, it is an
# initializer list.
exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
prev_line)
or Match(r' {4}:', prev_line))
if not exception:
error(filename, linenum, 'whitespace/blank_line', 2,
'Redundant blank line at the start of a code block '
'should be deleted.')
# Ignore blank lines at the end of a block in a long if-else
# chain, like this:
# if (condition1) {
# // Something followed by a blank line
#
# } else if (condition2) {
# // Something else
# }
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
if (next_line
and Match(r'\s*}', next_line)
and next_line.find('} else ') == -1):
error(filename, linenum, 'whitespace/blank_line', 3,
'Redundant blank line at the end of a code block '
'should be deleted.')
# matched = Match(r'\s*(public|protected|private):', prev_line)
# if matched:
# error(filename, linenum, 'whitespace/blank_line', 3,
# 'Do not leave a blank line after "%s:"' % matched.group(1))
# Next, check comments
next_line_start = 0
if linenum + 1 < clean_lines.NumLines():
next_line = raw[linenum + 1]
next_line_start = len(next_line) - len(next_line.lstrip())
CheckComment(line, filename, linenum, next_line_start, error)
# get rid of comments and strings
line = clean_lines.elided[linenum]
# You shouldn't have spaces before your brackets, except maybe after
# 'delete []' or 'return []() {};'
if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
error(filename, linenum, 'whitespace/braces', 5,
'Extra space before [')
# In a range-based for, we want spaces before and after the colon, but
# not around "::" tokens that might appear.
if (Search(r'for *\(.*[^:]:[^: ]', line) or
Search(r'for *\(.*[^: ]:[^:]', line)):
error(filename, linenum, 'whitespace/forcolon', 2,
'Missing space around colon in range-based for loop')
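# A few illustrative inputs for the two checks just above:
#   'int a [5];'         -> 'Extra space before ['
#   'delete [] p;'       -> allowed by the delete/return exception
#   'for (auto x: v) {'  -> 'Missing space around colon in range-based
#                            for loop'
#   'for (auto x : v) {' -> accepted; '::' tokens are also left alone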
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around operators.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Don't try to do spacing checks for operator methods. Do this by
# replacing the troublesome characters with something else,
# preserving column position for all other characters.
#
# The replacement is done repeatedly to avoid false positives from
# operators that call operators.
while True:
match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
if match:
line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
else:
break
# We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
# Otherwise not. Note we only check for non-spaces on *both* sides;
# sometimes people put non-spaces on one side when aligning ='s among
# many lines (not that this is behavior that I approve of...)
if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
error(filename, linenum, 'whitespace/operators', 4,
'Missing spaces around =')
# It's ok not to have spaces around binary operators like + - * /, but if
# there's too little whitespace, we get concerned. It's hard to tell,
# though, so we punt on this one for now. TODO.
# You should always have whitespace around binary operators.
#
# Check <= and >= first to avoid false positives with < and >, then
# check non-include lines for spacing around < and >.
#
# If the operator is followed by a comma, assume it's being used in a
# macro context and don't do any checks. This avoids false
# positives.
#
# Note that && is not included here. Those are checked separately
# in CheckRValueReference.
match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around %s' % match.group(1))
elif not Match(r'#.*include', line):
# Look for < that is not surrounded by spaces. This is only
# triggered if both sides are missing spaces, even though
# technically should should flag if at least one side is missing a
# space. This is done to avoid some false positives with shifts.
match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
if match:
(_, _, end_pos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if end_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <')
# Look for > that is not surrounded by spaces. Similar to the
# above, we only trigger if both sides are missing spaces to avoid
# false positives with shifts.
match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
if match:
(_, _, start_pos) = ReverseCloseExpression(
clean_lines, linenum, len(match.group(1)))
if start_pos <= -1:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >')
# We allow no-spaces around << when used like this: 10<<20, but
# not otherwise (particularly, not when used as streams)
# We also allow operators following an opening parenthesis, since
# those tend to be macros that deal with operators.
match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<([^\s,=])', line)
if (match and match.group(1) != '(' and
not (match.group(1).isdigit() and match.group(2).isdigit()) and
not (match.group(1) == 'operator' and match.group(2) == ';')):
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around <<')
# We allow no-spaces around >> for almost anything. This is because
# C++11 allows ">>" to close nested templates, which accounts for
# most cases when ">>" is not followed by a space.
#
# We still warn on ">>" followed by alpha character, because that is
# likely due to ">>" being used for right shifts, e.g.:
# value >> alpha
#
# When ">>" is used to close templates, the alphanumeric letter that
# follows would be part of an identifier, and there should still be
# a space separating the template type and the identifier.
# type<type<type>> alpha
match = Search(r'>>[a-zA-Z_]', line)
if match:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around >>')
# There shouldn't be space around unary operators
match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
if match:
error(filename, linenum, 'whitespace/operators', 4,
'Extra space for operator %s' % match.group(1))
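# A minimal usage sketch (illustrative only; 'collect' is a hypothetical
# error callback):
#   >>> msgs = []
#   >>> def collect(fname, num, category, level, msg): msgs.append(msg)
#   >>> CheckOperatorSpacing('t.cc', CleansedLines(['int i=0;']), 0, collect)
#   >>> msgs
#   ['Missing spaces around =']
# whereas 'if (a=Foo())' is tolerated by the if/while exception above.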
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing around parentheses.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# No spaces after an if, while, switch, or for
match = Search(r' (if\(|for\(|while\(|switch\()', line)
if match:
error(filename, linenum, 'whitespace/parens', 5,
'Missing space before ( in %s' % match.group(1))
# For if/for/while/switch, the left and right parens should be
# consistent about how many spaces are inside the parens, and
# there should either be zero or one spaces inside the parens.
# We don't want: "if ( foo)" or "if ( foo )".
# Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
match = Search(r'\b(if|for|while|switch)\s*'
r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
line)
if match:
if len(match.group(2)) != len(match.group(4)):
if not (match.group(3) == ';' and
len(match.group(2)) == 1 + len(match.group(4)) or
not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
error(filename, linenum, 'whitespace/parens', 5,
'Mismatching spaces inside () in %s' % match.group(1))
if len(match.group(2)) not in [0, 1]:
error(filename, linenum, 'whitespace/parens', 5,
'Should have zero or one spaces inside ( and ) in %s' %
match.group(1))
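# Illustrative inputs:
#   '  if(x) {'        -> 'Missing space before ( in if('
#   'if ( foo) {'      -> 'Mismatching spaces inside () in if'
#   'if (  foo  ) {'   -> 'Should have zero or one spaces inside ( and )
#                          in if'
#   'if (foo) {' and 'if ( foo ) {' are both accepted.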
def CheckCommaSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas and semicolons.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
raw = clean_lines.lines_without_raw_strings
line = clean_lines.elided[linenum]
# You should always have a space after a comma (either as fn arg or operator)
#
# This does not apply when the non-space character following the
# comma is another comma, since the only time when that happens is
# for empty macro arguments.
#
# We run this check in two passes: first pass on elided lines to
# verify that lines contain missing whitespaces, second pass on raw
# lines to confirm that those missing whitespaces are not due to
# elided comments.
if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
error(filename, linenum, 'whitespace/comma', 3,
'Missing space after ,')
# You should always have a space after a semicolon
# except for few corner cases
# TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
# space after the ;
if Search(r';[^\s};\\)/]', line):
error(filename, linenum, 'whitespace/semicolon', 3,
'Missing space after ;')
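# A minimal usage sketch (illustrative only; 'collect' is a hypothetical
# error callback):
#   >>> msgs = []
#   >>> def collect(fname, num, category, level, msg): msgs.append(msg)
#   >>> CheckCommaSpacing('t.cc', CleansedLines(['f(a,b);']), 0, collect)
#   >>> msgs
#   ['Missing space after ,']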
def CheckBracesSpacing(filename, clean_lines, linenum, error):
"""Checks for horizontal spacing near commas.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Except after an opening paren, or after another opening brace (in case of
# an initializer list, for instance), you should have spaces before your
# braces. And since you should never have braces at the beginning of a line,
# this is an easy test.
match = Match(r'^(.*[^ ({]){', line)
if match:
# Try a bit harder to check for brace initialization. This
# happens in one of the following forms:
# Constructor() : initializer_list_{} { ... }
# Constructor{}.MemberFunction()
# Type variable{};
# FunctionCall(type{}, ...);
# LastArgument(..., type{});
# LOG(INFO) << type{} << " ...";
# map_of_type[{...}] = ...;
# ternary = expr ? new type{} : nullptr;
# OuterTemplate<InnerTemplateConstructor<Type>{}>
#
# We check for the character following the closing brace, and
# silence the warning if it's one of those listed above, i.e.
# "{.;,)<>]:".
#
# To account for nested initializer list, we allow any number of
# closing braces up to "{;,)<". We can't simply silence the
# warning on first sight of closing brace, because that would
# cause false negatives for things that are not initializer lists.
# Silence this: But not this:
# Outer{ if (...) {
# Inner{...} if (...){ // Missing space before {
# }; }
#
# There is a false negative with this approach if people inserted
# spurious semicolons, e.g. "if (cond){};", but we will catch the
# spurious semicolon with a separate check.
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
trailing_text = ''
if endpos > -1:
trailing_text = endline[endpos:]
for offset in xrange(endlinenum + 1,
min(endlinenum + 3, clean_lines.NumLines() - 1)):
trailing_text += clean_lines.elided[offset]
if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before {')
# Make sure '} else {' has spaces.
if Search(r'}else', line):
error(filename, linenum, 'whitespace/braces', 5,
'Missing space before else')
# You shouldn't have a space before a semicolon at the end of the line.
# There's a special case for "for" since the style guide allows space before
# the semicolon there.
if Search(r':\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Semicolon defining empty statement. Use {} instead.')
elif Search(r'^\s*;\s*$', line):
error(filename, linenum, 'whitespace/semicolon', 5,
'Line contains only semicolon. If this should be an empty statement, '
'use {} instead.')
elif (Search(r'\s+;\s*$', line) and
not Search(r'\bfor\b', line)):
error(filename, linenum, 'whitespace/semicolon', 5,
'Extra space before last semicolon. If this should be an empty '
'statement, use {} instead.')
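# Illustrative inputs for the checks above:
#   '}else {'      -> 'Missing space before else'
#   'int x = 1 ;'  -> 'Extra space before last semicolon. ...'
#   ';' (alone)    -> 'Line contains only semicolon. ...'
#   'if (cond){'   -> typically 'Missing space before {', unless what
#                     follows the matching '}' marks it as an initializer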
def IsDecltype(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is decltype().
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is decltype() expression, False otherwise.
"""
(text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
if start_col < 0:
return False
if Search(r'\bdecltype\s*$', text[0:start_col]):
return True
return False
def IsTemplateParameterList(clean_lines, linenum, column):
"""Check if the token ending on (linenum, column) is the end of template<>.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is end of a template parameter list, False otherwise.
"""
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, column)
if (startpos > -1 and
Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
return True
return False
def IsRValueType(clean_lines, nesting_state, linenum, column):
"""Check if the token ending on (linenum, column) is a type.
Assumes that text to the right of the column is "&&" or a function
name.
Args:
clean_lines: A CleansedLines instance containing the file.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
linenum: the number of the line to check.
column: end column of the token to check.
Returns:
True if this token is a type, False if we are not sure.
"""
prefix = clean_lines.elided[linenum][0:column]
# Get one word to the left. If we failed to do so, this is most
# likely not a type, since it's unlikely that the type name and "&&"
# would be split across multiple lines.
match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
if not match:
return False
# Check text following the token. If it's "&&>" or "&&," or "&&...", it's
# most likely an rvalue reference used inside a template.
suffix = clean_lines.elided[linenum][column:]
if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
return True
# Check for simple type and end of templates:
# int&& variable
# vector<int>&& variable
#
# Because this function is called recursively, we also need to
# recognize pointer and reference types:
# int* Function()
# int& Function()
if match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
'short', 'int', 'long', 'signed', 'unsigned',
'float', 'double', 'void', 'auto', '>', '*', '&']:
return True
# If we see a close parenthesis, look for decltype on the other side.
# decltype would unambiguously identify a type, anything else is
# probably a parenthesized expression and not a type.
if match.group(2) == ')':
return IsDecltype(
clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
# Check for casts and cv-qualifiers.
# match.group(1) remainder
# -------------- ---------
# const_cast< type&&
# const type&&
# type const&&
if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
r'reinterpret_cast\s*<|\w+\s)\s*$',
match.group(1)):
return True
# Look for a preceding symbol that might help differentiate the context.
# These are the cases that would be ambiguous:
# match.group(1) remainder
# -------------- ---------
# Call ( expression &&
# Declaration ( type&&
# sizeof ( type&&
# if ( expression &&
# while ( expression &&
# for ( type&&
# for( ; expression &&
# statement ; type&&
# block { type&&
# constructor { expression &&
start = linenum
line = match.group(1)
match_symbol = None
while start >= 0:
# We want to skip over identifiers and commas to get to a symbol.
# Commas are skipped so that we can find the opening parenthesis
# for function parameter lists.
match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
if match_symbol:
break
start -= 1
line = clean_lines.elided[start]
if not match_symbol:
# Probably the first statement in the file is an rvalue reference
return True
if match_symbol.group(2) == '}':
# Found a closing brace, probably an indication of this:
# block{} type&&
return True
if match_symbol.group(2) == ';':
# Found semicolon, probably one of these:
# for(; expression &&
# statement; type&&
# Look for the previous 'for(' in the previous lines.
before_text = match_symbol.group(1)
for i in xrange(start - 1, max(start - 6, 0), -1):
before_text = clean_lines.elided[i] + before_text
if Search(r'for\s*\([^{};]*$', before_text):
# This is the condition inside a for-loop
return False
# Did not find a for-init-statement before this semicolon, so this
# is probably a new statement and not a condition.
return True
if match_symbol.group(2) == '{':
# Found opening brace, probably one of these:
# block{ type&& = ... ; }
# constructor{ expression && expression }
# Look for a closing brace or a semicolon. If we see a semicolon
# first, this is probably an rvalue reference.
line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
end = start
depth = 1
while True:
for ch in line:
if ch == ';':
return True
elif ch == '{':
depth += 1
elif ch == '}':
depth -= 1
if depth == 0:
return False
end += 1
if end >= clean_lines.NumLines():
break
line = clean_lines.elided[end]
# Incomplete program?
return False
if match_symbol.group(2) == '(':
# Opening parenthesis. Need to check what's to the left of the
# parenthesis. Look back one extra line for additional context.
before_text = match_symbol.group(1)
if linenum > 1:
before_text = clean_lines.elided[linenum - 1] + before_text
# Patterns that are likely to be types:
# [](type&&
# for (type&&
# sizeof(type&&
# operator=(type&&
#
if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
return True
# Patterns that are likely to be expressions:
# if (expression &&
# while (expression &&
# : initializer(expression &&
# , initializer(expression &&
# ( FunctionCall(expression &&
# + FunctionCall(expression &&
# + (expression &&
#
# The last '+' represents operators such as '+' and '-'.
if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
return False
# Something else. Check that tokens to the left look like
# return_type function_name
match_func = Match(r'^(.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
match_symbol.group(1))
if match_func:
# Check for constructors, which don't have return types.
if Search(r'\bexplicit$', match_func.group(1)):
return True
implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
if (implicit_constructor and
implicit_constructor.group(1) == implicit_constructor.group(2)):
return True
return IsRValueType(clean_lines, nesting_state, linenum,
len(match_func.group(1)))
# Nothing before the function name. If this is inside a block scope,
# this is probably a function call.
return not (nesting_state.previous_stack_top and
nesting_state.previous_stack_top.IsBlockInfo())
if match_symbol.group(2) == '>':
# Possibly a closing bracket, check that what's on the other side
# looks like the start of a template.
return IsTemplateParameterList(
clean_lines, start, len(match_symbol.group(1)))
# Some other symbol, usually something like "a=b&&c". This is most
# likely not a type.
return False
def IsRValueAllowed(clean_lines, linenum):
"""Check if RValue reference is allowed within some range of lines.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if line is within the region where RValue references are allowed.
"""
for i in xrange(linenum, 0, -1):
line = clean_lines.elided[i]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
if not line.endswith('PUSH'):
return False
for j in xrange(linenum, clean_lines.NumLines(), 1):
line = clean_lines.elided[j]
if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
return line.endswith('POP')
return False
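# Usage sketch in C++ sources (illustrative; the markers are assumed to
# be no-op macros so they survive into the elided lines):
#   GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH
#   void Accept(Callback&& cb);   // no build/c++11 warning in this region
#   GOOGLE_ALLOW_RVALUE_REFERENCES_POP
# The scan above requires the nearest marker before the line to be PUSH
# and the nearest marker after it to be POP.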
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
"""Check for rvalue references.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Find lines missing spaces around &&.
# TODO(unknown): currently we don't check for rvalue references
# with spaces surrounding the && to avoid false positives with
# boolean expressions.
line = clean_lines.elided[linenum]
match = Match(r'^(.*\S)&&', line)
if not match:
match = Match(r'(.*)&&\S', line)
if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
return
# Either poorly formed && or an rvalue reference, check the context
# to get a more accurate error message. Mostly we want to determine
# if what's to the left of "&&" is a type or not.
and_pos = len(match.group(1))
if IsRValueType(clean_lines, nesting_state, linenum, and_pos):
if not IsRValueAllowed(clean_lines, linenum):
error(filename, linenum, 'build/c++11', 3,
'RValue references are an unapproved C++ feature.')
else:
error(filename, linenum, 'whitespace/operators', 3,
'Missing spaces around &&')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
"""Checks for additional blank line issues related to sections.
Currently the only thing checked here is blank line before protected/private.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
class_info: A _ClassInfo objects.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Skip checks if the class is small, where small means 25 lines or less.
# 25 lines seems like a good cutoff since that's the usual height of
# terminals, and any class that can't fit in one screen can't really
# be considered "small".
#
# Also skip checks if we are on the first line. This accounts for
# classes that look like
# class Foo { public: ... };
#
# If we didn't find the end of the class, last_line would be zero,
# and the check will be skipped by the first condition.
if (class_info.last_line - class_info.starting_linenum <= 24 or
linenum <= class_info.starting_linenum):
return
matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
if matched:
# Issue warning if the line before public/protected/private was
# not a blank line, but don't do this if the previous line contains
# "class" or "struct". This can happen two ways:
# - We are at the beginning of the class.
# - We are forward-declaring an inner class that is semantically
# private, but needed to be public for implementation reasons.
# Also ignores cases where the previous line ends with a backslash, as is
# common when defining classes inside C macros.
prev_line = clean_lines.lines[linenum - 1]
if (not IsBlankLine(prev_line) and
not Search(r'\b(class|struct)\b', prev_line) and
not Search(r'\\$', prev_line)):
# Try a bit harder to find the beginning of the class. This is to
# account for multi-line base-specifier lists, e.g.:
# class Derived
# : public Base {
end_class_head = class_info.starting_linenum
for i in range(class_info.starting_linenum, linenum):
if Search(r'\{\s*$', clean_lines.lines[i]):
end_class_head = i
break
if end_class_head < linenum - 1:
error(filename, linenum, 'whitespace/blank_line', 3,
'"%s:" should be preceded by a blank line' % matched.group(1))
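# For example (illustrative), in a class body longer than 24 lines a
# 'private:' label immediately following 'void Method();' is reported
# with '"private:" should be preceded by a blank line'; inserting a
# blank line (or having 'class'/'struct' on the previous line) passes.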
def GetPreviousNonBlankLine(clean_lines, linenum):
"""Return the most recent non-blank line and its line number.
Args:
clean_lines: A CleansedLines instance containing the file contents.
linenum: The number of the line to check.
Returns:
A tuple with two elements. The first element is the contents of the last
non-blank line before the current line, or the empty string if this is the
first non-blank line. The second is the line number of that line, or -1
if this is the first non-blank line.
"""
prevlinenum = linenum - 1
while prevlinenum >= 0:
prevline = clean_lines.elided[prevlinenum]
if not IsBlankLine(prevline): # if not a blank line...
return (prevline, prevlinenum)
prevlinenum -= 1
return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
"""Looks for misplaced braces (e.g. at the end of line).
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum] # get rid of comments and strings
#if Match(r'\s*{\s*$', line):
# # We allow an open brace to start a line in the case where someone is using
# # braces in a block to explicitly create a new scope, which is commonly used
# # to control the lifetime of stack-allocated variables. Braces are also
# # used for brace initializers inside function calls. We don't detect this
# # perfectly: we just don't complain if the last non-whitespace character on
# # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
# # previous line starts a preprocessor block.
# prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
# if (not Search(r'[,;:}{()]\s*$', prevline) and
# not Match(r'\s*#', prevline) and
# os.path.splitext(filename)[1] != ".hh"):
# #not Match(r'\s*}\s*', prevline) and
# #not Match(r'^\s*{\s*&', prevline)):
# error(filename, linenum, 'whitespace/braces', 4,
# '{ should never be at the end of the previous line')
# An else clause should not be on the same line as the preceding closing brace.
if Match(r'\s*}\s*else\b\s*(?:if\b|\{|$)', line):
# prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
#if Match(r'\s*}\s*$', line):
error(filename, linenum, 'whitespace/newline', 4,
'An else should not appear on the same line as the preceding }')
if Match(r'\s*else.*{$', line):
error(filename, linenum, 'whitespace/newline', 4,
'An else should not appear on the same line as the next {')
# If braces come on one side of an else, they should be on both.
# However, we have to worry about "else if" that spans multiple lines!
# if Search(r'else if\s*\(', line): # could be multi-line if
# brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
# # find the ( after the if
# pos = line.find('else if')
# pos = line.find('(', pos)
# if pos > 0:
# (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
# brace_on_right = endline[endpos:].find('{') != -1
# if brace_on_left != brace_on_right: # must be brace after if
# error(filename, linenum, 'readability/braces', 5,
# 'If an else has a brace on one side, it should have it on both')
# elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
# error(filename, linenum, 'readability/braces', 5,
# 'If an else has a brace on one side, it should have it on both')
# Likewise, an else should never have the else clause on the same line
if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
error(filename, linenum, 'whitespace/newline', 4,
'Else clause should never be on same line as else (use 2 lines)')
# In the same way, a do/while should never be on one line
if Match(r'\s*do [^\s{]', line):
error(filename, linenum, 'whitespace/newline', 4,
'do/while clauses should not be on a single line')
# Check single-line if/else bodies. The style guide says 'curly braces are not
# required for single-line statements'. We additionally allow multi-line,
# single statements, but we reject anything with more than one semicolon in
# it. This means that the first semicolon after the if should be at the end of
# its line, and the line after that should have an indent level equal to or
# lower than the if. We also check for ambiguous if/else nesting without
# braces.
if_else_match = Search(r'\b(if\s*\(|else\b)', line)
if if_else_match and not Match(r'\s*#', line):
if_indent = GetIndentLevel(line)
endline, endlinenum, endpos = line, linenum, if_else_match.end()
if_match = Search(r'\bif\s*\(', line)
if if_match:
# This could be a multiline if condition, so find the end first.
pos = if_match.end() - 1
(endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
# Check for an opening brace, either directly after the if or on the next
# line. If found, this isn't a single-statement conditional.
if (not Match(r'\s*{', endline[endpos:])
and not (Match(r'\s*$', endline[endpos:])
and endlinenum < (len(clean_lines.elided) - 1)
and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
while (endlinenum < len(clean_lines.elided)
and ';' not in clean_lines.elided[endlinenum][endpos:]):
endlinenum += 1
endpos = 0
if endlinenum < len(clean_lines.elided):
endline = clean_lines.elided[endlinenum]
# We allow a mix of whitespace and closing braces (e.g. for one-liner
# methods) and a single \ after the semicolon (for macros)
endpos = endline.find(';')
if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
# Semicolon isn't the last character, there's something trailing
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
elif endlinenum < len(clean_lines.elided) - 1:
# Make sure the next line is dedented
next_line = clean_lines.elided[endlinenum + 1]
next_indent = GetIndentLevel(next_line)
# With ambiguous nested if statements, this will error out on the
# if that *doesn't* match the else, regardless of whether it's the
# inner one or outer one.
if (if_match and Match(r'\s*else\b', next_line)
and next_indent != if_indent):
error(filename, linenum, 'readability/braces', 4,
'Else clause should be indented at the same level as if. '
'Ambiguous nested if/else chains require braces.')
elif next_indent > if_indent:
error(filename, linenum, 'readability/braces', 4,
'If/else bodies with multiple statements require braces')
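# Illustrative inputs for the brace/else checks above:
#   '} else {'           -> 'An else should not appear on the same line
#                            as the preceding }'
#   'else x = 1;'        -> 'Else clause should never be on same line as
#                            else (use 2 lines)'
#   'do x(); while (y);' -> 'do/while clauses should not be on a single line'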
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
"""Looks for redundant trailing semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Block bodies should not be followed by a semicolon. Due to C++11
# brace initialization, there are more places where semicolons are
# required than not, so we use a whitelist approach to check these
# rather than a blacklist. These are the places where "};" should
# be replaced by just "}":
# 1. Some flavor of block following closing parenthesis:
# for (;;) {};
# while (...) {};
# switch (...) {};
# Function(...) {};
# if (...) {};
# if (...) else if (...) {};
#
# 2. else block:
# if (...) else {};
#
# 3. const member function:
# Function(...) const {};
#
# 4. Block following some statement:
# x = 42;
# {};
#
# 5. Block at the beginning of a function:
# Function(...) {
# {};
# }
#
# Note that naively checking for the preceding "{" will also match
# braces inside multi-dimensional arrays, but this is fine since
# that expression will not contain semicolons.
#
# 6. Block following another block:
# while (true) {}
# {};
#
# 7. End of namespaces:
# namespace {};
#
# These semicolons seem far more common than other kinds of
# redundant semicolons, possibly due to people converting classes
# to namespaces. For now we do not warn for this case.
#
# Try matching case 1 first.
match = Match(r'^(.*\)\s*)\{', line)
if match:
# Matched closing parenthesis (case 1). Check the token before the
# matching opening parenthesis, and don't warn if it looks like a
# macro. This avoids these false positives:
# - macro that defines a base class
# - multi-line macro that defines a base class
# - macro that defines the whole class-head
#
# But we still issue warnings for macros that we know are safe to
# warn, specifically:
# - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
# - TYPED_TEST
# - INTERFACE_DEF
# - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
#
# We implement a whitelist of safe macros instead of a blacklist of
# unsafe macros, even though the latter appears less frequently in
# google code and would have been easier to implement. This is because
# getting the whitelist wrong means at worst some extra semicolons,
# while getting the blacklist wrong would result in compile errors.
#
# In addition to macros, we also don't want to warn on compound
# literals and lambdas.
closing_brace_pos = match.group(1).rfind(')')
opening_parenthesis = ReverseCloseExpression(
clean_lines, linenum, closing_brace_pos)
if opening_parenthesis[2] > -1:
line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
func = Match(r'^(.*\])\s*$', line_prefix)
if ((macro and
macro.group(1) not in (
'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
(func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
Search(r'\s+=\s*$', line_prefix)):
match = None
if (match and
opening_parenthesis[1] > 1 and
Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
# Multi-line lambda-expression
match = None
else:
# Try matching cases 2-3.
match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
if not match:
# Try matching cases 4-6. These are always matched on separate lines.
#
# Note that we can't simply concatenate the previous line to the
# current line and do a single match, otherwise we may output
# duplicate warnings for the blank line case:
# if (cond) {
# // blank line
# }
prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
if prevline and Search(r'[;{}]\s*$', prevline):
match = Match(r'^(\s*)\{', line)
# Check matching closing brace
if match:
(endline, endlinenum, endpos) = CloseExpression(
clean_lines, linenum, len(match.group(1)))
if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
# Current {} pair is eligible for semicolon check, and we have found
# the redundant semicolon, output warning here.
#
# Note: because we are scanning forward for opening braces, and
# outputting warnings for the matching closing brace, if there are
# nested blocks with trailing semicolons, we will get the error
# messages in reversed order.
error(filename, endlinenum, 'readability/braces', 4,
"You don't need a ; after a }")
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
"""Look for empty loop/conditional body with only a single semicolon.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Search for loop keywords at the beginning of the line. Because only
# whitespaces are allowed before the keywords, this will also ignore most
# do-while loops, since those lines should start with a closing brace.
#
# We also check "if" blocks here, since an empty conditional block
# is likely an error.
line = clean_lines.elided[linenum]
matched = Match(r'\s*(for|while|if)\s*\(', line)
if matched:
# Find the end of the conditional expression
(end_line, end_linenum, end_pos) = CloseExpression(
clean_lines, linenum, line.find('('))
# Output warning if what follows the condition expression is a semicolon.
# No warning for all other cases, including whitespace or newline, since we
# have a separate check for semicolons preceded by whitespace.
if end_pos >= 0 and Match(r';', end_line[end_pos:]):
if matched.group(1) == 'if':
error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
'Empty conditional bodies should use {}')
elif matched.group(1) != "while":
error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
'Empty loop bodies should use {} or continue')
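# Illustrative inputs:
#   'if (cond);'    -> 'Empty conditional bodies should use {}'
#   'for (;;);'     -> 'Empty loop bodies should use {} or continue'
#   'while (cond);' -> deliberately skipped: it may be the tail of a
#                      do x(); while (cond); split across lines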
def FindCheckMacro(line):
"""Find a replaceable CHECK-like macro.
Args:
line: line to search on.
Returns:
(macro name, start position), or (None, -1) if no replaceable
macro is found.
"""
for macro in _CHECK_MACROS:
i = line.find(macro)
if i >= 0:
# Find opening parenthesis. Do a regular expression match here
# to make sure that we are matching the expected CHECK macro, as
# opposed to some other macro that happens to contain the CHECK
# substring.
matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
if not matched:
continue
return (macro, len(matched.group(1)))
return (None, -1)
def CheckCheck(filename, clean_lines, linenum, error):
"""Checks the use of CHECK and EXPECT macros.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
# Decide the set of replacement macros that should be suggested
lines = clean_lines.elided
(check_macro, start_pos) = FindCheckMacro(lines[linenum])
if not check_macro:
return
# Find end of the boolean expression by matching parentheses
(last_line, end_line, end_pos) = CloseExpression(
clean_lines, linenum, start_pos)
if end_pos < 0:
return
if linenum == end_line:
expression = lines[linenum][start_pos + 1:end_pos - 1]
else:
expression = lines[linenum][start_pos + 1:]
for i in xrange(linenum + 1, end_line):
expression += lines[i]
expression += last_line[0:end_pos - 1]
# Parse expression so that we can take parentheses into account.
# This avoids false positives for inputs like "CHECK((a < 4) == b)",
# which is not replaceable by CHECK_LE.
lhs = ''
rhs = ''
operator = None
while expression:
matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
r'==|!=|>=|>|<=|<|\()(.*)$', expression)
if matched:
token = matched.group(1)
if token == '(':
# Parenthesized operand
expression = matched.group(2)
(end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
if end < 0:
return # Unmatched parenthesis
lhs += '(' + expression[0:end]
expression = expression[end:]
elif token in ('&&', '||'):
# Logical and/or operators. This means the expression
# contains more than one term, for example:
# CHECK(42 < a && a < b);
#
# These are not replaceable with CHECK_LE, so bail out early.
return
elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
# Non-relational operator
lhs += token
expression = matched.group(2)
else:
# Relational operator
operator = token
rhs = matched.group(2)
break
else:
# Unparenthesized operand. Instead of appending to lhs one character
# at a time, we do another regular expression match to consume several
# characters at once if possible. Trivial benchmark shows that this
# is more efficient when the operands are longer than a single
# character, which is generally the case.
matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
if not matched:
matched = Match(r'^(\s*\S)(.*)$', expression)
if not matched:
break
lhs += matched.group(1)
expression = matched.group(2)
# Only apply checks if we got all parts of the boolean expression
if not (lhs and operator and rhs):
return
# Check that rhs does not contain logical operators. We already know
# that lhs is fine since the loop above parses out && and ||.
if rhs.find('&&') > -1 or rhs.find('||') > -1:
return
# At least one of the operands must be a constant literal. This is
# to avoid suggesting replacements for unprintable things like
# CHECK(variable != iterator)
#
# The following pattern matches decimal, hex integers, strings, and
# characters (in that order).
lhs = lhs.strip()
rhs = rhs.strip()
match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
if Match(match_constant, lhs) or Match(match_constant, rhs):
# Note: since we know both lhs and rhs, we can provide a more
# descriptive error message like:
# Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
# Instead of:
# Consider using CHECK_EQ instead of CHECK(a == b)
#
# We are still keeping the less descriptive message because if lhs
# or rhs gets long, the error message might become unreadable.
error(filename, linenum, 'readability/check', 2,
'Consider using %s instead of %s(a %s b)' % (
_CHECK_REPLACEMENT[check_macro][operator],
check_macro, operator))
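# For example (illustrative; _CHECK_REPLACEMENT is defined earlier in
# this file):
#   'CHECK(x == 42);'       -> 'Consider using CHECK_EQ instead of
#                               CHECK(a == b)'
#   'CHECK(a < b && b < c)' -> no suggestion; the && bails out above
#   'CHECK(result != iter)' -> no suggestion; neither operand is a
#                              constant literal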
def CheckAltTokens(filename, clean_lines, linenum, error):
"""Check alternative keywords being used in boolean expressions.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Avoid preprocessor lines
if Match(r'^\s*#', line):
return
# Last ditch effort to avoid multi-line comments. This will not help
# if the comment started before the current line or ended after the
# current line, but it catches most of the false positives. At least,
# it provides a way to work around this warning for people who use
# multi-line comments in preprocessor macros.
#
# TODO(unknown): remove this once cpplint has better support for
# multi-line comments.
if line.find('/*') >= 0 or line.find('*/') >= 0:
return
for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
error(filename, linenum, 'readability/alt_tokens', 2,
'Use operator %s instead of %s' % (
_ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
def GetLineWidth(line):
"""Determines the width of the line in column positions.
Args:
line: A string, which may be a Unicode string.
Returns:
The width of the line in column positions, accounting for Unicode
combining characters and wide characters.
"""
if isinstance(line, unicode):
width = 0
for uc in unicodedata.normalize('NFC', line):
if unicodedata.east_asian_width(uc) in ('W', 'F'):
width += 2
elif not unicodedata.combining(uc):
width += 1
return width
else:
return len(line)
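# For example (Python 2 doctest-style sketch):
#   >>> GetLineWidth(u'\u65e5\u672c')   # two wide CJK characters
#   4
#   >>> GetLineWidth('abc')
#   3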
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
error):
"""Checks rules from the 'C++ style rules' section of cppguide.html.
Most of these rules are hard to test (naming, comment style), but we
do what we can. In particular we check for 2-space indents, line lengths,
tab usage, spaces inside code, etc.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Don't use "elided" lines here, otherwise we can't check commented lines.
# Don't want to use "raw" either, because we don't want to check inside C++11
# raw strings.
raw_lines = clean_lines.lines_without_raw_strings
line = raw_lines[linenum]
if line.find('\t') != -1:
error(filename, linenum, 'whitespace/tab', 1,
'Tab found; better to use spaces')
# One or three blank spaces at the beginning of the line is weird; it's
# hard to reconcile that with 2-space indents.
# NOTE: here are the conditions rob pike used for his tests. Mine aren't
# as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces
# if(RLENGTH > 20) complain = 0;
# if(match($0, " +(error|private|public|protected):")) complain = 0;
# if(match(prev, "&& *$")) complain = 0;
# if(match(prev, "\\|\\| *$")) complain = 0;
# if(match(prev, "[\",=><] *$")) complain = 0;
# if(match($0, " <<")) complain = 0;
# if(match(prev, " +for \\(")) complain = 0;
# if(prevodd && match(prevprev, " +for \\(")) complain = 0;
scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
classinfo = nesting_state.InnermostClass()
initial_spaces = 0
cleansed_line = clean_lines.elided[linenum]
while initial_spaces < len(line) and line[initial_spaces] == ' ':
initial_spaces += 1
if line and line[-1].isspace():
error(filename, linenum, 'whitespace/end_of_line', 4,
'Line ends in whitespace. Consider deleting these extra spaces.')
# There are certain situations in which we allow one space, notably for
# section labels, and also lines containing multi-line raw strings.
elif ((initial_spaces == 1 or initial_spaces == 3) and
not Match(scope_or_label_pattern, cleansed_line) and
not (clean_lines.raw_lines[linenum] != line and
Match(r'^\s*""', line))):
error(filename, linenum, 'whitespace/indent', 3,
'Weird number of spaces at line-start. '
'Are you using a 2-space indent?')
# Check if the line is a header guard.
is_header_guard = False
if file_extension == 'h':
cppvar = GetHeaderGuardCPPVariable(filename)
if (line.startswith('#ifndef %s' % cppvar) or
line.startswith('#define %s' % cppvar) or
line.startswith('#endif // %s' % cppvar)):
is_header_guard = True
# #include lines and header guards can be long, since there's no clean way to
# split them.
#
# URLs can be long too. It's possible to split these, but it makes them
# harder to cut&paste.
#
# The "$Id:...$" comment may also get very long without it being the
# developer's fault.
if (not line.startswith('#include') and not is_header_guard and
not Match(r'^\s*//.*http(s?)://\S*$', line) and
not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
line_width = GetLineWidth(line)
extended_length = int(_line_length * 1.25)
if line_width > extended_length:
error(filename, linenum, 'whitespace/line_length', 4,
'Lines should very rarely be longer than %i characters' %
extended_length)
elif line_width > _line_length:
error(filename, linenum, 'whitespace/line_length', 2,
'Lines should be <= %i characters long' % _line_length)
if (cleansed_line.count(';') > 1 and
# for loops are allowed two ;'s (and may run over two lines).
cleansed_line.find('for') == -1 and
(GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
# It's ok to have many commands in a switch case that fits in 1 line
not ((cleansed_line.find('case ') != -1 or
cleansed_line.find('default:') != -1) and
cleansed_line.find('break;') != -1)):
error(filename, linenum, 'whitespace/newline', 0,
'More than one command on the same line')
# Some more style checks
CheckBraces(filename, clean_lines, linenum, error)
CheckTrailingSemicolon(filename, clean_lines, linenum, error)
CheckEmptyBlockBody(filename, clean_lines, linenum, error)
CheckAccess(filename, clean_lines, linenum, nesting_state, error)
CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
CheckOperatorSpacing(filename, clean_lines, linenum, error)
CheckParenthesisSpacing(filename, clean_lines, linenum, error)
CheckCommaSpacing(filename, clean_lines, linenum, error)
CheckBracesSpacing(filename, clean_lines, linenum, error)
CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
CheckCheck(filename, clean_lines, linenum, error)
CheckAltTokens(filename, clean_lines, linenum, error)
classinfo = nesting_state.InnermostClass()
if classinfo:
CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
"""Figures out what kind of header 'include' is.
Args:
fileinfo: The current file cpplint is running over. A FileInfo instance.
include: The path to a #included file.
is_system: True if the #include used <> rather than "".
Returns:
One of the _XXX_HEADER constants.
For example:
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
_C_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
_CPP_SYS_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
_LIKELY_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
... 'bar/foo_other_ext.h', False)
_POSSIBLE_MY_HEADER
>>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
_OTHER_HEADER
"""
# This is a list of all standard c++ header files, except
# those already checked for above.
is_cpp_h = include in _CPP_HEADERS or include.find(".hh") > 0
if is_system:
if is_cpp_h:
return _CPP_SYS_HEADER
else:
return _C_SYS_HEADER
# If the target file and the include we're checking share a
# basename when we drop common extensions, and the include
# lives in the same directory (or the sibling public/ directory), then
# it's likely to be owned by the target file.
target_dir, target_base = (
os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
if target_base == include_base and (
include_dir == target_dir or
include_dir == os.path.normpath(target_dir + '/../public')):
return _LIKELY_MY_HEADER
# If the target and include share some initial basename
# component, it's possible the target is implementing the
# include, so it's allowed to be first, but we'll never
# complain if it's not there.
target_first_component = _RE_FIRST_COMPONENT.match(target_base)
include_first_component = _RE_FIRST_COMPONENT.match(include_base)
if (target_first_component and include_first_component and
target_first_component.group(0) ==
include_first_component.group(0)):
return _POSSIBLE_MY_HEADER
return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
"""Check rules that are applicable to #include lines.
Strings on #include lines are NOT removed from elided line, to make
certain tasks easier. However, to prevent false positives, checks
applicable to #include lines in CheckLanguage must be put here.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
include_state: An _IncludeState instance in which the headers are inserted.
error: The function to call with any errors found.
"""
fileinfo = FileInfo(filename)
line = clean_lines.lines[linenum]
# "include" should use the new style "foo/bar.h" instead of just "bar.h"
if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
error(filename, linenum, 'build/include', 4,
'Include the directory when naming .h files')
# We shouldn't include a file more than once. Actually, there are a
# handful of instances where doing so is okay, but in general it's
# not.
match = _RE_PATTERN_INCLUDE.search(line)
if match:
include = match.group(2)
is_system = (match.group(1) == '<')
if include in include_state:
error(filename, linenum, 'build/include', 4,
'"%s" already included at %s:%s' %
(include, filename, include_state[include]))
else:
include_state[include] = linenum
# We want to ensure that headers appear in the right order:
# 1) for foo.cc, foo.h (preferred location)
# 2) c system files
# 3) cpp system files
# 4) for foo.cc, foo.h (deprecated location)
# 5) other google headers
#
# We classify each include statement as one of those 5 types
# using a number of techniques. The include_state object keeps
# track of the highest type seen, and complains if we see a
# lower type after that.
error_message = include_state.CheckNextIncludeOrder(
_ClassifyInclude(fileinfo, include, is_system))
if error_message:
error(filename, linenum, 'build/include_order', 4,
'%s. Should be: %s.h, c system, c++ system, other.' %
(error_message, fileinfo.BaseName()))
canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
if not include_state.IsInAlphabeticalOrder(
clean_lines, linenum, canonical_include):
error(filename, linenum, 'build/include_alpha', 4,
'Include "%s" not in alphabetical order' % include)
include_state.SetLastHeader(canonical_include)
# Look for any of the stream classes that are part of standard C++.
match = _RE_PATTERN_INCLUDE.match(line)
if match:
include = match.group(2)
if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
# Many unit tests use cout, so we exempt them.
if not _IsTestFilename(filename):
# Suggest a different header for ostream
if include == 'ostream':
error(filename, linenum, 'readability/streams', 3,
'For logging, include "base/logging.h" instead of <ostream>.')
# else:
# error(filename, linenum, 'readability/streams', 3,
# 'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
r"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly handles
nested occurrences of the punctuation, so for text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match a string that ends with an opening punctuation symbol.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(unknown): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (many of which use inferior regexp
# matching today).
# Map opening punctuation to the matching close-punctuation.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must end with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must end with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
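# Hedged illustration (comment-only, not executed): given the behavior
# defined above, _GetTextInside would yield:
#   _GetTextInside('printf(a(), b(c()));', r'printf\(')  ->  'a(), b(c())'
#   _GetTextInside('f(x, y', r'f\(')                     ->  None  (unbalanced)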
# Patterns for matching call-by-reference parameters.
#
# Supports nested templates up to 2 levels deep using this messy pattern:
# < (?: < (?: < [^<>]*
# >
# | [^<>] )*
# >
# | [^<>] )*
# >
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
_RE_PATTERN_TYPE = (
r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
r'(?:\w|'
r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
r'::)+')
# A call-by-reference parameter ends with '& identifier'.
_RE_PATTERN_REF_PARAM = re.compile(
r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
# A call-by-const-reference parameter either ends with 'const& identifier'
# or looks like 'const type& identifier' when 'type' is atomic.
_RE_PATTERN_CONST_REF_PARAM = (
r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
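# Illustrative matches (a sketch, assuming the patterns above): for a
# declaration like 'void F(const string &name, string &out);',
# _RE_PATTERN_REF_PARAM captures both 'const string &name' and
# 'string &out', while _RE_PATTERN_CONST_REF_PARAM accepts only the former,
# so only 'string &out' would count as a non-const reference in the
# (currently commented-out) reporting in CheckForNonConstReference below.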
def CheckLanguage(filename, clean_lines, linenum, file_extension,
include_state, nesting_state, error):
"""Checks rules from the 'C++ language rules' section of cppguide.html.
Some of these rules are hard to test (function overloading, using
uint32 inappropriately), but we do the best we can.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
file_extension: The extension (without the dot) of the filename.
include_state: An _IncludeState instance in which the headers are inserted.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# If the line is empty or consists entirely of a comment, no need to
# check it.
line = clean_lines.elided[linenum]
if not line:
return
match = _RE_PATTERN_INCLUDE.search(line)
if match:
CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
return
# Reset include state across preprocessor directives. This is meant
# to silence warnings for conditional includes.
if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
include_state.ResetSection()
# Make Windows paths like Unix.
fullname = os.path.abspath(filename).replace('\\', '/')
# Perform other checks now that we are sure that this is not an include line
CheckCasts(filename, clean_lines, linenum, error)
CheckGlobalStatic(filename, clean_lines, linenum, error)
CheckPrintf(filename, clean_lines, linenum, error)
if file_extension == 'h':
# TODO(unknown): check that 1-arg constructors are explicit.
# How to tell it's a constructor?
# (handled in CheckForNonStandardConstructs for now)
# TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
# (level 1 error)
pass
# Check if people are using the verboten C basic types. The only exception
# we regularly allow is "unsigned short port" for port.
if Search(r'\bshort port\b', line):
if not Search(r'\bunsigned short port\b', line):
error(filename, linenum, 'runtime/int', 4,
'Use "unsigned short" for ports, not "short"')
else:
match = Search(r'\b(short|long(?! +double)|long long)\b', line)
if match:
error(filename, linenum, 'runtime/int', 4,
'Use int16/int64/etc, rather than the C type %s' % match.group(1))
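# e.g. 'long long count;' draws this runtime/int error, while
# 'unsigned short port' is explicitly allowed by the branch above
# (comment example, not executed).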
# Check if some verboten operator overloading is going on
# TODO(unknown): catch out-of-line unary operator&:
# class X {};
# int operator&(const X& x) { return 42; } // unary operator&
# The trick is it's hard to tell apart from binary operator&:
# class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
if Search(r'\boperator\s*&\s*\(\s*\)', line):
error(filename, linenum, 'runtime/operator', 4,
'Unary operator& is dangerous. Do not use it.')
# Check for suspicious usage of "if" like
# } if (a == b) {
if Search(r'\}\s*if\s*\(', line):
error(filename, linenum, 'readability/braces', 4,
'Did you mean "else if"? If not, start a new line for "if".')
# Check for potential format string bugs like printf(foo).
# We constrain the pattern not to pick things like DocidForPrintf(foo).
# Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
# TODO(unknown): Catch the following case. Need to change the calling
# convention of the whole function to process multiple line to handle it.
# printf(
# boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
if printf_args:
match = Match(r'([\w.\->()]+)$', printf_args)
if match and match.group(1) != '__VA_ARGS__':
function_name = re.search(r'\b((?:string)?printf)\s*\(',
line, re.I).group(1)
error(filename, linenum, 'runtime/printf', 4,
'Potential format string bug. Do %s("%%s", %s) instead.'
% (function_name, match.group(1)))
# Check for potential memset bugs like memset(buf, sizeof(buf), 0).
match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
error(filename, linenum, 'runtime/memset', 4,
'Did you mean "memset(%s, 0, %s)"?'
% (match.group(1), match.group(2)))
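# e.g. 'memset(buf, sizeof(buf), 0)' is flagged (arguments swapped), while
# 'memset(buf, 0, sizeof(buf))' is never matched by the pattern at all.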
# if Search(r'\busing namespace\b', line):
# error(filename, linenum, 'build/namespaces', 5,
# 'Do not use namespace using-directives. '
# 'Use using-declarations instead.')
# Detect variable-length arrays.
match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
match.group(3).find(']') == -1):
# Split the size using space and arithmetic operators as delimiters.
# If any of the resulting tokens are not compile time constants then
# report the error.
tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>', match.group(3))
is_const = True
skip_next = False
for tok in tokens:
if skip_next:
skip_next = False
continue
if Search(r'sizeof\(.+\)', tok): continue
if Search(r'arraysize\(\w+\)', tok): continue
tok = tok.lstrip('(')
tok = tok.rstrip(')')
if not tok: continue
if Match(r'\d+', tok): continue
if Match(r'0[xX][0-9a-fA-F]+', tok): continue
if Match(r'k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
# A catch all for tricky sizeof cases, including 'sizeof expression',
# 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
# requires skipping the next token because we split on ' ' and '*'.
if tok.startswith('sizeof') or tok.startswith('this'):
skip_next = True
continue
is_const = False
break
if not is_const:
error(filename, linenum, 'runtime/arrays', 1,
'Do not use variable-length arrays. Use an appropriately named '
"('k' followed by CamelCase) compile-time constant for the size.")
# If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
# DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
# in the class declaration.
match = Match(
(r'\s*'
r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
r'\(.*\);$'),
line)
if match and linenum + 1 < clean_lines.NumLines():
next_line = clean_lines.elided[linenum + 1]
# We allow some, but not all, declarations of variables to be present
# in the statement that defines the class. The [\w\*,\s]* fragment of
# the regular expression below allows users to declare instances of
# the class or pointers to instances, but not less common types such
# as function pointers or arrays. It's a tradeoff between allowing
# reasonable code and avoiding trying to parse more C++ using regexps.
if not Search(r'^\s*}[\w\*,\s]*;', next_line):
error(filename, linenum, 'readability/constructors', 3,
match.group(1) + ' should be the last thing in the class')
# Check for use of unnamed namespaces in header files. Registration
# macros are typically OK, so we allow use of "namespace {" on lines
# that end with backslashes.
if (file_extension == 'h'
and Search(r'\bnamespace\s*{', line)
and line[-1] != '\\'):
error(filename, linenum, 'build/namespaces', 4,
'Do not use unnamed namespaces in header files. See '
'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
' for more information.')
def CheckGlobalStatic(filename, clean_lines, linenum, error):
"""Check for unsafe global or static objects.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Check for people declaring static/global STL strings at the top level.
# This is dangerous because the C++ language does not guarantee that
# globals with constructors are initialized before the first access.
match = Match(
r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
line)
# Remove false positives:
# - String pointers (as opposed to values).
# string *pointer
# const string *pointer
# string const *pointer
# string *const pointer
#
# - Functions and template specializations.
# string Function<Type>(...
# string Class<Type>::Method(...
#
# - Operators. These are matched separately because operator names
# cross non-word boundaries, and trying to match both operators
# and functions at the same time would decrease accuracy of
# matching identifiers.
# string Class::operator*()
if (match and
not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
not Search(r'\boperator\W', line) and
not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))):
error(filename, linenum, 'runtime/string', 4,
'For a static/global string constant, use a C style string instead: '
'"%schar %s[]".' %
(match.group(1), match.group(2)))
if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
error(filename, linenum, 'runtime/init', 4,
'You seem to be initializing a member variable with itself.')
def CheckPrintf(filename, clean_lines, linenum, error):
"""Check for printf related issues.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# When snprintf is used, the second argument shouldn't be a literal.
match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
if match and match.group(2) != '0':
# If 2nd arg is zero, snprintf is used to calculate size.
error(filename, linenum, 'runtime/printf', 3,
'If you can, use sizeof(%s) instead of %s as the 2nd arg '
'to snprintf.' % (match.group(1), match.group(2)))
# Check if some verboten C functions are being used.
if Search(r'\bsprintf\b', line):
error(filename, linenum, 'runtime/printf', 5,
'Never use sprintf. Use snprintf instead.')
match = Search(r'\b(strcpy|strcat)\b', line)
if match:
error(filename, linenum, 'runtime/printf', 4,
'Almost always, snprintf is better than %s' % match.group(1))
def IsDerivedFunction(clean_lines, linenum):
"""Check if current line contains an inherited function.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line contains a function with "override"
virt-specifier.
"""
# Look for leftmost opening parenthesis on current line
opening_paren = clean_lines.elided[linenum].find('(')
if opening_paren < 0: return False
# Look for "override" after the matching closing parenthesis
line, _, closing_paren = CloseExpression(clean_lines, linenum, opening_paren)
return closing_paren >= 0 and Search(r'\boverride\b', line[closing_paren:])
def IsInitializerList(clean_lines, linenum):
"""Check if current line is inside constructor initializer list.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if current line appears to be inside constructor initializer
list, False otherwise.
"""
for i in xrange(linenum, 1, -1):
line = clean_lines.elided[i]
if i == linenum:
remove_function_body = Match(r'^(.*)\{\s*$', line)
if remove_function_body:
line = remove_function_body.group(1)
if Search(r'\s:\s*\w+[({]', line):
# A lone colon tends to indicate the start of a constructor
# initializer list. It could also be a ternary operator, which
# also tends to appear in constructor initializer lists as
# opposed to parameter lists.
return True
if Search(r'\}\s*,\s*$', line):
# A closing brace followed by a comma is probably the end of a
# brace-initialized member in constructor initializer list.
return True
if Search(r'[{};]\s*$', line):
# Found one of the following:
# - A closing brace or semicolon, probably the end of the previous
# function.
# - An opening brace, probably the start of current class or namespace.
#
# Current line is probably not inside an initializer list since
# we saw one of those things without seeing the starting colon.
return False
# Got to the beginning of the file without seeing the start of
# constructor initializer list.
return False
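# Illustrative C++ shape recognized by the ':' heuristic above (comment only):
#   Foo::Foo(int x)
#       : member_(x),     // ' : member_(' triggers the initializer-list match
#         other_(x) {}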
def CheckForNonConstReference(filename, clean_lines, linenum,
nesting_state, error):
"""Check for non-const references.
Separate from CheckLanguage since it scans backwards from current
line, instead of scanning forward.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: The function to call with any errors found.
"""
# Do nothing if there is no '&' on current line.
line = clean_lines.elided[linenum]
if '&' not in line:
return
# If a function is inherited, current function doesn't have much of
# a choice, so any non-const references should not be blamed on
# derived function.
if IsDerivedFunction(clean_lines, linenum):
return
# Long type names may be broken across multiple lines, usually in one
# of these forms:
# LongType
# ::LongTypeContinued &identifier
# LongType::
# LongTypeContinued &identifier
# LongType<
# ...>::LongTypeContinued &identifier
#
# If we detected a type split across two lines, join the previous
# line to current line so that we can match const references
# accordingly.
#
# Note that this only scans back one line, since scanning back
# arbitrary number of lines would be expensive. If you have a type
# that spans more than 2 lines, please use a typedef.
if linenum > 1:
previous = None
if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
# previous_line\n + ::current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
clean_lines.elided[linenum - 1])
elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
# previous_line::\n + current_line
previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
clean_lines.elided[linenum - 1])
if previous:
line = previous.group(1) + line.lstrip()
else:
# Check for templated parameter that is split across multiple lines
endpos = line.rfind('>')
if endpos > -1:
(_, startline, startpos) = ReverseCloseExpression(
clean_lines, linenum, endpos)
if startpos > -1 and startline < linenum:
# Found the matching < on an earlier line, collect all
# pieces up to current line.
line = ''
for i in xrange(startline, linenum + 1):
line += clean_lines.elided[i].strip()
# Check for non-const references in function parameters. A single '&' may
# be found in the following places:
# inside expression: binary & for bitwise AND
# inside expression: unary & for taking the address of something
# inside declarators: reference parameter
# We will exclude the first two cases by checking that we are not inside a
# function body, including one that was just introduced by a trailing '{'.
# TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
if (nesting_state.previous_stack_top and
not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
# Not at toplevel, not within a class, and not within a namespace
return
# Avoid preprocessors
if Search(r'\\\s*$', line):
return
# Avoid constructor initializer lists
if IsInitializerList(clean_lines, linenum):
return
# We allow non-const references in a few standard places, like functions
# called "swap()" or iostream operators like "<<" or ">>". Do not check
# those function parameters.
#
# We also accept & in static_assert, which looks like a function but
# it's actually a declaration expression.
whitelisted_functions = (r'(?:[sS]wap(?:<[\w:]+>)?|'
r'operator\s*[<>][<>]|'
r'static_assert|COMPILE_ASSERT'
r')\s*\(')
if Search(whitelisted_functions, line):
return
elif not Search(r'\S+\([^)]*$', line):
# Don't see a whitelisted function on this line. Actually we
# didn't see any function name on this line, so this is likely a
# multi-line parameter list. Try a bit harder to catch this case.
for i in xrange(2):
if (linenum > i and
Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
return
decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body
# for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
# if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
# error(filename, linenum, 'runtime/references', 2,
# 'Is this a non-const reference? '
# 'If so, make const or use a pointer: ' +
# ReplaceAll(' *<', '<', parameter))
def CheckCasts(filename, clean_lines, linenum, error):
"""Various cast related checks.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Check to see if they're using a conversion function cast.
# I just try to capture the most common basic types, though there are more.
# Parameterless conversion functions, such as bool(), are allowed as they are
# probably a member operator declaration or default constructor.
match = Search(
r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
r'(\([^)].*)', line)
expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
if match and not expecting_function:
matched_type = match.group(2)
# matched_new_or_template is used to silence two false positives:
# - New operators
# - Template arguments with function types
#
# For template arguments, we match on types immediately following
# an opening bracket without any spaces. This is a fast way to
# silence the common case where the function type is the first
# template argument. False negative with less-than comparison is
# avoided because those operators are usually followed by a space.
#
# function<double(double)> // bracket + no space = false positive
# value < double(42) // bracket + space = true positive
matched_new_or_template = match.group(1)
# Other things to ignore:
# - Function pointers
# - Casts to pointer types
# - Placement new
# - Alias declarations
matched_funcptr = match.group(3)
if (matched_new_or_template is None and
not (matched_funcptr and
(Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
matched_funcptr) or
matched_funcptr.startswith('(*)'))) and
not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
not Search(r'new\(\S+\)\s*' + matched_type, line)):
error(filename, linenum, 'readability/casting', 4,
'Using deprecated casting style. '
'Use static_cast<%s>(...) instead' %
matched_type)
if not expecting_function:
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'static_cast',
r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
# This doesn't catch all cases. Consider (const char * const)"hello".
#
# (char *) "foo" should always be a const_cast (reinterpret_cast won't
# compile).
if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
pass
else:
# Check pointer casts for other than string constants
CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)
# In addition, we look for people taking the address of a cast. This
# is dangerous -- casts can assign to temporaries, so the pointer doesn't
# point where you think.
match = Search(
r'(?:&\(([^)]+)\)[\w(])|'
r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line)
if match and match.group(1) != '*':
# Try a better error message when the & is bound to something
# dereferenced by the casted pointer, as opposed to the casted
# pointer itself.
parenthesis_error = False
match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
if match:
_, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
_, y2, x2 = CloseExpression(clean_lines, y1, x1)
if x2 >= 0:
extended_line = clean_lines.elided[y2][x2:]
if y2 < clean_lines.NumLines() - 1:
extended_line += clean_lines.elided[y2 + 1]
if Match(r'\s*(?:->|\[)', extended_line):
parenthesis_error = True
if parenthesis_error:
error(filename, linenum, 'readability/casting', 4,
('Are you taking an address of something dereferenced '
'from a cast? Wrapping the dereferenced expression in '
'parentheses will make the binding more obvious'))
else:
error(filename, linenum, 'runtime/casting', 4,
('Are you taking an address of a cast? '
'This is dangerous: could be a temp var. '
'Take the address before doing the cast, rather than after'))
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
error):
"""Checks for a C-style cast by looking for the pattern.
Args:
filename: The name of the current file.
linenum: The number of the line to check.
line: The line of code to check.
raw_line: The raw line of code to check, with comments.
cast_type: The string for the C++ cast to recommend. This is either
reinterpret_cast, static_cast, or const_cast, depending.
pattern: The regular expression used to find C-style casts.
error: The function to call with any errors found.
Returns:
True if an error was emitted.
False otherwise.
"""
match = Search(pattern, line)
if not match:
return False
# Exclude lines with keywords that tend to look like casts, and also
# macros which are generally troublesome.
if Match(r'.*\b(?:sizeof|alignof|alignas|[A-Z_]+)\s*$',
line[0:match.start(1) - 1]):
return False
# operator++(int) and operator--(int)
if (line[0:match.start(1) - 1].endswith(' operator++') or
line[0:match.start(1) - 1].endswith(' operator--')):
return False
# A single unnamed argument for a function tends to look like an old
# style cast. If we see those, don't issue warnings for deprecated
# casts, instead issue warnings for unnamed arguments where
# appropriate.
#
# These are things that we want warnings for, since the style guide
# explicitly requires all parameters to be named:
# Function(int);
# Function(int) {
# ConstMember(int) const;
# ConstMember(int) const {
# ExceptionMember(int) throw (...);
# ExceptionMember(int) throw (...) {
# PureVirtual(int) = 0;
#
# These are functions of some sort, where the compiler would be fine
# if they had named parameters, but people often omit those
# identifiers to reduce clutter:
# (FunctionPointer)(int);
# (FunctionPointer)(int) = value;
# Function((function_pointer_arg)(int))
# <TemplateArgument(int)>;
# <(FunctionPointerTemplateArgument)(int)>;
remainder = line[match.end(0):]
if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|=|>|\{|\))',
remainder):
# Looks like an unnamed parameter.
# Don't warn on any kind of template arguments.
if Match(r'^\s*>', remainder):
return False
# Don't warn on assignments to function pointers, but keep warnings for
# unnamed parameters to pure virtual functions. Note that this pattern
# will also pass on assignments of "0" to function pointers, but the
# preferred values for those would be "nullptr" or "NULL".
matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
if matched_zero and matched_zero.group(1) != '0':
return False
# Don't warn on function pointer declarations. For this we need
# to check what came before the "(type)" string.
if Match(r'.*\)\s*$', line[0:match.start(0)]):
return False
# Don't warn if the parameter is named with block comments, e.g.:
# Function(int /*unused_param*/);
if '/*' in raw_line:
return False
# Passed all filters, issue warning here.
error(filename, linenum, 'readability/function', 3,
'All parameters should be named in a function')
return True
# At this point, all that should be left is actual casts.
error(filename, linenum, 'readability/casting', 4,
'Using C-style cast. Use %s<%s>(...) instead' %
(cast_type, match.group(1)))
return True
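# Comment examples (C++ snippets; illustrative only):
#   'int x = (int)y;'      -> readability/casting, suggesting static_cast<int>
#   'void Function(int);'  -> readability/function (unnamed parameter)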
def ExpectingFunctionArgs(clean_lines, linenum):
"""Checks whether where function type arguments are expected.
Args:
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
Returns:
True if the line at 'linenum' is inside something that expects arguments
of function types.
"""
line = clean_lines.elided[linenum]
return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
(linenum >= 2 and
(Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
clean_lines.elided[linenum - 1]) or
Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
clean_lines.elided[linenum - 2]) or
Search(r'\bstd::m?function\s*\<\s*$',
clean_lines.elided[linenum - 1]))))
_HEADERS_CONTAINING_TEMPLATES = (
('<deque>', ('deque',)),
('<functional>', ('unary_function', 'binary_function',
'plus', 'minus', 'multiplies', 'divides', 'modulus',
'negate',
'equal_to', 'not_equal_to', 'greater', 'less',
'greater_equal', 'less_equal',
'logical_and', 'logical_or', 'logical_not',
'unary_negate', 'not1', 'binary_negate', 'not2',
'bind1st', 'bind2nd',
'pointer_to_unary_function',
'pointer_to_binary_function',
'ptr_fun',
'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
'mem_fun_ref_t',
'const_mem_fun_t', 'const_mem_fun1_t',
'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
'mem_fun_ref',
)),
('<limits>', ('numeric_limits',)),
('<list>', ('list',)),
('<map>', ('map', 'multimap',)),
('<memory>', ('allocator',)),
('<queue>', ('queue', 'priority_queue',)),
('<set>', ('set', 'multiset',)),
('<stack>', ('stack',)),
('<string>', ('char_traits', 'basic_string',)),
('<utility>', ('pair',)),
('<vector>', ('vector',)),
# gcc extensions.
# Note: std::hash is their hash, ::hash is our hash
('<hash_map>', ('hash_map', 'hash_multimap',)),
('<hash_set>', ('hash_set', 'hash_multiset',)),
('<slist>', ('slist',)),
)
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
'transform'):
# Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
# type::max().
_re_pattern_algorithm_header.append(
(re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
_template,
'<algorithm>'))
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
for _template in _templates:
_re_pattern_templates.append(
(re.compile(r'(\<|\b)' + _template + r'\s*\<'),
_template + '<>',
_header))
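# For example (illustrative, not executed), the '<map>' entry above produces
# a pattern matching 'map<' or '::map<', so a line using 'map<int, int>'
# makes the include-what-you-use check below suggest '#include <map>'.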
def FilesBelongToSameModule(filename_cc, filename_h):
"""Check if these two filenames belong to the same module.
The concept of a 'module' here is as follows:
foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
same 'module' if they are in the same directory.
some/path/public/xyzzy and some/path/internal/xyzzy are also considered
to belong to the same module here.
If the filename_cc contains a longer path than the filename_h, for example,
'/absolute/path/to/base/sysinfo.cc', and this file would include
'base/sysinfo.h', this function also produces the prefix needed to open the
header. This is used by the caller of this function to more robustly open the
header file. We don't have access to the real include paths in this context,
so we need this guesswork here.
Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
according to this implementation. Because of this, this function gives
some false positives. This should be sufficiently rare in practice.
Args:
filename_cc: is the path for the .cc file
filename_h: is the path for the header path
Returns:
Tuple with a bool and a string:
bool: True if filename_cc and filename_h belong to the same module.
string: the additional prefix needed to open the header file.
"""
if not filename_cc.endswith('.cc'):
return (False, '')
filename_cc = filename_cc[:-len('.cc')]
if filename_cc.endswith('_unittest'):
filename_cc = filename_cc[:-len('_unittest')]
elif filename_cc.endswith('_test'):
filename_cc = filename_cc[:-len('_test')]
filename_cc = filename_cc.replace('/public/', '/')
filename_cc = filename_cc.replace('/internal/', '/')
if not filename_h.endswith('.h'):
return (False, '')
filename_h = filename_h[:-len('.h')]
if filename_h.endswith('-inl'):
filename_h = filename_h[:-len('-inl')]
filename_h = filename_h.replace('/public/', '/')
filename_h = filename_h.replace('/internal/', '/')
files_belong_to_same_module = filename_cc.endswith(filename_h)
common_path = ''
if files_belong_to_same_module:
common_path = filename_cc[:-len(filename_h)]
return files_belong_to_same_module, common_path
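# Illustrative results (comment examples, not executed):
#   FilesBelongToSameModule('a/b/foo.cc', 'a/b/foo.h')            -> (True, '')
#   FilesBelongToSameModule('a/b/foo_test.cc', 'a/b/foo-inl.h')   -> (True, '')
#   FilesBelongToSameModule('/abs/base/sysinfo.cc',
#                           'base/sysinfo.h')                     -> (True, '/abs/')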
def UpdateIncludeState(filename, include_state, io=codecs):
"""Fill up the include_state with new includes found from the file.
Args:
filename: the name of the header to read.
include_state: an _IncludeState instance in which the headers are inserted.
io: The io factory to use to read the file. Provided for testability.
Returns:
True if a header was successfully added. False otherwise.
"""
headerfile = None
try:
headerfile = io.open(filename, 'r', 'utf8', 'replace')
except IOError:
return False
linenum = 0
for line in headerfile:
linenum += 1
clean_line = CleanseComments(line)
match = _RE_PATTERN_INCLUDE.search(clean_line)
if match:
include = match.group(2)
# The value formatting is cute, but not really used right now.
# What matters here is that the key is in include_state.
include_state.setdefault(include, '%s:%d' % (filename, linenum))
return True
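# After a successful call, include_state maps include paths to the location
# where they were first seen, e.g. {'base/logging.h': 'foo/foo.h:12'}
# (illustrative values).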
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
io=codecs):
"""Reports for missing stl includes.
This function will output warnings to make sure you are including the headers
necessary for the stl containers and functions that you use. We only give one
reason to include a header. For example, if you use both equal_to<> and
less<> in a .h file, only one (the latter in the file) of these will be
reported as a reason to include the <functional>.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
include_state: An _IncludeState instance.
error: The function to call with any errors found.
io: The IO factory to use to read the header file. Provided for unittest
injection.
"""
required = {} # A map of header name to linenumber and the template entity.
# Example of required: { '<functional>': (1219, 'less<>') }
for linenum in xrange(clean_lines.NumLines()):
line = clean_lines.elided[linenum]
if not line or line[0] == '#':
continue
# String is special -- it is a non-templatized type in STL.
matched = _RE_PATTERN_STRING.search(line)
if matched:
# Don't warn about strings in non-STL namespaces:
# (We check only the first match per line; good enough.)
prefix = line[:matched.start()]
if prefix.endswith('std::') or not prefix.endswith('::'):
required['<string>'] = (linenum, 'string')
for pattern, template, header in _re_pattern_algorithm_header:
if pattern.search(line):
required[header] = (linenum, template)
# The following function is just a speed up, no semantics are changed.
if '<' not in line: # Reduces the CPU time usage by skipping lines.
continue
for pattern, template, header in _re_pattern_templates:
if pattern.search(line):
required[header] = (linenum, template)
# The policy is that if you #include something in foo.h you don't need to
# include it again in foo.cc. Here, we will look at possible includes.
# Let's copy the include_state so it is only messed up within this function.
include_state = include_state.copy()
# Did we find the header for this file (if any) and successfully load it?
header_found = False
# Use the absolute path so that matching works properly.
abs_filename = FileInfo(filename).FullName()
# For Emacs's flymake.
# If cpplint is invoked from Emacs's flymake, a temporary file is generated
# by flymake and that file name might end with '_flymake.cc'. In that case,
# restore original file name here so that the corresponding header file can be
# found.
# e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
# instead of 'foo_flymake.h'
abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
# include_state is modified during iteration, so we iterate over a copy of
# the keys.
header_keys = include_state.keys()
for header in header_keys:
(same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
fullpath = common_path + header
if same_module and UpdateIncludeState(fullpath, include_state, io):
header_found = True
# If we can't find the header file for a .cc, assume it's because we don't
# know where to look. In that case we'll give up as we're not sure they
# didn't include it in the .h file.
# TODO(unknown): Do a better job of finding .h files so we are confident that
# not having the .h file means there isn't one.
if filename.endswith('.cc') and not header_found:
return
# All the lines have been processed, report the errors found.
for required_header_unstripped in required:
template = required[required_header_unstripped][1]
if required_header_unstripped.strip('<>"') not in include_state:
error(filename, required[required_header_unstripped][0],
'build/include_what_you_use', 4,
'Add #include ' + required_header_unstripped + ' for ' + template)
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
"""Check that make_pair's template arguments are deduced.
G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
specified explicitly, and such use isn't intended in any case.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
if match:
error(filename, linenum, 'build/explicit_make_pair',
4, # 4 = high confidence
'For C++11-compatibility, omit template arguments from make_pair'
' OR use pair directly OR if appropriate, construct a pair directly')
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
"""Check that default lambda captures are not used.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# A lambda introducer specifies a default capture if it starts with "[="
# or if it starts with "[&" _not_ followed by an identifier.
match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
if match:
# Found a potential error, check what comes after the lambda-introducer.
# If it's not open parenthesis (for lambda-declarator) or open brace
# (for compound-statement), it's not a lambda.
line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
error(filename, linenum, 'build/c++11',
4, # 4 = high confidence
'Default lambda captures are an unapproved C++ feature.')
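# Comment examples (C++ snippets; illustrative only):
#   auto f = [=](int x) { return x + total; };    // flagged: default '=' capture
#   auto g = [&total](int x) { total += x; };     // ok: explicit capture list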
def ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions=[]):
"""Processes a single line in the file.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
clean_lines: An array of strings, each representing a line of the file,
with comments stripped.
line: Number of line being processed.
include_state: An _IncludeState instance in which the headers are inserted.
function_state: A _FunctionState instance which counts function lines, etc.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
raw_lines = clean_lines.raw_lines
ParseNolintSuppressions(filename, raw_lines[line], line, error)
nesting_state.Update(filename, clean_lines, line, error)
if nesting_state.InAsmBlock(): return
CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
CheckLanguage(filename, clean_lines, line, file_extension, include_state,
nesting_state, error)
CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
CheckForNonStandardConstructs(filename, clean_lines, line,
nesting_state, error)
CheckVlogArguments(filename, clean_lines, line, error)
CheckPosixThreading(filename, clean_lines, line, error)
CheckInvalidIncrement(filename, clean_lines, line, error)
CheckMakePairUsesDeduction(filename, clean_lines, line, error)
# CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
for check_fn in extra_check_functions:
check_fn(filename, clean_lines, line, error)
def FlagCxx11Features(filename, clean_lines, linenum, error):
"""Flag those c++11 features that we only allow in certain places.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
error: The function to call with any errors found.
"""
line = clean_lines.elided[linenum]
# Flag unapproved C++11 headers.
include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
# if include and include.group(1) in ('cfenv',
# 'condition_variable',
# 'fenv.h',
# 'ratio',
# 'regex',
# 'system_error',
# ):
# error(filename, linenum, 'build/c++11', 5,
# ('<%s> is an unapproved C++11 header.') % include.group(1))
# The only place where we need to worry about C++11 keywords and library
# features in preprocessor directives is in macro definitions.
if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
# These are classes and free functions. The classes are always
# mentioned as std::*, but we only catch the free functions if
# they're not found by ADL. They're alphabetical by header.
for top_name in (
# type_traits
'alignment_of',
'aligned_union',
# utility
'forward',
):
if Search(r'\bstd::%s\b' % top_name, line):
error(filename, linenum, 'build/c++11', 5,
('std::%s is an unapproved C++11 class or function. Send the cpplint '
'maintainers an example of where it would make your code more readable, '
'and they may let you use it.') % top_name)
def ProcessFileData(filename, file_extension, lines, error,
extra_check_functions=[]):
"""Performs lint checks and reports any errors to the given error function.
Args:
filename: Filename of the file that is being processed.
file_extension: The extension (dot not included) of the file.
lines: An array of strings, each representing a line of the file, with the
last element being empty if the file is terminated with a newline.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
lines = (['// marker so line numbers and indices both start at 1'] + lines +
['// marker so line numbers end in a known way'])
include_state = _IncludeState()
function_state = _FunctionState()
nesting_state = NestingState()
ResetNolintSuppressions()
CheckForCopyright(filename, lines, error)
if file_extension == 'h':
CheckForHeaderGuard(filename, lines, error)
RemoveMultiLineComments(filename, lines, error)
clean_lines = CleansedLines(lines)
for line in xrange(clean_lines.NumLines()):
ProcessLine(filename, file_extension, clean_lines, line,
include_state, function_state, nesting_state, error,
extra_check_functions)
FlagCxx11Features(filename, clean_lines, line, error)
nesting_state.CheckCompletedBlocks(filename, error)
CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
# We check here rather than inside ProcessLine so that we see raw
# lines rather than "cleaned" lines.
CheckForBadCharacters(filename, lines, error)
CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
"""Does google-lint on a single file.
Args:
filename: The name of the file to parse.
vlevel: The level of errors to report. Every error of confidence
>= verbose_level will be reported. 0 is a good default.
extra_check_functions: An array of additional check functions that will be
run on each source line. Each function takes 4
arguments: filename, clean_lines, line, error
"""
_SetVerboseLevel(vlevel)
lf_lines = []
crlf_lines = []
try:
# Support the UNIX convention of using "-" for stdin. Note that
# we are not opening the file with universal newline support
# (which codecs doesn't support anyway), so the resulting lines do
# contain trailing '\r' characters if we are reading a file that
# has CRLF endings.
# If after the split a trailing '\r' is present, it is removed
# below.
if filename == '-':
lines = codecs.StreamReaderWriter(sys.stdin,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace').read().split('\n')
else:
lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
# Remove trailing '\r'.
# The -1 accounts for the extra trailing blank line we get from split()
for linenum in range(len(lines) - 1):
if lines[linenum].endswith('\r'):
lines[linenum] = lines[linenum].rstrip('\r')
crlf_lines.append(linenum + 1)
else:
lf_lines.append(linenum + 1)
except IOError:
sys.stderr.write(
"Skipping input '%s': Can't open for reading\n" % filename)
return
# Note, if no dot is found, this will give the entire filename as the ext.
file_extension = filename[filename.rfind('.') + 1:]
# When reading from stdin, the extension is unknown, so no cpplint tests
# should rely on the extension.
if filename != '-' and file_extension not in _valid_extensions:
sys.stderr.write('Ignoring %s; not a valid file name '
'(%s)\n' % (filename, ', '.join(_valid_extensions)))
else:
ProcessFileData(filename, file_extension, lines, Error,
extra_check_functions)
# If end-of-line sequences are a mix of LF and CR-LF, issue
# warnings on the lines with CR.
#
# Don't issue any warnings if all lines are uniformly LF or CR-LF,
# since critique can handle these just fine, and the style guide
# doesn't dictate a particular end of line sequence.
#
# We can't depend on os.linesep to determine what the desired
# end-of-line sequence should be, since that will return the
# server-side end-of-line sequence.
if lf_lines and crlf_lines:
# Warn on every line with CR. An alternative approach might be to
# check whether the file is mostly CRLF or just LF, and warn on the
# minority; we bias toward LF here since most tools prefer LF.
for linenum in crlf_lines:
Error(filename, linenum, 'whitespace/newline', 1,
'Unexpected \\r (^M) found; better to use only \\n')
# sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
"""Prints a brief usage string and exits, optionally with an error message.
Args:
message: The optional error message.
"""
sys.stderr.write(_USAGE)
if message:
sys.exit('\nFATAL ERROR: ' + message)
else:
sys.exit(1)
def PrintCategories():
"""Prints a list of all the error-categories used by error messages.
These are the categories used to filter messages via --filter.
"""
sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES))
sys.exit(0)
def ParseArguments(args):
"""Parses the command line arguments.
This may set the output format and verbosity level as side-effects.
Args:
args: The command line arguments.
Returns:
The list of filenames to lint.
"""
try:
(opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
'counting=',
'filter=',
'root=',
'linelength=',
'extensions='])
except getopt.GetoptError:
PrintUsage('Invalid arguments.')
verbosity = _VerboseLevel()
output_format = _OutputFormat()
filters = ''
counting_style = ''
for (opt, val) in opts:
if opt == '--help':
PrintUsage(None)
elif opt == '--output':
if val not in ('emacs', 'vs7', 'eclipse'):
PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
output_format = val
elif opt == '--verbose':
verbosity = int(val)
elif opt == '--filter':
filters = val
if not filters:
PrintCategories()
elif opt == '--counting':
if val not in ('total', 'toplevel', 'detailed'):
PrintUsage('Valid counting options are total, toplevel, and detailed')
counting_style = val
elif opt == '--root':
global _root
_root = val
elif opt == '--linelength':
global _line_length
try:
_line_length = int(val)
except ValueError:
PrintUsage('Line length must be numeric.')
elif opt == '--extensions':
global _valid_extensions
try:
_valid_extensions = set(val.split(','))
except ValueError:
PrintUsage('Extensions must be a comma-separated list.')
if not filenames:
PrintUsage('No files were specified.')
_SetOutputFormat(output_format)
_SetVerboseLevel(verbosity)
_SetFilters(filters)
_SetCountingStyle(counting_style)
return filenames
def main():
filenames = ParseArguments(sys.argv[1:])
# Change stderr to write with replacement characters so we don't die
# if we try to print something containing non-ASCII characters.
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
codecs.getreader('utf8'),
codecs.getwriter('utf8'),
'replace')
_cpplint_state.ResetErrorCounts()
for filename in filenames:
ProcessFile(filename, _cpplint_state.verbose_level)
_cpplint_state.PrintErrorCounts()
sys.exit(_cpplint_state.error_count > 0)
if __name__ == '__main__':
main()
| apache-2.0 |
z3ntu/razer-drivers | scripts/create_fake_device.py | 1 | 8006 | #!/usr/bin/env python3
import argparse
import atexit
import cmd
import os
import shutil
import sys
import tempfile
PYLIB = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pylib')
sys.path.insert(1, PYLIB)
import razer._fake_driver as fake_driver
class FakeDevicePrompt(cmd.Cmd):
def __init__(self, device_map, *args, **kwargs):
super(FakeDevicePrompt, self).__init__(*args, **kwargs)
self._device_map = device_map
self._current_device = None
self._ep = {}
self._read = []
self._write = []
# If only 1 device, auto use that
if len(self._device_map) == 1:
self._change_device(list(self._device_map.keys())[0])
else:
self._change_device(None)
def _change_device(self, device_name=None):
if device_name is not None:
self._current_device = device_name
self.prompt = self._current_device + "> "
for endpoint, details in self._device_map[self._current_device].endpoints.items():
self._ep[endpoint] = details[2]
self._read = [endpoint for endpoint, perm in self._ep.items() if perm in ('r', 'rw')]
self._write = [endpoint for endpoint, perm in self._ep.items() if perm in ('w', 'rw')]
else:
self._current_device = None
self.prompt = "> "
def do_dev(self, arg):
"""
Change current device
"""
if arg is None or len(arg) == 0:
print('Need to specify a device name. One of: {0}'.format(','.join(self._device_map.keys())))
elif arg in self._device_map:
self._change_device(arg)
else:
print('Invalid device name: {0}'.format(arg))
def complete_dev(self, text, line, begidx, endidx):
if not text:
completions = list(self._device_map.keys())
else:
completions = [item for item in list(self._device_map.keys()) if item.startswith(text)]
return completions
def do_list(self, arg):
"""List available device files"""
if self._current_device is not None:
print('Device files')
print('------------')
for endpoint, permission in self._ep.items():
if permission in ('r', 'rw'):
print(" {0:-<2}- {1}".format(permission, endpoint))
else:
print(" {0:->2}- {1}".format(permission, endpoint))
print()
print('Event files')
print('-----------')
for event_id, event_value in sorted(self._device_map[self._current_device].events.items(), key=lambda x: x[0]):
print(" {0: >2} {1}".format(event_id, event_value[0]))
else:
print('Devices')
print('-------')
for device in list(self._device_map.keys()):
print(' {0}'.format(device))
def do_ls(self, arg):
"""List available device files"""
self.do_list(arg)
def do_read(self, arg, binary=False):
"""Read ASCII from given device file"""
if self._current_device is not None:
if arg in self._read:
result = self._device_map[self._current_device].get(arg, binary=binary)
print(result)
elif arg in self._ep:
print('Device endpoint not readable')
else:
print("Device endpoint not found")
def do_binary_read(self, arg):
"""Read binary from given device file"""
self.do_read(arg, binary=True)
def complete_read(self, text, line, begidx, endidx):
if not text:
completions = self._read
else:
completions = [item for item in self._read if item.startswith(text)]
return completions
complete_binary_read = complete_read
def do_write(self, arg):
"""Write ASCII to device file. DEVICE_FILE DATA"""
if self._current_device is not None:
try:
device_file, data = arg.split(' ', 1)
if device_file in self._ep:
if len(data) > 0:
self._device_map[self._current_device].set(device_file, data)
print("{0}: {1}".format(device_file, self._device_map[self._current_device].get(device_file)))
else:
print("Device endpoint not found")
except ValueError:
print("Must specify a device enpoint then a space then data to write")
def complete_write(self, text, line, begidx, endidx):
if not text:
completions = self._write
else:
completions = [item for item in self._write if item.startswith(text)]
return completions
def do_event(self, arg):
"""Emit an event, format: EVENT_ID KEY_ID STATE
where STATE is one of 'up', 'down' or 'repeat'
"""
if self._current_device is not None:
event_file, key_id, value = arg.split(' ')
if event_file not in self._device_map[self._current_device].events:
print("Event ID {0} is invalid".format(event_file))
else:
try:
bytes_written = self._device_map[self._current_device].emit_kb_event(event_file, int(key_id), value)
print("Wrote {0} bytes to {1}".format(bytes_written, self._device_map[self._current_device].events[event_file][0]))
except ValueError as err:
print("Caught exception: {0}".format(err))
def do_exit(self, arg):
"""Exit"""
if self._current_device is not None:
self._change_device(None)
return False
else:
return True
def do_EOF(self, arg):
"""Press Ctrl+D to exit"""
return self.do_exit(arg)
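# A minimal usage sketch (illustrative only; 'razerblackwidow' is an assumed
# config name and /tmp/fake an assumed directory): build a one-device map
# and drive the prompt directly.
#
#   device = fake_driver.FakeDevice('razerblackwidow', tmp_dir='/tmp/fake')
#   FakeDevicePrompt({'razerblackwidow': device}).cmdloop()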
def create_environment(device_name, destination):
os.makedirs(destination, exist_ok=True)
try:
fake_device = fake_driver.FakeDevice(device_name, tmp_dir=destination)
return fake_device
except ValueError:
print('Device {0}.cfg not found'.format(device_name))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('device', metavar='DEVICE', nargs='+', help='Device config name')
parser.add_argument('--dest', metavar='DESTDIR', required=False, default=None, help='Directory to create driver files in. If omitted then a tmp directory is used')
parser.add_argument('--non-interactive', dest='interactive', action='store_false', help="Don't display prompt, just hang until killed")
parser.add_argument('--clear-dest', action='store_true', help='Clear the destination folder if it exists before starting')
parser.add_argument('--create-only', action='store_true', help='Create the target structure and then exit')
return parser.parse_args()
def run():
args = parse_args()
if args.dest is None:
destination = tempfile.mkdtemp(prefix='tmp_', suffix='_fakerazer')
else:
destination = args.dest
if args.clear_dest and os.path.exists(destination):
shutil.rmtree(destination, ignore_errors=True)
device_map = {}
for device in args.device:
# Device name: FakeDriver
device_map[device] = create_environment(device, destination)
if not args.create_only:
# Register cleanup
if args.dest is None:
atexit.register(lambda: shutil.rmtree(destination, ignore_errors=True))
else:
for device in device_map.values():
# device = FakeDriver
atexit.register(device.close)
print("Device test directory: {0}".format(destination))
try:
if not args.interactive:
input()
else:
FakeDevicePrompt(device_map).cmdloop()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
run() | gpl-2.0 |
100health/RedoxBlog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/formatters/img.py | 268 | 18059 | # -*- coding: utf-8 -*-
"""
pygments.formatters.img
~~~~~~~~~~~~~~~~~~~~~~~
Formatter for Pixmap output.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
from pygments.formatter import Formatter
from pygments.util import get_bool_opt, get_int_opt, \
get_list_opt, get_choice_opt
# Import this carefully
try:
from PIL import Image, ImageDraw, ImageFont
pil_available = True
except ImportError:
pil_available = False
try:
import _winreg
except ImportError:
_winreg = None
__all__ = ['ImageFormatter', 'GifImageFormatter', 'JpgImageFormatter',
'BmpImageFormatter']
# For some unknown reason every font calls it something different
STYLES = {
'NORMAL': ['', 'Roman', 'Book', 'Normal', 'Regular', 'Medium'],
'ITALIC': ['Oblique', 'Italic'],
'BOLD': ['Bold'],
'BOLDITALIC': ['Bold Oblique', 'Bold Italic'],
}
# A sane default for modern systems
DEFAULT_FONT_NAME_NIX = 'Bitstream Vera Sans Mono'
DEFAULT_FONT_NAME_WIN = 'Courier New'
class PilNotAvailable(ImportError):
"""When Python imaging library is not available"""
class FontNotFound(Exception):
"""When there are no usable fonts specified"""
class FontManager(object):
"""
Manages a set of fonts: normal, italic, bold, etc...
"""
def __init__(self, font_name, font_size=14):
self.font_name = font_name
self.font_size = font_size
self.fonts = {}
self.encoding = None
if sys.platform.startswith('win'):
if not font_name:
self.font_name = DEFAULT_FONT_NAME_WIN
self._create_win()
else:
if not font_name:
self.font_name = DEFAULT_FONT_NAME_NIX
self._create_nix()
def _get_nix_font_path(self, name, style):
from commands import getstatusoutput
exit, out = getstatusoutput('fc-list "%s:style=%s" file' %
(name, style))
if not exit:
lines = out.splitlines()
if lines:
path = lines[0].strip().strip(':')
return path
def _create_nix(self):
for name in STYLES['NORMAL']:
path = self._get_nix_font_path(self.font_name, name)
if path is not None:
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
break
else:
raise FontNotFound('No usable fonts named: "%s"' %
self.font_name)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
for stylename in STYLES[style]:
path = self._get_nix_font_path(self.font_name, stylename)
if path is not None:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
break
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
def _lookup_win(self, key, basename, styles, fail=False):
for suffix in ('', ' (TrueType)'):
for style in styles:
try:
valname = '%s%s%s' % (basename, style and ' '+style, suffix)
val, _ = _winreg.QueryValueEx(key, valname)
return val
except EnvironmentError:
continue
else:
if fail:
raise FontNotFound('Font %s (%s) not found in registry' %
(basename, styles[0]))
return None
def _create_win(self):
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows NT\CurrentVersion\Fonts')
except EnvironmentError:
try:
key = _winreg.OpenKey(
_winreg.HKEY_LOCAL_MACHINE,
r'Software\Microsoft\Windows\CurrentVersion\Fonts')
except EnvironmentError:
raise FontNotFound('Can\'t open Windows font registry key')
try:
path = self._lookup_win(key, self.font_name, STYLES['NORMAL'], True)
self.fonts['NORMAL'] = ImageFont.truetype(path, self.font_size)
for style in ('ITALIC', 'BOLD', 'BOLDITALIC'):
path = self._lookup_win(key, self.font_name, STYLES[style])
if path:
self.fonts[style] = ImageFont.truetype(path, self.font_size)
else:
if style == 'BOLDITALIC':
self.fonts[style] = self.fonts['BOLD']
else:
self.fonts[style] = self.fonts['NORMAL']
finally:
_winreg.CloseKey(key)
def get_char_size(self):
"""
Get the character size.
"""
return self.fonts['NORMAL'].getsize('M')
def get_font(self, bold, oblique):
"""
Get the font based on bold and italic flags.
"""
if bold and oblique:
return self.fonts['BOLDITALIC']
elif bold:
return self.fonts['BOLD']
elif oblique:
return self.fonts['ITALIC']
else:
return self.fonts['NORMAL']
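# A minimal usage sketch (hedged; assumes the named font is installed on the
# system): the manager resolves all four style variants up front, after which
# get_font() is a cheap dictionary lookup per token.
#
#   fm = FontManager('DejaVu Sans Mono', font_size=14)
#   bold_italic = fm.get_font(bold=True, oblique=True)
#   char_width, char_height = fm.get_char_size()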
class ImageFormatter(Formatter):
"""
Create a PNG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 0.10.*
Additional options accepted:
`image_format`
An image format to output to that is recognised by PIL, these include:
* "PNG" (default)
* "JPEG"
* "BMP"
* "GIF"
`line_pad`
The extra spacing (in pixels) between each line of text.
Default: 2
`font_name`
The font name to be used as the base font from which others, such as
bold and italic fonts will be generated. This really should be a
monospace font to look sane.
Default: "Bitstream Vera Sans Mono"
`font_size`
The font size in points to be used.
Default: 14
`image_pad`
The padding, in pixels to be used at each edge of the resulting image.
Default: 10
`line_numbers`
Whether line numbers should be shown: True/False
Default: True
`line_number_start`
The line number of the first line.
Default: 1
`line_number_step`
The step used when printing line numbers.
Default: 1
`line_number_bg`
The background colour (in "#123456" format) of the line number bar, or
None to use the style background color.
Default: "#eed"
`line_number_fg`
The text color of the line numbers (in "#123456"-like format).
Default: "#886"
`line_number_chars`
The number of columns of line numbers allowable in the line number
margin.
Default: 2
`line_number_bold`
Whether line numbers will be bold: True/False
Default: False
`line_number_italic`
Whether line numbers will be italicized: True/False
Default: False
`line_number_separator`
Whether a line will be drawn between the line number area and the
source code area: True/False
Default: True
`line_number_pad`
The horizontal padding (in pixels) between the line number margin, and
the source code area.
Default: 6
`hl_lines`
Specify a list of lines to be highlighted. *New in Pygments 1.2.*
Default: empty list
`hl_color`
Specify the color for highlighting lines. *New in Pygments 1.2.*
Default: highlight color of the selected style
"""
# Required by the pygments mapper
name = 'img'
aliases = ['img', 'IMG', 'png']
filenames = ['*.png']
unicodeoutput = False
default_image_format = 'png'
def __init__(self, **options):
"""
See the class docstring for explanation of options.
"""
if not pil_available:
raise PilNotAvailable(
'Python Imaging Library is required for this formatter')
Formatter.__init__(self, **options)
# Read the style
self.styles = dict(self.style)
if self.style.background_color is None:
self.background_color = '#fff'
else:
self.background_color = self.style.background_color
# Image options
self.image_format = get_choice_opt(
options, 'image_format', ['png', 'jpeg', 'gif', 'bmp'],
self.default_image_format, normcase=True)
self.image_pad = get_int_opt(options, 'image_pad', 10)
self.line_pad = get_int_opt(options, 'line_pad', 2)
# The fonts
fontsize = get_int_opt(options, 'font_size', 14)
self.fonts = FontManager(options.get('font_name', ''), fontsize)
self.fontw, self.fonth = self.fonts.get_char_size()
# Line number options
self.line_number_fg = options.get('line_number_fg', '#886')
self.line_number_bg = options.get('line_number_bg', '#eed')
self.line_number_chars = get_int_opt(options,
'line_number_chars', 2)
self.line_number_bold = get_bool_opt(options,
'line_number_bold', False)
self.line_number_italic = get_bool_opt(options,
'line_number_italic', False)
self.line_number_pad = get_int_opt(options, 'line_number_pad', 6)
self.line_numbers = get_bool_opt(options, 'line_numbers', True)
self.line_number_separator = get_bool_opt(options,
'line_number_separator', True)
self.line_number_step = get_int_opt(options, 'line_number_step', 1)
self.line_number_start = get_int_opt(options, 'line_number_start', 1)
if self.line_numbers:
self.line_number_width = (self.fontw * self.line_number_chars +
self.line_number_pad * 2)
else:
self.line_number_width = 0
self.hl_lines = []
hl_lines_str = get_list_opt(options, 'hl_lines', [])
for line in hl_lines_str:
try:
self.hl_lines.append(int(line))
except ValueError:
pass
self.hl_color = options.get('hl_color',
self.style.highlight_color) or '#f90'
self.drawables = []
def get_style_defs(self, arg=''):
raise NotImplementedError('The -S option is meaningless for the image '
'formatter. Use -O style=<stylename> instead.')
def _get_line_height(self):
"""
Get the height of a line.
"""
return self.fonth + self.line_pad
def _get_line_y(self, lineno):
"""
Get the Y coordinate of a line number.
"""
return lineno * self._get_line_height() + self.image_pad
def _get_char_width(self):
"""
Get the width of a character.
"""
return self.fontw
def _get_char_x(self, charno):
"""
Get the X coordinate of a character position.
"""
return charno * self.fontw + self.image_pad + self.line_number_width
def _get_text_pos(self, charno, lineno):
"""
Get the actual position for a character and line position.
"""
return self._get_char_x(charno), self._get_line_y(lineno)
def _get_linenumber_pos(self, lineno):
"""
Get the actual position for the start of a line number.
"""
return (self.image_pad, self._get_line_y(lineno))
def _get_text_color(self, style):
"""
Get the correct color for the token from the style.
"""
if style['color'] is not None:
fill = '#' + style['color']
else:
fill = '#000'
return fill
def _get_style_font(self, style):
"""
Get the correct font for the style.
"""
return self.fonts.get_font(style['bold'], style['italic'])
def _get_image_size(self, maxcharno, maxlineno):
"""
Get the required image size.
"""
return (self._get_char_x(maxcharno) + self.image_pad,
self._get_line_y(maxlineno + 0) + self.image_pad)
def _draw_linenumber(self, posno, lineno):
"""
Remember a line number drawable to paint later.
"""
self._draw_text(
self._get_linenumber_pos(posno),
str(lineno).rjust(self.line_number_chars),
font=self.fonts.get_font(self.line_number_bold,
self.line_number_italic),
fill=self.line_number_fg,
)
def _draw_text(self, pos, text, font, **kw):
"""
Remember a single drawable tuple to paint later.
"""
self.drawables.append((pos, text, font, kw))
def _create_drawables(self, tokensource):
"""
Create drawables for the token content.
"""
lineno = charno = maxcharno = 0
for ttype, value in tokensource:
while ttype not in self.styles:
ttype = ttype.parent
style = self.styles[ttype]
# TODO: make sure tab expansion happens earlier in the chain. It
# really ought to be done on the input, as to do it right here is
# quite complex.
value = value.expandtabs(4)
lines = value.splitlines(True)
for i, line in enumerate(lines):
temp = line.rstrip('\n')
if temp:
self._draw_text(
self._get_text_pos(charno, lineno),
temp,
font = self._get_style_font(style),
fill = self._get_text_color(style)
)
charno += len(temp)
maxcharno = max(maxcharno, charno)
if line.endswith('\n'):
# add a line for each extra line in the value
charno = 0
lineno += 1
self.maxcharno = maxcharno
self.maxlineno = lineno
def _draw_line_numbers(self):
"""
Create drawables for the line numbers.
"""
if not self.line_numbers:
return
for p in xrange(self.maxlineno):
n = p + self.line_number_start
if (n % self.line_number_step) == 0:
self._draw_linenumber(p, n)
def _paint_line_number_bg(self, im):
"""
Paint the line number background on the image.
"""
if not self.line_numbers:
return
if self.line_number_fg is None:
return
draw = ImageDraw.Draw(im)
recth = im.size[-1]
rectw = self.image_pad + self.line_number_width - self.line_number_pad
draw.rectangle([(0, 0),
(rectw, recth)],
fill=self.line_number_bg)
draw.line([(rectw, 0), (rectw, recth)], fill=self.line_number_fg)
del draw
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper())
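# A minimal usage sketch (hedged; 'out.png' is an arbitrary output name):
# the formatter slots into the standard pygments highlight() pipeline.
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   with open('out.png', 'wb') as f:
#       highlight('print "hello"\n', PythonLexer(), ImageFormatter(), f)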
# Add one formatter per format, so that the "-f gif" option gives the correct result
# when used in pygmentize.
class GifImageFormatter(ImageFormatter):
"""
Create a GIF image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create GIF images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_gif'
aliases = ['gif']
filenames = ['*.gif']
default_image_format = 'gif'
class JpgImageFormatter(ImageFormatter):
"""
Create a JPEG image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create JPEG images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_jpg'
aliases = ['jpg', 'jpeg']
filenames = ['*.jpg']
default_image_format = 'jpeg'
class BmpImageFormatter(ImageFormatter):
"""
Create a bitmap image from source code. This uses the Python Imaging Library to
generate a pixmap from the source code.
*New in Pygments 1.0.* (You could create bitmap images before by passing a
suitable `image_format` option to the `ImageFormatter`.)
"""
name = 'img_bmp'
aliases = ['bmp', 'bitmap']
filenames = ['*.bmp']
default_image_format = 'bmp'
| mit |
ondra-novak/chromium.src | build/android/gyp/pack_arm_relocations.py | 5 | 4082 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Pack ARM relative relocations in a library (or copy unchanged).
If --enable-packing and --configuration-name=='Release', invoke the
relocation_packer tool to pack the .rel.dyn section in the given library
files. This step is inserted after the libraries are stripped. Packing
adds a new .android.rel.dyn section to the file and reduces the size of
.rel.dyn accordingly.
Currently packing only understands ARM32 shared libraries. For all other
architectures --enable-packing should be set to zero. In this case the
script copies files verbatim, with no attempt to pack relative relocations.
Any library listed in --exclude-packing-list is also copied verbatim,
irrespective of any --enable-packing setting. Typically this would be
'libchromium_android_linker.so'.
"""
import json
import optparse
import os
import shlex
import shutil
import sys
import tempfile
from util import build_utils
def PackArmLibraryRelocations(android_pack_relocations,
android_objcopy,
library_path,
output_path):
if not build_utils.IsTimeStale(output_path, [library_path]):
return
# Copy and add a 'NULL' .android.rel.dyn section for the packing tool.
with tempfile.NamedTemporaryFile() as stream:
stream.write('NULL')
stream.flush()
objcopy_command = [android_objcopy,
'--add-section', '.android.rel.dyn=%s' % stream.name,
library_path, output_path]
build_utils.CheckOutput(objcopy_command)
# Pack R_ARM_RELATIVE relocations.
pack_command = [android_pack_relocations, output_path]
build_utils.CheckOutput(pack_command)
def CopyArmLibraryUnchanged(library_path, output_path):
if not build_utils.IsTimeStale(output_path, [library_path]):
return
shutil.copy(library_path, output_path)
def main():
parser = optparse.OptionParser()
parser.add_option('--configuration-name',
default='Release',
help='Gyp configuration name (i.e. Debug, Release)')
parser.add_option('--enable-packing',
choices=['0', '1'],
help=('Pack relocations if 1 and configuration name is \'Release\','
' otherwise plain file copy'))
parser.add_option('--exclude-packing-list',
default='',
help='Names of any libraries explicitly not packed')
parser.add_option('--android-pack-relocations',
help='Path to the ARM relocations packer binary')
parser.add_option('--android-objcopy',
help='Path to the toolchain\'s objcopy binary')
parser.add_option('--stripped-libraries-dir',
help='Directory for stripped libraries')
parser.add_option('--packed-libraries-dir',
help='Directory for packed libraries')
parser.add_option('--libraries-file',
help='Path to json file containing list of libraries')
parser.add_option('--stamp', help='Path to touch on success')
options, _ = parser.parse_args()
enable_packing = (options.enable_packing == '1' and
options.configuration_name == 'Release')
exclude_packing_set = set(shlex.split(options.exclude_packing_list))
with open(options.libraries_file, 'r') as libfile:
libraries = json.load(libfile)
build_utils.MakeDirectory(options.packed_libraries_dir)
for library in libraries:
library_path = os.path.join(options.stripped_libraries_dir, library)
output_path = os.path.join(options.packed_libraries_dir, library)
if enable_packing and library not in exclude_packing_set:
PackArmLibraryRelocations(options.android_pack_relocations,
options.android_objcopy,
library_path,
output_path)
else:
CopyArmLibraryUnchanged(library_path, output_path)
if options.stamp:
build_utils.Touch(options.stamp)
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
dcroc16/skunk_works | google_appengine/google/storage/speckle/python/django/management/commands/getoauthtoken.py | 14 | 2381 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""OAuth 2.0 command for Django Google SQL Service backend."""
from google.storage.speckle.python.api import rdbms_googleapi
import apiclient
from django.core.management import base
from oauth2client import client
class Command(base.NoArgsCommand):
"""Management command to fetch OAuth2 credentials for Google SQL Service."""
requires_model_validation = False
can_import_settings = False
help = ('Fetches OAuth2 credentials for accessing the Google SQL Service via '
'the Google API driver')
def handle_noargs(self, **unused_options):
"""Perform an OAuth 2.0 oob flow.
After the flow completes, instructions are provided to manually store the
OAuth2 refresh_token in the project settings file.
"""
flow = rdbms_googleapi.GetFlow()
self.stdout.write('\nGo to the following link in your browser:\n%s\n\n' %
flow.step1_get_authorize_url('oob'))
accepted = 'n'
while accepted.lower() == 'n':
accepted = raw_input('Have you authorized me? (y/n) ')
code = raw_input('What is the verification code? ').strip()
try:
credential = flow.step2_exchange(code)
except client.FlowExchangeError:
raise base.CommandError('The authentication has failed.')
self.stdout.write(
'\nAdd your OAuth refresh token (%s) as an "OAUTH2_SECRET" parameter to'
' your database OPTIONS. For example:\n' % credential.refresh_token)
self.stdout.write("""
DATABASES = {
'default': {
'ENGINE': 'google.storage.speckle.python.django.backend',
'INSTANCE': 'examplecom:instance',
'NAME': 'dbname',
'OPTIONS': {
'OAUTH2_SECRET': '%s',
}
}
}\n""" % credential.refresh_token)
| mit |
Qalthos/ansible | lib/ansible/modules/storage/netapp/na_ontap_svm_options.py | 59 | 5200 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
short_description: NetApp ONTAP Modify SVM Options
author: NetApp Ansible Team (@carchi8py) <ng-ansibleteam@netapp.com>
description:
- Modify ONTAP SVM Options
- Only Options that appear on "vserver options show" can be set
extends_documentation_fragment:
- netapp.na_ontap
module: na_ontap_svm_options
version_added: "2.7"
options:
name:
description:
- Name of the option.
value:
description:
- Value of the option.
- Value must be in quotes
vserver:
description:
- The name of the vserver to which this option belongs to.
required: True
'''
EXAMPLES = """
- name: Set SVM Options
na_ontap_svm_options:
vserver: "{{ netapp_vserver_name }}"
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
name: snmp.enable
value: 'on'
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppONTAPSvmOptions(object):
def __init__(self):
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
name=dict(required=False, type="str", default=None),
value=dict(required=False, type='str', default=None),
vserver=dict(required=True, type='str')
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=self.parameters['vserver'])
return
def set_options(self):
"""
Set a specific option
:return: None
"""
option_obj = netapp_utils.zapi.NaElement("options-set")
option_obj.add_new_child('name', self.parameters['name'])
option_obj.add_new_child('value', self.parameters['value'])
try:
result = self.server.invoke_successfully(option_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error setting options: %s" % to_native(error), exception=traceback.format_exc())
def list_options(self):
"""
List all Options on the Vserver
:return: None
"""
option_obj = netapp_utils.zapi.NaElement("options-list-info")
try:
result = self.server.invoke_successfully(option_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error getting options: %s" % to_native(error), exception=traceback.format_exc())
def is_option_set(self):
"""
Checks to see if an option is set or not
:return: If option is set return True, else return False
"""
option_obj = netapp_utils.zapi.NaElement("options-get-iter")
options_info = netapp_utils.zapi.NaElement("option-info")
if self.parameters.get('name') is not None:
options_info.add_new_child("name", self.parameters['name'])
if self.parameters.get('value') is not None:
options_info.add_new_child("value", self.parameters['value'])
if "vserver" in self.parameters.keys():
if self.parameters['vserver'] is not None:
options_info.add_new_child("vserver", self.parameters['vserver'])
query = netapp_utils.zapi.NaElement("query")
query.add_child_elem(options_info)
option_obj.add_child_elem(query)
try:
result = self.server.invoke_successfully(option_obj, True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg="Error finding option: %s" % to_native(error), exception=traceback.format_exc())
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
return True
return False
def apply(self):
changed = False
netapp_utils.ems_log_event("na_ontap_svm_options", self.server)
is_set = self.is_option_set()
if not is_set:
self.set_options()
changed = True
self.module.exit_json(changed=changed)
def main():
"""
Execute action from playbook
:return: none
"""
cg_obj = NetAppONTAPSvmOptions()
cg_obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
Hellowlol/plexpy | lib/requests/packages/chardet/sbcharsetprober.py | 2927 | 4793 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .charsetprober import CharSetProber
from .compat import wrap_ord
SAMPLE_SIZE = 64
SB_ENOUGH_REL_THRESHOLD = 1024
POSITIVE_SHORTCUT_THRESHOLD = 0.95
NEGATIVE_SHORTCUT_THRESHOLD = 0.05
SYMBOL_CAT_ORDER = 250
NUMBER_OF_SEQ_CAT = 4
POSITIVE_CAT = NUMBER_OF_SEQ_CAT - 1
#NEGATIVE_CAT = 0
class SingleByteCharSetProber(CharSetProber):
def __init__(self, model, reversed=False, nameProber=None):
CharSetProber.__init__(self)
self._mModel = model
# TRUE if we need to reverse every pair in the model lookup
self._mReversed = reversed
# Optional auxiliary prober for name decision
self._mNameProber = nameProber
self.reset()
def reset(self):
CharSetProber.reset(self)
# char order of last character
self._mLastOrder = 255
self._mSeqCounters = [0] * NUMBER_OF_SEQ_CAT
self._mTotalSeqs = 0
self._mTotalChar = 0
# characters that fall in our sampling range
self._mFreqChar = 0
def get_charset_name(self):
if self._mNameProber:
return self._mNameProber.get_charset_name()
else:
return self._mModel['charsetName']
def feed(self, aBuf):
if not self._mModel['keepEnglishLetter']:
aBuf = self.filter_without_english_letters(aBuf)
aLen = len(aBuf)
if not aLen:
return self.get_state()
for c in aBuf:
order = self._mModel['charToOrderMap'][wrap_ord(c)]
if order < SYMBOL_CAT_ORDER:
self._mTotalChar += 1
if order < SAMPLE_SIZE:
self._mFreqChar += 1
if self._mLastOrder < SAMPLE_SIZE:
self._mTotalSeqs += 1
if not self._mReversed:
i = (self._mLastOrder * SAMPLE_SIZE) + order
model = self._mModel['precedenceMatrix'][i]
else: # reverse the order of the letters in the lookup
i = (order * SAMPLE_SIZE) + self._mLastOrder
model = self._mModel['precedenceMatrix'][i]
self._mSeqCounters[model] += 1
self._mLastOrder = order
if self.get_state() == constants.eDetecting:
if self._mTotalSeqs > SB_ENOUGH_REL_THRESHOLD:
cf = self.get_confidence()
if cf > POSITIVE_SHORTCUT_THRESHOLD:
if constants._debug:
sys.stderr.write('%s confidence = %s, we have a '
'winner\n' %
(self._mModel['charsetName'], cf))
self._mState = constants.eFoundIt
elif cf < NEGATIVE_SHORTCUT_THRESHOLD:
if constants._debug:
sys.stderr.write('%s confidence = %s, below negative'
'shortcut threshhold %s\n' %
(self._mModel['charsetName'], cf,
NEGATIVE_SHORTCUT_THRESHOLD))
self._mState = constants.eNotMe
return self.get_state()
def get_confidence(self):
r = 0.01
if self._mTotalSeqs > 0:
r = ((1.0 * self._mSeqCounters[POSITIVE_CAT]) / self._mTotalSeqs
/ self._mModel['mTypicalPositiveRatio'])
r = r * self._mFreqChar / self._mTotalChar
if r >= 1.0:
r = 0.99
return r
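# A minimal usage sketch (hedged; 'Win1251CyrillicModel' stands in for any of
# the language model dicts shipped alongside this prober):
#
#   from .langcyrillicmodel import Win1251CyrillicModel
#   prober = SingleByteCharSetProber(Win1251CyrillicModel)
#   prober.feed(some_bytes)
#   print(prober.get_charset_name(), prober.get_confidence())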
| gpl-3.0 |
anastue/netforce | netforce/netforce/model/fields/boolean.py | 4 | 1598 | # Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
from .field import Field
class Boolean(Field):
def __init__(self, string, **kw):
super().__init__(string=string, **kw)
if not self.function:
self.eager_load = True
def get_col_type(self):
return "bool"
def get_meta(self, context={}):
vals = super(Boolean, self).get_meta(context=context)
vals["type"] = "boolean"
return vals
def validate(self, val):
if val:
return True
return False
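# A minimal usage sketch (illustrative): the field coerces any value to a
# strict True/False and maps to a SQL bool column.
#
#   f = Boolean("Active")
#   f.get_col_type()   # -> "bool"
#   f.validate(1)      # -> True
#   f.validate(None)   # -> False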
| mit |
variac/bazel | third_party/protobuf/3.2.0/python/google/protobuf/internal/text_format_test.py | 12 | 59556 | #! /usr/bin/env python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = 'kenton@google.com (Kenton Varda)'
import re
import six
import string
try:
import unittest2 as unittest # PY26, pylint: disable=g-import-not-at-top
except ImportError:
import unittest # pylint: disable=g-import-not-at-top
from google.protobuf.internal import _parameterized
from google.protobuf import any_test_pb2
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import any_test_pb2 as test_extend_any
from google.protobuf.internal import test_util
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf import descriptor_pool
from google.protobuf import text_format
# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):
# The members of _QUOTES are formatted into a regexp template that
# expects single characters. Therefore it's an error (in addition to being
# non-sensical in the first place) to try to specify a "quote mark" that is
# more than one character.
def testQuoteMarksAreSingleChars(self):
for quote in text_format._QUOTES:
self.assertEqual(1, len(quote))
# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):
def ReadGolden(self, golden_filename):
with test_util.GoldenFile(golden_filename) as f:
return (f.readlines() if str is bytes else # PY3
[golden_line.decode('utf-8') for golden_line in f])
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.assertMultiLineEqual(text, ''.join(golden_lines))
def CompareToGoldenText(self, text, golden_text):
self.assertEqual(text, golden_text)
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
# Floating point fields are printed with .0 suffix even if they are
# actually integer numbers.
text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
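# For example (illustrative): RemoveRedundantZeros turns '1.23e+005' into
# '1.23e+5' via the chained replaces, and the MULTILINE regex strips a
# trailing '.0', so '111.0' at the end of a line becomes '111'.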
@_parameterized.Parameters((unittest_pb2), (unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):
def testPrintExotic(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string:'
' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self, message_module):
class UnicodeSub(six.text_type):
pass
message = message_module.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
self.CompareToGoldenText(
text_format.MessageToString(message),
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self, message_module):
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append('Google')
message.repeated_string.append('Zurich')
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.optional_string = 'a\nnew\nline'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=False)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=True)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintRawUtf8String(self, message_module):
message = message_module.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = message_module.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintFloatFormat(self, message_module):
# Check that float_format argument is passed to sub-message formatting.
message = message_module.NestedTestAllTypes()
# We use 1.25 as it is a round number in binary. The proto 32-bit float
# will not gain additional imprecise digits as a 64-bit Python float and
# show up in its str. 32-bit 1.2 is noisy when extended to 64-bit:
# >>> struct.unpack('f', struct.pack('f', 1.2))[0]
# 1.2000000476837158
# >>> struct.unpack('f', struct.pack('f', 1.25))[0]
# 1.25
message.payload.optional_float = 1.25
# Check rounding at 15 significant digits
message.payload.optional_double = -.000003456789012345678
# Check no decimal point.
message.payload.repeated_float.append(-5642)
# Check no trailing zeros.
message.payload.repeated_double.append(.000078900)
formatted_fields = ['optional_float: 1.25',
'optional_double: -3.45678901234568e-6',
'repeated_float: -5642', 'repeated_double: 7.89e-5']
text_message = text_format.MessageToString(message, float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(
*formatted_fields))
# as_one_line=True is a separate code branch where float_format is passed.
text_message = text_format.MessageToString(message,
as_one_line=True,
float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields))
def testMessageToString(self, message_module):
message = message_module.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def testPrintField(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintField(field, value, out)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintField(field, value)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
def testPrintFieldValue(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintFieldValue(field, value, out)
self.assertEqual('0.0', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintFieldValue(field, value)
self.assertEqual('0.0', out.getvalue())
out.close()
def testParseAllFields(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
def testParseExotic(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Parse(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual('\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testParseTrailingCommas(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: 100;\n'
'repeated_int64: 200;\n'
'repeated_int64: 300,\n'
'repeated_string: "one",\n'
'repeated_string: "two";\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedScalarShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: [100, 200];\n'
'repeated_int64: 300,\n'
'repeated_string: ["one", "two"];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedMessageShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_nested_message: [{bb: 100}, {bb: 200}],\n'
'repeated_nested_message: {bb: 300}\n'
'repeated_nested_message [{bb: 400}];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_nested_message[0].bb)
self.assertEqual(200, message.repeated_nested_message[1].bb)
self.assertEqual(300, message.repeated_nested_message[2].bb)
self.assertEqual(400, message.repeated_nested_message[3].bb)
def testParseEmptyText(self, message_module):
message = message_module.TestAllTypes()
text = ''
text_format.Parse(text, message)
self.assertEqual(message_module.TestAllTypes(), message)
def testParseInvalidUtf8(self, message_module):
message = message_module.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
self.assertRaises(text_format.ParseError, text_format.Parse, text, message)
def testParseSingleWord(self, message_module):
message = message_module.TestAllTypes()
text = 'foo'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"foo".'), text_format.Parse, text, message)
def testParseUnknownField(self, message_module):
message = message_module.TestAllTypes()
text = 'unknown_field: 8\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"unknown_field".'), text_format.Parse, text, message)
def testParseBadEnumValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_nested_enum: BARR'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value named BARR.'), text_format.Parse,
text, message)
message = message_module.TestAllTypes()
text = 'optional_nested_enum: 100'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value with number 100.'), text_format.Parse,
text, message)
def testParseBadIntValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_int32: bork'
six.assertRaisesRegex(self, text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Parse, text, message)
def testParseStringFieldUnescape(self, message_module):
message = message_module.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Parse(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def testMergeDuplicateScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
r = text_format.Merge(text, message)
self.assertIs(r, message)
self.assertEqual(67, message.optional_int32)
def testMergeDuplicateNestedMessageScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
r = text_format.Merge(text, message)
self.assertTrue(r is message)
self.assertEqual(2, message.optional_nested_message.bb)
def testParseOneof(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
text_format.Parse(text_format.MessageToString(m), m2)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testParseMultipleOneof(self, message_module):
m_string = '\n'.join(['oneof_uint32: 11', 'oneof_string: "foo"'])
m2 = message_module.TestAllTypes()
if message_module is unittest_pb2:
with self.assertRaisesRegexp(text_format.ParseError,
' is specified along with field '):
text_format.Parse(m_string, m2)
else:
text_format.Parse(m_string, m2)
self.assertEqual('oneof_string', m2.WhichOneof('oneof_field'))
# These are tests that aren't fundamentally specific to proto2, but are at
# the moment because of differences between the proto2 and proto3 test schemas.
# Ideally the schemas would be made more similar so these tests could pass.
class OnlyWorksWithProto2RightNowTests(TextFormatBase):
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testParseGolden(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_data_oneof_implemented.txt'))
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.Parse(golden_text, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data_oneof_implemented.txt')
def testPrintInIndexOrder(self):
message = unittest_pb2.TestFieldOrderings()
message.my_string = '115'
message.my_int = 101
message.my_float = 111
message.optional_nested_message.oo = 0
message.optional_nested_message.bb = 1
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, use_index_order=True)),
'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
'optional_nested_message {\n oo: 0\n bb: 1\n}\n')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
'optional_nested_message {\n bb: 1\n oo: 0\n}\n')
def testMergeLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.MergeLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testParseLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.ParseLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintMap(self):
message = map_unittest_pb2.TestMap()
message.map_int32_int32[-123] = -456
message.map_int64_int64[-2**33] = -2**34
message.map_uint32_uint32[123] = 456
message.map_uint64_uint64[2**33] = 2**34
message.map_string_string['abc'] = '123'
message.map_int32_foreign_message[111].c = 5
# Maps are serialized to text format using their underlying repeated
# representation.
self.CompareToGoldenText(
text_format.MessageToString(message), 'map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
def testMapOrderEnforcement(self):
message = map_unittest_pb2.TestMap()
for letter in string.ascii_uppercase[13:26]:
message.map_string_string[letter] = 'dummy'
for letter in reversed(string.ascii_uppercase[0:13]):
message.map_string_string[letter] = 'dummy'
golden = ''.join(('map_string_string {\n key: "%c"\n value: "dummy"\n}\n'
% (letter,) for letter in string.ascii_uppercase))
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# TODO(teboring): In c/137553523, not serializing default value for map entry
# message has been fixed. This test needs to be disabled in order to submit
# that cl. Add this back when c/137553523 has been submitted.
# def testMapOrderSemantics(self):
# golden_lines = self.ReadGolden('map_test_data.txt')
# message = map_unittest_pb2.TestMap()
# text_format.ParseLines(golden_lines, message)
# candidate = text_format.MessageToString(message)
# # The Python implementation emits "1.0" for the double value that the C++
# # implementation emits as "1".
# candidate = candidate.replace('1.0', '1', 2)
# candidate = candidate.replace('0.0', '0', 2)
# self.assertMultiLineEqual(candidate, ''.join(golden_lines))
# Tests of proto2-only features (MessageSet, extensions, etc.).
class Proto2Tests(TextFormatBase):
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message), 'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
message = message_set_extensions_pb2.TestMessageSet()
ext = message_set_extensions_pb2.message_set_extension3
message.Extensions[ext].text = 'bar'
self.CompareToGoldenText(
text_format.MessageToString(message),
'[google.protobuf.internal.TestMessageSetExtension3] {\n'
' text: \"bar\"\n'
'}\n')
def testPrintMessageSetByFieldNumber(self):
out = text_format.TextWriter(False)
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
text_format.PrintMessage(message, out, use_field_number=True)
self.CompareToGoldenText(out.getvalue(), '1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
out.close()
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testParseMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testExtensionInsideAnyMessage(self):
message = test_extend_any.TestAny()
text = ('value {\n'
' [type.googleapis.com/google.protobuf.internal.TestAny] {\n'
' [google.protobuf.internal.TestAnyExtension1.extension1] {\n'
' i: 10\n'
' }\n'
' }\n'
'}\n')
text_format.Merge(text, message, descriptor_pool=descriptor_pool.Default())
self.CompareToGoldenText(
text_format.MessageToString(
message, descriptor_pool=descriptor_pool.Default()),
text)
def testParseMessageByFieldNumber(self):
message = unittest_pb2.TestAllTypes()
text = ('34: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message, allow_field_number=True)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_field_number=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
# Can't parse a field number without setting allow_field_number=True.
message = unittest_pb2.TestAllTypes()
text = '34:1\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"34".'), text_format.Parse, text, message)
# Can't parse if field number is not found.
text = '1234:1\n'
six.assertRaisesRegex(
self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"1234".'),
text_format.Parse,
text,
message,
allow_field_number=True)
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintAllExtensionsPointy(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_extensions_data_pointy.txt')
def testParseGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEqual(message, parsed_message)
def testParseAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testParseAllowedUnknownExtension(self):
# Skip over unknown extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [unknown_extension] {\n'
' i: 23\n'
' bin: "\xe0"'
' [nested_unknown_ext]: {\n'
' i: 23\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' multiline_str: "abc"\n'
' "def"\n'
' "xyz."\n'
' [nested_unknown_ext]: <\n'
' i: 23\n'
' i: 24\n'
' pointfloat: .3\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' long_string: "test" "test2" \n'
' >\n'
' }\n'
' }\n'
' [unknown_extension]: 5\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
golden = 'message_set {\n}\n'
self.CompareToGoldenText(text_format.MessageToString(message), golden)
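    # Note: skipped unknown extensions are dropped rather than preserved,
    # which is why the re-serialized golden text above contains only an
    # empty message_set.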
# Catch parse errors in unknown extension.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' i:\n' # Missing value.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: }',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed string\n' # Missing closing quote.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed\n multiline\n string\n'
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [malformed_extension] <\n'
' i: -5\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'5:1 : Expected ">".',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Don't allow unknown fields with allow_unknown_extension=True.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' unknown_field: true\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
('2:3 : Message type '
'"proto2_wireformat_unittest.TestMessageSet" has no'
' field named "unknown_field".'),
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Parse a known extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testParseBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
six.assertRaisesRegex(self, text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Parse, text, message)
message = unittest_pb2.TestAllTypes()
six.assertRaisesRegex(self, text_format.ParseError, (
'1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'), text_format.Parse, text, message)
def testMergeDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
text_format.Merge(text, message)
self.assertEqual(67,
message.Extensions[unittest_pb2.optional_int32_extension])
def testParseDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:96 : Message type "protobuf_unittest.TestAllExtensions" '
'should not have multiple '
'"protobuf_unittest.optional_int32_extension" extensions.'),
text_format.Parse, text, message)
def testParseDuplicateNestedMessageScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
'should not have multiple "bb" fields.'), text_format.Parse, text,
message)
def testParseDuplicateScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
'have multiple "optional_int32" fields.'), text_format.Parse, text,
message)
def testParseGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected ">".',
text_format.Parse, text, message)
text = 'RepeatedGroup: {'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected "}".',
text_format.Parse, text, message)
def testParseEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
# Maps aren't really proto2-only, but our test schema only has maps for
# proto2.
def testParseMap(self):
text = ('map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
message = map_unittest_pb2.TestMap()
text_format.Parse(text, message)
self.assertEqual(-456, message.map_int32_int32[-123])
self.assertEqual(-2**34, message.map_int64_int64[-2**33])
self.assertEqual(456, message.map_uint32_uint32[123])
self.assertEqual(2**34, message.map_uint64_uint64[2**33])
self.assertEqual('123', message.map_string_string['abc'])
self.assertEqual(5, message.map_int32_foreign_message[111].c)
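  # Duplicate keys in map text do not raise; under standard map merge
  # semantics the last entry wins. A minimal sketch (helper only, not
  # auto-collected by unittest):
  def exampleDuplicateMapKeyLastWins(self):
    message = map_unittest_pb2.TestMap()
    text_format.Parse('map_int32_int32 { key: 1 value: 2 }\n'
                      'map_int32_int32 { key: 1 value: 3 }\n', message)
    self.assertEqual(3, message.map_int32_int32[1])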
class Proto3Tests(unittest.TestCase):
def testPrintMessageExpandAny(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
descriptor_pool=descriptor_pool.Default()),
'any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyRepeated(self):
packed_message = unittest_pb2.OneString()
message = any_test_pb2.TestAny()
packed_message.data = 'string0'
message.repeated_any_value.add().Pack(packed_message)
packed_message.data = 'string1'
message.repeated_any_value.add().Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
descriptor_pool=descriptor_pool.Default()),
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyNoDescriptorPool(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message, descriptor_pool=None),
'any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
def testPrintMessageExpandAnyDescriptorPoolMissingType(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
empty_pool = descriptor_pool.DescriptorPool()
self.assertEqual(
text_format.MessageToString(message, descriptor_pool=empty_pool),
'any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
def testPrintMessageExpandAnyPointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
pointy_brackets=True,
descriptor_pool=descriptor_pool.Default()),
'any_value <\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'>\n')
def testPrintMessageExpandAnyAsOneLine(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True,
descriptor_pool=descriptor_pool.Default()),
'any_value {'
' [type.googleapis.com/protobuf_unittest.OneString]'
' { data: "string" } '
'}')
def testPrintMessageExpandAnyAsOneLinePointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True,
pointy_brackets=True,
descriptor_pool=descriptor_pool.Default()),
'any_value <'
' [type.googleapis.com/protobuf_unittest.OneString]'
' < data: "string" > '
'>')
def testMergeExpandedAny(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
text_format.Merge(text, message, descriptor_pool=descriptor_pool.Default())
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
message.Clear()
text_format.Parse(text, message, descriptor_pool=descriptor_pool.Default())
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeExpandedAnyRepeated(self):
message = any_test_pb2.TestAny()
text = ('repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
text_format.Merge(text, message, descriptor_pool=descriptor_pool.Default())
packed_message = unittest_pb2.OneString()
message.repeated_any_value[0].Unpack(packed_message)
self.assertEqual('string0', packed_message.data)
message.repeated_any_value[1].Unpack(packed_message)
self.assertEqual('string1', packed_message.data)
def testMergeExpandedAnyPointyBrackets(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'}\n')
text_format.Merge(text, message, descriptor_pool=descriptor_pool.Default())
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeExpandedAnyNoDescriptorPool(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
with self.assertRaises(text_format.ParseError) as e:
text_format.Merge(text, message, descriptor_pool=None)
self.assertEqual(str(e.exception),
'Descriptor pool required to parse expanded Any field')
def testMergeExpandedAnyDescriptorPoolMissingType(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
with self.assertRaises(text_format.ParseError) as e:
empty_pool = descriptor_pool.DescriptorPool()
text_format.Merge(text, message, descriptor_pool=empty_pool)
self.assertEqual(
str(e.exception),
'Type protobuf_unittest.OneString not found in descriptor pool')
def testMergeUnexpandedAny(self):
text = ('any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
message = any_test_pb2.TestAny()
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
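  # In the unexpanded form the Any payload stays as raw serialized bytes
  # plus a type_url, so no descriptor pool is needed to merge it; expansion
  # only applies when the bracketed [type.googleapis.com/...] syntax is used.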
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f '
'False_bool: False True_bool: True')
tokenizer = text_format.Tokenizer(text.splitlines())
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'), ':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'), ':',
(tokenizer.ConsumeInteger, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'), ':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'), ':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'), ':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'), ':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'), ':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'), ':', '{',
(tokenizer.ConsumeIdentifier, 'A'), ':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'), ':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'), ':',
(tokenizer.ConsumeBool, False), '}',
(tokenizer.ConsumeIdentifier, 'ID9'), ':',
(tokenizer.ConsumeInteger, 22),
(tokenizer.ConsumeIdentifier, 'ID10'), ':',
(tokenizer.ConsumeInteger, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'), ':',
(tokenizer.ConsumeInteger, -22),
(tokenizer.ConsumeIdentifier, 'ID12'), ':',
(tokenizer.ConsumeInteger, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'), ':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'), ':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'False_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'True_bool'), ':',
(tokenizer.ConsumeBool, True)]
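    # Drive the tokenizer against the expectations above: a bare string is a
    # punctuation token matched verbatim, while a (consumer, expected) tuple
    # invokes the given Consume* method and checks its return value.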
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if isinstance(m, str):
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeAbstractIntegers(self):
# ConsumeInteger() performs no per-type range checking, so values outside
# the uint32/int64 bounds are accepted, as are the '0' and '-0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(-1, tokenizer.ConsumeInteger())
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInteger())
self.assertEqual(int64_max + 1, tokenizer.ConsumeInteger())
self.assertTrue(tokenizer.AtEnd())
text = '-0 0'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertTrue(tokenizer.AtEnd())
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint64, tokenizer)
self.assertEqual(-1, text_format._ConsumeInt32(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt32, tokenizer)
self.assertEqual(uint32_max + 1, text_format._ConsumeInt64(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt64, tokenizer)
self.assertEqual(int64_max + 1, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
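    # Boundary summary for the text above: -1 is accepted only by the signed
    # consumers; uint32_max + 1 overflows both 32-bit consumers but fits
    # int64/uint64; and int64_max + 1 fits only uint64.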
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
def testSkipComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines())
self.assertTrue(tokenizer.AtEnd())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
def testConsumeComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# another comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTrailingComment(self):
text = 'some_number: 4\n# some comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeLineComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoLineComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# another comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeAndCheckTrailingComment(self):
text = 'some_number: 4 # some comment' # trailing comment on the same line
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((True, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testHashinComment(self):
text = 'some_number: 4 # some comment # not a new comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertEqual((True, '# some comment # not a new comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
pabulumm/neighbors | lib/python3.4/site-packages/pip/compat/ordereddict.py | 141 | 4110 | # Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# flake8: noqa
from UserDict import DictMixin
class OrderedDict(dict, DictMixin):
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
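# A quick self-check sketch, run only when this module is executed directly:
# insertion order is preserved and popitem(last=False) pops the oldest entry.
if __name__ == '__main__':
    d = OrderedDict()
    d['b'] = 1
    d['a'] = 2
    assert d.keys() == ['b', 'a']
    assert d.popitem(last=False) == ('b', 1)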
| bsd-3-clause |
solomanchannel/pluging.video.monstertv | copy_addon.py | 11 | 8140 | import sys
import os
import json
import urllib
import urlparse
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
import load_channels
import hashlib
import re
import time
import server
import config
addon = xbmcaddon.Addon()
addonname = addon.getAddonInfo('name')
addondir = xbmc.translatePath( addon.getAddonInfo('profile') )
base_url = sys.argv[0]
addon_handle = int(sys.argv[1])
args = urlparse.parse_qs(sys.argv[2][1:])
go = True;
xbmcplugin.setContent(addon_handle, 'movies')
def addPortal(portal):
if portal['url'] == '':
return;
url = build_url({
'mode': 'genres',
'portal' : json.dumps(portal)
});
cmd = 'XBMC.RunPlugin(' + base_url + '?mode=cache&stalker_url=' + portal['url'] + ')';
li = xbmcgui.ListItem(portal['name'], iconImage='DefaultProgram.png')
li.addContextMenuItems([ ('Clear Cache', cmd) ]);
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True);
def build_url(query):
return base_url + '?' + urllib.urlencode(query)
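# Sketch of a resulting plugin URL (values illustrative):
#   plugin://<addon_id>/?mode=channels&genre_id=5&genre_name=News&portal=...
# Kodi re-invokes this script with that query string, which parse_qs above
# decodes back into `args`.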
def homeLevel():
global portal_1, portal_2, portal_3, go;
# TODO: handle the case where no portal is configured
if go:
addPortal(portal_1);
addPortal(portal_2);
addPortal(portal_3);
xbmcplugin.endOfDirectory(addon_handle);
def genreLevel():
try:
data = load_channels.getGenres(portal['mac'], portal['url'], portal['serial'], addondir);
except Exception as e:
xbmcgui.Dialog().notification(addonname, str(e), xbmcgui.NOTIFICATION_ERROR );
return;
data = data['genres'];
url = build_url({
'mode': 'vod',
'portal' : json.dumps(portal)
});
li = xbmcgui.ListItem('VoD', iconImage='DefaultVideo.png')
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True);
for id, i in data.iteritems():
title = i["title"];
url = build_url({
'mode': 'channels',
'genre_id': id,
'genre_name': title.title(),
'portal' : json.dumps(portal)
});
if id == '10':
iconImage = 'OverlayLocked.png';
else:
iconImage = 'DefaultVideo.png';
li = xbmcgui.ListItem(title.title(), iconImage=iconImage)
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li, isFolder=True);
xbmcplugin.endOfDirectory(addon_handle);
def vodLevel():
try:
data = load_channels.getVoD(portal['mac'], portal['url'], portal['serial'], addondir);
except Exception as e:
xbmcgui.Dialog().notification(addonname, str(e), xbmcgui.NOTIFICATION_ERROR );
return;
data = data['vod'];
for i in data:
name = i["name"];
cmd = i["cmd"];
logo = i["logo"];
if logo != '':
logo_url = portal['url'] + logo;
else:
logo_url = 'DefaultVideo.png';
url = build_url({
'mode': 'play',
'cmd': cmd,
'tmp' : '0',
'title' : name.encode("utf-8"),
'genre_name' : 'VoD',
'logo_url' : logo_url,
'portal' : json.dumps(portal)
});
li = xbmcgui.ListItem(name, iconImage=logo_url, thumbnailImage=logo_url)
li.setInfo(type='Video', infoLabels={ "Title": name })
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li)
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_UNSORTED);
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_TITLE);
xbmcplugin.endOfDirectory(addon_handle);
def channelLevel():
stop=False;
try:
data = load_channels.getAllChannels(portal['mac'], portal['url'], portal['serial'], addondir);
except Exception as e:
xbmcgui.Dialog().notification(addonname, str(e), xbmcgui.NOTIFICATION_ERROR );
return;
data = data['channels'];
genre_name = args.get('genre_name', None);
genre_name = genre_name[0];
genre_id_main = args.get('genre_id', None);
genre_id_main = genre_id_main[0];
if genre_id_main == '10' and portal['parental'] == 'true':
result = xbmcgui.Dialog().input('Parental', hashlib.md5(portal['password'].encode('utf-8')).hexdigest(), type=xbmcgui.INPUT_PASSWORD, option=xbmcgui.PASSWORD_VERIFY);
if result == '':
stop = True;
if stop == False:
for i in data.values():
name = i["name"];
cmd = i["cmd"];
tmp = i["tmp"];
number = i["number"];
genre_id = i["genre_id"];
logo = i["logo"];
if genre_id_main == '*' and genre_id == '10' and portal['parental'] == 'true':
continue;
if genre_id_main == genre_id or genre_id_main == '*':
if logo != '':
logo_url = portal['url'] + '/stalker_portal/misc/logos/320/' + logo;
else:
logo_url = 'DefaultVideo.png';
url = build_url({
'mode': 'play',
'cmd': cmd,
'tmp' : tmp,
'title' : name.encode("utf-8"),
'genre_name' : genre_name,
'logo_url' : logo_url,
'portal' : json.dumps(portal)
});
li = xbmcgui.ListItem(name, iconImage=logo_url, thumbnailImage=logo_url);
li.setInfo(type='Video', infoLabels={
'title': name,
'count' : number
});
xbmcplugin.addDirectoryItem(handle=addon_handle, url=url, listitem=li);
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_PLAYLIST_ORDER);
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_TITLE);
xbmcplugin.addSortMethod(addon_handle, xbmcplugin.SORT_METHOD_PROGRAM_COUNT);
xbmcplugin.endOfDirectory(addon_handle);
def playLevel():
dp = xbmcgui.DialogProgressBG();
dp.create('IPTV', 'Loading ...');
title = args['title'][0];
cmd = args['cmd'][0];
tmp = args['tmp'][0];
genre_name = args['genre_name'][0];
logo_url = args['logo_url'][0];
try:
if genre_name != 'VoD':
url = load_channels.retriveUrl(portal['mac'], portal['url'], portal['serial'], cmd, tmp);
else:
url = load_channels.retriveVoD(portal['mac'], portal['url'], portal['serial'], cmd);
except Exception as e:
dp.close();
xbmcgui.Dialog().notification(addonname, str(e), xbmcgui.NOTIFICATION_ERROR );
return;
dp.update(80);
title = title.decode("utf-8");
title += ' (' + portal['name'] + ')';
# li = xbmcgui.ListItem(title, iconImage=logo_url)  # replaced in 9.0.19 by the line below
li = xbmcgui.ListItem(title, iconImage='DefaultVideo.png', thumbnailImage=logo_url);
li.setInfo('video', {'Title': title, 'Genre': genre_name});
xbmc.Player().play(item=url, listitem=li);
dp.update(100);
dp.close();
mode = args.get('mode', None);
portal = args.get('portal', None)
if portal is None:
portal_1 = config.portalConfig('1');
portal_2 = config.portalConfig('2');
portal_3 = config.portalConfig('3');
else:
portal = json.loads(portal[0]);
# Modification (9.0.19): force calls from outside to fall back to portal_1
# unless the requested portal matches portal 2 or portal 3.
portal_2 = config.portalConfig('2');
portal_3 = config.portalConfig('3');
if not ( portal['name'] == portal_2['name'] or portal['name'] == portal_3['name'] ) :
portal = config.portalConfig('1');
if mode is None:
homeLevel();
elif mode[0] == 'cache':
stalker_url = args.get('stalker_url', None);
stalker_url = stalker_url[0];
load_channels.clearCache(stalker_url, addondir);
elif mode[0] == 'genres':
genreLevel();
elif mode[0] == 'vod':
vodLevel();
elif mode[0] == 'channels':
channelLevel();
elif mode[0] == 'play':
playLevel();
elif mode[0] == 'server':
port = addon.getSetting('server_port');
action = args.get('action', None);
action = action[0];
dp = xbmcgui.DialogProgressBG();
dp.create('IPTV', 'Working ...');
if action == 'start':
if server.serverOnline():
xbmcgui.Dialog().notification(addonname, 'Server already started.\nPort: ' + str(port), xbmcgui.NOTIFICATION_INFO );
else:
server.startServer();
time.sleep(5);
if server.serverOnline():
xbmcgui.Dialog().notification(addonname, 'Server started.\nPort: ' + str(port), xbmcgui.NOTIFICATION_INFO );
else:
xbmcgui.Dialog().notification(addonname, 'Server not started. Wait one moment and try again. ', xbmcgui.NOTIFICATION_ERROR );
else:
if server.serverOnline():
server.stopServer();
time.sleep(5);
xbmcgui.Dialog().notification(addonname, 'Server stopped.', xbmcgui.NOTIFICATION_INFO );
else:
xbmcgui.Dialog().notification(addonname, 'Server is already stopped.', xbmcgui.NOTIFICATION_INFO );
dp.close();
| gpl-2.0 |
michellemorales/OpenMM | models/ptn/nets/ptn_im_decoder.py | 10 | 2902 | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image/Mask decoder used while pretraining the network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
_FEATURE_MAP_SIZE = 8
def _postprocess_im(images):
"""Performs post-processing for the images returned from conv net.
Transforms the value from [-1, 1] to [0, 1].
"""
return (images + 1) * 0.5
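# e.g. tanh output -1.0 -> 0.0, 0.0 -> 0.5 and 1.0 -> 1.0, matching the
# tf.nn.tanh activation on the final image deconvolution below.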
def model(identities, poses, params, is_training):
"""Decoder model to get image and mask from latent embedding."""
del is_training
f_dim = params.f_dim
fc_dim = params.fc_dim
outputs = dict()
with slim.arg_scope(
[slim.fully_connected, slim.conv2d_transpose],
weights_initializer=tf.truncated_normal_initializer(stddev=0.02, seed=1)):
# Concatenate the identity and pose units
h0 = tf.concat([identities, poses], 1)
h0 = slim.fully_connected(h0, fc_dim, activation_fn=tf.nn.relu)
h1 = slim.fully_connected(h0, fc_dim, activation_fn=tf.nn.relu)
# Mask decoder
dec_m0 = slim.fully_connected(
h1, (_FEATURE_MAP_SIZE**2) * f_dim * 2, activation_fn=tf.nn.relu)
dec_m0 = tf.reshape(
dec_m0, [-1, _FEATURE_MAP_SIZE, _FEATURE_MAP_SIZE, f_dim * 2])
dec_m1 = slim.conv2d_transpose(
dec_m0, f_dim, [5, 5], stride=2, activation_fn=tf.nn.relu)
dec_m2 = slim.conv2d_transpose(
dec_m1, int(f_dim / 2), [5, 5], stride=2, activation_fn=tf.nn.relu)
dec_m3 = slim.conv2d_transpose(
dec_m2, 1, [5, 5], stride=2, activation_fn=tf.nn.sigmoid)
# Image decoder
dec_i0 = slim.fully_connected(
h1, (_FEATURE_MAP_SIZE**2) * f_dim * 4, activation_fn=tf.nn.relu)
dec_i0 = tf.reshape(
dec_i0, [-1, _FEATURE_MAP_SIZE, _FEATURE_MAP_SIZE, f_dim * 4])
dec_i1 = slim.conv2d_transpose(
dec_i0, f_dim * 2, [5, 5], stride=2, activation_fn=tf.nn.relu)
dec_i2 = slim.conv2d_transpose(
dec_i1, f_dim * 2, [5, 5], stride=2, activation_fn=tf.nn.relu)
dec_i3 = slim.conv2d_transpose(
dec_i2, 3, [5, 5], stride=2, activation_fn=tf.nn.tanh)
outputs = dict()
outputs['images'] = _postprocess_im(dec_i3)
outputs['masks'] = dec_m3
return outputs
| gpl-2.0 |
JosmanPS/scikit-learn | sklearn/linear_model/tests/test_omp.py | 272 | 7752 | # Author: Vlad Niculae
# Licence: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model import (orthogonal_mp, orthogonal_mp_gram,
OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV,
LinearRegression)
from sklearn.utils import check_random_state
from sklearn.datasets import make_sparse_coded_signal
n_samples, n_features, n_nonzero_coefs, n_targets = 20, 30, 5, 3
y, X, gamma = make_sparse_coded_signal(n_targets, n_features, n_samples,
n_nonzero_coefs, random_state=0)
G, Xy = np.dot(X.T, X), np.dot(X.T, y)
# this makes X (n_samples, n_features)
# and y (n_samples, 3)
def test_correct_shapes():
assert_equal(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp(X, y, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_correct_shapes_gram():
assert_equal(orthogonal_mp_gram(G, Xy[:, 0], n_nonzero_coefs=5).shape,
(n_features,))
assert_equal(orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5).shape,
(n_features, 3))
def test_n_nonzero_coefs():
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0],
n_nonzero_coefs=5)) <= 5)
assert_true(np.count_nonzero(orthogonal_mp(X, y[:, 0], n_nonzero_coefs=5,
precompute=True)) <= 5)
def test_tol():
tol = 0.5
gamma = orthogonal_mp(X, y[:, 0], tol=tol)
gamma_gram = orthogonal_mp(X, y[:, 0], tol=tol, precompute=True)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma)) ** 2) <= tol)
assert_true(np.sum((y[:, 0] - np.dot(X, gamma_gram)) ** 2) <= tol)
def test_with_without_gram():
assert_array_almost_equal(
orthogonal_mp(X, y, n_nonzero_coefs=5),
orthogonal_mp(X, y, n_nonzero_coefs=5, precompute=True))
def test_with_without_gram_tol():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=1.),
orthogonal_mp(X, y, tol=1., precompute=True))
def test_unreachable_accuracy():
assert_array_almost_equal(
orthogonal_mp(X, y, tol=0),
orthogonal_mp(X, y, n_nonzero_coefs=n_features))
assert_array_almost_equal(
assert_warns(RuntimeWarning, orthogonal_mp, X, y, tol=0,
precompute=True),
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_features))
def test_bad_input():
assert_raises(ValueError, orthogonal_mp, X, y, tol=-1)
assert_raises(ValueError, orthogonal_mp, X, y, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp, X, y,
n_nonzero_coefs=n_features + 1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, tol=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy, n_nonzero_coefs=-1)
assert_raises(ValueError, orthogonal_mp_gram, G, Xy,
n_nonzero_coefs=n_features + 1)
def test_perfect_signal_recovery():
idx, = gamma[:, 0].nonzero()
gamma_rec = orthogonal_mp(X, y[:, 0], 5)
gamma_gram = orthogonal_mp_gram(G, Xy[:, 0], 5)
assert_array_equal(idx, np.flatnonzero(gamma_rec))
assert_array_equal(idx, np.flatnonzero(gamma_gram))
assert_array_almost_equal(gamma[:, 0], gamma_rec, decimal=2)
assert_array_almost_equal(gamma[:, 0], gamma_gram, decimal=2)
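# A minimal standalone usage sketch of the estimator under test:
#
#   from sklearn.linear_model import OrthogonalMatchingPursuit
#   omp = OrthogonalMatchingPursuit(n_nonzero_coefs=5)
#   omp.fit(X, y[:, 0])
#   omp.coef_  # dense coefficient vector with at most 5 nonzero entries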
def test_estimator():
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_nonzero_coefs)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_.shape, ())
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_.shape, (n_targets,))
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
omp.set_params(fit_intercept=False, normalize=False)
omp.fit(X, y[:, 0])
assert_equal(omp.coef_.shape, (n_features,))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_nonzero_coefs)
omp.fit(X, y)
assert_equal(omp.coef_.shape, (n_targets, n_features))
assert_equal(omp.intercept_, 0)
assert_true(np.count_nonzero(omp.coef_) <= n_targets * n_nonzero_coefs)
def test_identical_regressors():
newX = X.copy()
newX[:, 1] = newX[:, 0]
gamma = np.zeros(n_features)
gamma[0] = gamma[1] = 1.
newy = np.dot(newX, gamma)
assert_warns(RuntimeWarning, orthogonal_mp, newX, newy, 2)
def test_swapped_regressors():
gamma = np.zeros(n_features)
# X[:, 21] should be selected first, then X[:, 0] selected second,
# which will take X[:, 21]'s place in case the algorithm does
# column swapping for optimization (which is the case at the moment)
gamma[21] = 1.0
gamma[0] = 0.5
new_y = np.dot(X, gamma)
new_Xy = np.dot(X.T, new_y)
gamma_hat = orthogonal_mp(X, new_y, 2)
gamma_hat_gram = orthogonal_mp_gram(G, new_Xy, 2)
assert_array_equal(np.flatnonzero(gamma_hat), [0, 21])
assert_array_equal(np.flatnonzero(gamma_hat_gram), [0, 21])
def test_no_atoms():
y_empty = np.zeros_like(y)
Xy_empty = np.dot(X.T, y_empty)
gamma_empty = ignore_warnings(orthogonal_mp)(X, y_empty, 1)
gamma_empty_gram = ignore_warnings(orthogonal_mp)(G, Xy_empty, 1)
assert_equal(np.all(gamma_empty == 0), True)
assert_equal(np.all(gamma_empty_gram == 0), True)
def test_omp_path():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
path = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=True)
last = orthogonal_mp_gram(G, Xy, n_nonzero_coefs=5, return_path=False)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_return_path_prop_with_gram():
path = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=True,
precompute=True)
last = orthogonal_mp(X, y, n_nonzero_coefs=5, return_path=False,
precompute=True)
assert_equal(path.shape, (n_features, n_targets, 5))
assert_array_almost_equal(path[:, :, -1], last)
def test_omp_cv():
y_ = y[:, 0]
gamma_ = gamma[:, 0]
ompcv = OrthogonalMatchingPursuitCV(normalize=True, fit_intercept=False,
max_iter=10, cv=5)
ompcv.fit(X, y_)
assert_equal(ompcv.n_nonzero_coefs_, n_nonzero_coefs)
assert_array_almost_equal(ompcv.coef_, gamma_)
omp = OrthogonalMatchingPursuit(normalize=True, fit_intercept=False,
n_nonzero_coefs=ompcv.n_nonzero_coefs_)
omp.fit(X, y_)
assert_array_almost_equal(ompcv.coef_, omp.coef_)
def test_omp_reaches_least_squares():
# Use small simple data; it's a sanity check but OMP can stop early
rng = check_random_state(0)
n_samples, n_features = (10, 8)
n_targets = 3
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_targets)
omp = OrthogonalMatchingPursuit(n_nonzero_coefs=n_features)
lstsq = LinearRegression()
omp.fit(X, Y)
lstsq.fit(X, Y)
assert_array_almost_equal(omp.coef_, lstsq.coef_)
| bsd-3-clause |
hushaoqing/my_notes | Python/Scrapy/mytest/testScrapyGraphite/middlewares.py | 1 | 1904 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class MytestSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
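# To activate this middleware, register it in the project's settings.py.
# The dotted path below is an assumption based on this module's location;
# lower priority values run closer to the engine:
#
#   SPIDER_MIDDLEWARES = {
#       'testScrapyGraphite.middlewares.MytestSpiderMiddleware': 543,
#   }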
| mit |
FICTURE7/youtube-dl | youtube_dl/extractor/infoq.py | 92 | 2315 | from __future__ import unicode_literals
import base64
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urlparse,
)
class InfoQIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?infoq\.com/(?:[^/]+/)+(?P<id>[^/]+)'
_TESTS = [{
'url': 'http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things',
'md5': 'b5ca0e0a8c1fed93b0e65e48e462f9a2',
'info_dict': {
'id': '12-jan-pythonthings',
'ext': 'mp4',
'description': 'Mike Pirnat presents some tips and tricks, standard libraries and third party packages that make programming in Python a richer experience.',
'title': 'A Few of My Favorite [Python] Things',
},
}, {
'url': 'http://www.infoq.com/fr/presentations/changez-avis-sur-javascript',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_title = self._html_search_regex(r'<title>(.*?)</title>', webpage, 'title')
video_description = self._html_search_meta('description', webpage, 'description')
# The server URL is hardcoded
video_url = 'rtmpe://video.infoq.com/cfx/st/'
# Extract video URL
encoded_id = self._search_regex(
r"jsclassref\s*=\s*'([^']*)'", webpage, 'encoded id')
real_id = compat_urllib_parse_unquote(base64.b64decode(encoded_id.encode('ascii')).decode('utf-8'))
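        # The page stores the RTMP playpath base64-encoded and URL-quoted;
        # decoding yields a path such as
        # 'presentations/12-jan-pythonthings.mp4' (value illustrative).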
playpath = 'mp4:' + real_id
video_filename = playpath.split('/')[-1]
video_id, extension = video_filename.split('.')
http_base = self._search_regex(
r'EXPRESSINSTALL_SWF\s*=\s*[^"]*"((?:https?:)?//[^/"]+/)', webpage,
'HTTP base URL')
formats = [{
'format_id': 'rtmp',
'url': video_url,
'ext': extension,
'play_path': playpath,
}, {
'format_id': 'http',
'url': compat_urlparse.urljoin(url, http_base) + real_id,
}]
self._sort_formats(formats)
return {
'id': video_id,
'title': video_title,
'description': video_description,
'formats': formats,
}
| unlicense |
nanolearningllc/edx-platform-cypress-2 | scripts/release.py | 18 | 20023 | #!/usr/bin/env python
"""
a release-master multitool
"""
from __future__ import print_function, unicode_literals
import sys
import argparse
from datetime import date, timedelta
import re
import collections
import functools
import textwrap
import json
import getpass
try:
from path import Path as path
from git import Repo, Commit
from git.refs.symbolic import SymbolicReference
from dateutil.parser import parse as parse_datestring
import requests
import yaml
except ImportError:
print("Error: missing dependencies! Please run this command to install them:")
print("pip install path.py requests python-dateutil GitPython PyYAML")
sys.exit(1)
try:
from pygments.console import colorize
except ImportError:
colorize = lambda color, text: text
JIRA_RE = re.compile(r"\b[A-Z]{2,}-\d+\b")
PR_BRANCH_RE = re.compile(r"remotes/edx/pr/(\d+)")
def project_root():
directory = path(__file__).abspath().dirname()
while not (directory / ".git").exists():
directory = directory.parent
return directory
PROJECT_ROOT = project_root()
repo = Repo(PROJECT_ROOT)
git = repo.git
PEOPLE_YAML = "https://raw.githubusercontent.com/edx/repo-tools-data/master/people.yaml"
class memoized(object):
"""
Decorator. Caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned
(not reevaluated).
https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
if not isinstance(args, collections.Hashable):
# uncacheable. a list, for instance.
# better to not cache than blow up.
return self.func(*args)
if args in self.cache:
return self.cache[args]
else:
value = self.func(*args)
self.cache[args] = value
return value
def __repr__(self):
'''Return the function's docstring.'''
return self.func.__doc__
def __get__(self, obj, objtype):
'''Support instance methods.'''
return functools.partial(self.__call__, obj)
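# Usage sketch (hypothetical function): repeated calls with the same
# hashable arguments return the cached value instead of re-running the body.
#
#   @memoized
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)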
def make_parser():
parser = argparse.ArgumentParser(description="release master multitool")
parser.add_argument(
'--previous', '--prev', '-p', metavar="GITREV", default="edx/release",
help="previous release [%(default)s]")
parser.add_argument(
'--current', '--curr', '-c', metavar="GITREV", default="HEAD",
help="current release candidate [%(default)s]")
parser.add_argument(
'--date', '-d',
help="expected release date: defaults to "
"next Tuesday [{}]".format(default_release_date()))
parser.add_argument(
'--merge', '-m', action="store_true", default=False,
help="include merge commits")
parser.add_argument(
'--table', '-t', action="store_true", default=False,
help="only print table")
return parser
def ensure_pr_fetch():
"""
Make sure that the git repository contains a remote called "edx" that has
two fetch URLs; one for the main codebase, and one for pull requests.
Returns True if the environment was modified in any way, False otherwise.
"""
modified = False
remotes = git.remote().splitlines()
if 'edx' not in remotes:
git.remote("add", "edx", "https://github.com/edx/edx-platform.git")
modified = True
# it would be nice to use the git-python API to do this, but it doesn't seem
# to support configurations with more than one value per key. :(
edx_fetches = git.config("remote.edx.fetch", get_all=True).splitlines()
pr_fetch = '+refs/pull/*/head:refs/remotes/edx/pr/*'
if pr_fetch not in edx_fetches:
git.config("remote.edx.fetch", pr_fetch, add=True)
modified = True
git.fetch("edx")
return modified
def get_github_creds():
"""
Returns Github credentials if they exist, as a two-tuple of (username, token).
Otherwise, return None.
"""
netrc_auth = requests.utils.get_netrc_auth("https://api.github.com")
if netrc_auth:
return netrc_auth
config_file = path("~/.config/edx-release").expand()
if config_file.isfile():
with open(config_file) as f:
config = json.load(f)
github_creds = config.get("credentials", {}).get("api.github.com", {})
username = github_creds.get("username", "")
token = github_creds.get("token", "")
if username and token:
return (username, token)
return None
def create_github_creds():
"""
https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization
"""
headers = {"User-Agent": "edx-release"}
payload = {
"note": "edx-release",
"scopes": ["repo"],
}
username = raw_input("Github username: ")
password = getpass.getpass("Github password: ")
response = requests.post(
"https://api.github.com/authorizations",
auth=(username, password),
headers=headers, data=json.dumps(payload),
)
# is the user using two-factor authentication?
otp_header = response.headers.get("X-GitHub-OTP")
if not response.ok and otp_header and otp_header.startswith("required;"):
# get two-factor code, redo the request
headers["X-GitHub-OTP"] = raw_input("Two-factor authentication code: ")
response = requests.post(
"https://api.github.com/authorizations",
auth=(username, password),
headers=headers, data=json.dumps(payload),
)
if not response.ok:
message = response.json()["message"]
if message != "Validation Failed":
raise requests.exceptions.RequestException(message)
else:
# A token called "edx-release" already exists on Github.
# Delete it, and try again.
token_id = get_github_auth_id(username, password, "edx-release")
if token_id:
delete_github_auth_token(username, password, token_id)
response = requests.post(
"https://api.github.com/authorizations",
auth=(username, password),
headers=headers, data=json.dumps(payload),
)
if not response.ok:
message = response.json()["message"]
raise requests.exceptions.RequestException(message)
return (username, response.json()["token"])
def get_github_auth_id(username, password, note):
"""
Return the ID associated with the Github auth token with the given note.
If no such auth token exists, return None.
"""
response = requests.get(
"https://api.github.com/authorizations",
auth=(username, password),
headers={"User-Agent": "edx-release"},
)
if not response.ok:
message = response.json()["message"]
raise requests.exceptions.RequestException(message)
for auth_token in response.json():
if auth_token["note"] == "edx-release":
return auth_token["id"]
return None
def delete_github_auth_token(username, password, token_id):
response = requests.delete(
"https://api.github.com/authorizations/{id}".format(id=token_id),
auth=(username, password),
headers={"User-Agent": "edx-release"},
)
if not response.ok:
message = response.json()["message"]
raise requests.exceptions.RequestException(message)
def ensure_github_creds(attempts=3):
"""
Make sure that we have Github OAuth credentials. This will check the user's
.netrc file, as well as the ~/.config/edx-release file. If no credentials
exist in either place, it will prompt the user to create OAuth credentials,
and store them in ~/.config/edx-release.
Returns False if we found credentials, True if we had to create them.
"""
if get_github_creds():
return False
# Looks like we need to create the OAuth creds
print("We need to set up OAuth authentication with Github's API. "
"Your password will not be stored.", file=sys.stderr)
token = None
for _ in range(attempts):
try:
username, token = create_github_creds()
except requests.exceptions.RequestException as e:
print(
"Invalid authentication: {}".format(e.message),
file=sys.stderr,
)
continue
else:
break
if token:
print("Successfully authenticated to Github", file=sys.stderr)
if not token:
print("Too many invalid authentication attempts.", file=sys.stderr)
return False
config_file = path("~/.config/edx-release").expand()
# make sure parent directory exists
config_file.parent.makedirs_p()
# read existing config if it exists
if config_file.isfile():
with open(config_file) as f:
config = json.load(f)
else:
config = {}
# update config
if 'credentials' not in config:
config["credentials"] = {}
if 'api.github.com' not in config['credentials']:
config["credentials"]["api.github.com"] = {}
config["credentials"]["api.github.com"]["username"] = username
config["credentials"]["api.github.com"]["token"] = token
# write it back out
with open(config_file, "w") as f:
json.dump(config, f)
return True
def default_release_date():
"""
Returns a date object corresponding to the expected date of the next release:
normally, this Tuesday.
"""
today = date.today()
TUESDAY = 2
days_until_tuesday = (TUESDAY - today.isoweekday()) % 7
return today + timedelta(days=days_until_tuesday)
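# A quick worked example of the weekday arithmetic above (a sketch, not part
# of the original script): isoweekday() is 1 for Monday through 7 for Sunday,
# so from a Friday (isoweekday 5) we get (2 - 5) % 7 == 4 days until Tuesday.
def _demo_default_release_date():
    friday = date(2016, 9, 16)  # isoweekday() == 5
    days_until_tuesday = (2 - friday.isoweekday()) % 7
    assert days_until_tuesday == 4
    assert friday + timedelta(days=days_until_tuesday) == date(2016, 9, 20)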
def parse_ticket_references(text):
"""
Given a commit message, return the set of all JIRA ticket references in that
message. If there are no ticket references, return an empty set.
return set(JIRA_RE.findall(text))
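# Hypothetical usage sketch: assuming JIRA_RE (defined earlier in this script)
# matches "PROJECT-123"-style keys, a message such as
# "Fix flaky test (TNL-1234, LMS-42)" would yield {"TNL-1234", "LMS-42"}.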
class DoesNotExist(Exception):
def __init__(self, message, commit, branch):
self.message = message
self.commit = commit
self.branch = branch
Exception.__init__(self, message)
def get_merge_commit(commit, branch="master"):
"""
Given a commit that was merged into the given branch, return the merge commit
for that event.
http://stackoverflow.com/questions/8475448/find-merge-commit-which-include-a-specific-commit
"""
commit_range = "{}..{}".format(commit, branch)
ancestry_paths = git.rev_list(commit_range, ancestry_path=True).splitlines()
first_parents = git.rev_list(commit_range, first_parent=True).splitlines()
both = set(ancestry_paths) & set(first_parents)
for commit_hash in reversed(ancestry_paths):
if commit_hash in both:
return repo.commit(commit_hash)
# no merge commit!
msg = "No merge commit for {commit} in {branch}!".format(
commit=commit, branch=branch,
)
raise DoesNotExist(msg, commit, branch)
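# The loop above mirrors the git plumbing from the Stack Overflow answer
# linked in the docstring; roughly (a sketch):
#
#     git rev-list <commit>..<branch> --ancestry-path
#     git rev-list <commit>..<branch> --first-parent
#
# The merge commit is the last commit, in --ancestry-path order, that appears
# in both listings.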
def get_pr_info(num):
"""
Returns the pull request info from the Github API.
"""
url = "https://api.github.com/repos/edx/edx-platform/pulls/{num}".format(num=num)
username, token = get_github_creds()
headers = {
"Authorization": "token {}".format(token),
"User-Agent": "edx-release",
}
response = requests.get(url, headers=headers)
result = response.json()
if not response.ok:
raise requests.exceptions.RequestException(result["message"])
return result
def get_merged_prs(start_ref, end_ref):
"""
Return the set of all pull requests (as integers) that were merged between
the start_ref and end_ref.
"""
ensure_pr_fetch()
start_unmerged_branches = set(
branch.strip() for branch in
git.branch(all=True, no_merged=start_ref).splitlines()
)
end_merged_branches = set(
branch.strip() for branch in
git.branch(all=True, merged=end_ref).splitlines()
)
merged_between_refs = start_unmerged_branches & end_merged_branches
merged_prs = set()
for branch in merged_between_refs:
match = PR_BRANCH_RE.search(branch)
if match:
merged_prs.add(int(match.group(1)))
return merged_prs
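# Sketch of what get_merged_prs() matches: after ensure_pr_fetch() the repo
# carries refs like "remotes/edx/pr/1234" (see prs_by_email below), and
# PR_BRANCH_RE -- defined earlier in this script -- is assumed to capture the
# trailing PR number, so such a branch contributes 1234 to the returned set.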
@memoized
def prs_by_email(start_ref, end_ref):
"""
Returns an ordered dictionary of {email: pr_list}
Email is the email address of the person who merged the pull request
The dictionary is alphabetically ordered by email address
The pull request list is ordered by merge date
"""
username, token = get_github_creds()
headers = {
"Authorization": "token {}".format(token),
"User-Agent": "edx-release",
}
# `emails` maps from other_emails to primary email, based on people.yaml.
emails = {}
people_resp = requests.get(PEOPLE_YAML, headers=headers)
people_resp.raise_for_status()
people = yaml.safe_load(people_resp.text)
for person in people.itervalues():
if 'other_emails' in person:
for other_email in person['other_emails']:
emails[other_email] = person['email']
unordered_data = collections.defaultdict(set)
for pr_num in get_merged_prs(start_ref, end_ref):
ref = "refs/remotes/edx/pr/{num}".format(num=pr_num)
branch = SymbolicReference(repo, ref)
try:
merge = get_merge_commit(branch.commit, end_ref)
except DoesNotExist:
pass # this commit will be included in the commits_without_prs table
else:
email = emails.get(merge.author.email, merge.author.email)
unordered_data[email].add((pr_num, merge))
ordered_data = collections.OrderedDict()
for email in sorted(unordered_data.keys()):
ordered = sorted(unordered_data[email], key=lambda pair: pair[1].authored_date)
ordered_data[email] = [num for num, merge in ordered]
return ordered_data
def generate_pr_table(start_ref, end_ref):
"""
Return a UTF-8 string corresponding to a pull request table to embed in Confluence.
"""
header = "|| Merged By || Author || Title || PR || JIRA || Release Notes? || Verified? ||"
pr_link = "[#{num}|https://github.com/edx/edx-platform/pull/{num}]"
user_link = "[@{user}|https://github.com/{user}]"
rows = [header]
prbe = prs_by_email(start_ref, end_ref)
for email, pull_requests in prbe.items():
for i, pull_request in enumerate(pull_requests):
try:
pr_info = get_pr_info(pull_request)
title = pr_info["title"] or ""
body = pr_info["body"] or ""
author = pr_info["user"]["login"]
except requests.exceptions.RequestException as e:
message = (
"Warning: could not fetch data for #{num}: "
"{message}".format(num=pull_request, message=e.message)
)
print(colorize("red", message), file=sys.stderr)
title = "?"
body = "?"
author = ""
rows.append("| {merged_by} | {author} | {title} | {pull_request} | {jira} | {release_notes} | {verified} |".format(
merged_by=email if i == 0 else "",
author=user_link.format(user=author) if author else "",
title=title.replace("|", "\|").replace('{', '\{').replace('}', '\}'),
pull_request=pr_link.format(num=pull_request),
jira=", ".join(parse_ticket_references(body)),
release_notes="",
verified="",
))
return "\n".join(rows).encode("utf8")
@memoized
def get_commits_not_in_prs(start_ref, end_ref):
"""
Return a tuple of commits that exist between start_ref and end_ref,
but were not merged to the end_ref. If everyone is following the
pull request process correctly, this should return an empty tuple.
"""
return tuple(Commit.iter_items(
repo,
"{start}..{end}".format(start=start_ref, end=end_ref),
first_parent=True, no_merges=True,
))
def generate_commit_table(start_ref, end_ref):
"""
Return a string corresponding to a commit table to embed in Confluence.
The commits in the table should only be commits that are not in the
pull request table.
"""
header = "|| Author || Summary || Commit || JIRA || Release Notes? || Verified? ||"
commit_link = "[commit|https://github.com/edx/edx-platform/commit/{sha}]"
rows = [header]
commits = get_commits_not_in_prs(start_ref, end_ref)
for commit in commits:
rows.append("| {author} | {summary} | {commit} | {jira} | {release_notes} | {verified} |".format(
author=commit.author.email,
summary=commit.summary.replace("|", "\|"),
commit=commit_link.format(sha=commit.hexsha),
jira=", ".join(parse_ticket_references(commit.message)),
release_notes="",
verified="",
))
return "\n".join(rows)
def generate_email(start_ref, end_ref, release_date=None):
"""
Returns a string roughly approximating an email.
"""
if release_date is None:
release_date = default_release_date()
prbe = prs_by_email(start_ref, end_ref)
email = """
To: {emails}
You merged at least one pull request for edx-platform that is going out
in this upcoming release, and you are responsible for verifying those
changes on the staging servers before the code is released. Please go
to the release page to do so:
https://openedx.atlassian.net/wiki/display/ENG/{date}+Release
The staging server is: https://stage.edx.org
Note that you are responsible for verifying any pull requests that you
merged, whether you wrote the code or not. (If you didn't write the code,
you can and should try to get the person who wrote the code to help
verify the changes -- but even if you can't, you're still responsible!)
If you find any bugs, please notify me and record the bugs on the
release page. Thanks!
By the way, if you have an @edx.org email address and are having trouble logging
into stage, you may need to reset your password.
If you would prefer this email be sent to a different email address of yours,
send a request to oscm@edx.org with the details.
""".format(
emails=", ".join(prbe.keys()),
date=release_date.isoformat(),
)
return textwrap.dedent(email).strip()
def main():
parser = make_parser()
args = parser.parse_args()
if isinstance(args.date, basestring):
# user passed in a custom date, so we need to parse it
args.date = parse_datestring(args.date).date()
ensure_github_creds()
if args.table:
print(generate_pr_table(args.previous, args.current))
return
print("Generating stage verification email and its list of recipients. This may take around a minute...")
print(generate_email(args.previous, args.current, release_date=args.date).encode('UTF-8'))
print("\n")
print("Wiki Table:")
print(
"Type Ctrl+Shift+D on Confluence to embed the following table "
"in your release wiki page"
)
print("\n")
print(generate_pr_table(args.previous, args.current))
commits_without_prs = get_commits_not_in_prs(args.previous, args.current)
if commits_without_prs:
num = len(commits_without_prs)
plural = num > 1
print("\n")
print(
"There {are} {num} {commits} in this release that did not come in "
"through pull requests!".format(
num=num, are="are" if plural else "is",
commits="commits" if plural else "commit"
)
)
print("\n")
print(generate_commit_table(args.previous, args.current))
if __name__ == "__main__":
main()
| agpl-3.0 |
CivicTechTO/open-cabinet | venv/lib/python2.7/site-packages/django/db/backends/utils.py | 430 | 6689 | from __future__ import unicode_literals
import datetime
import decimal
import hashlib
import logging
from time import time
from django.conf import settings
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper(object):
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
for item in self.cursor:
yield item
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Ticket #17671 - Close instead of passing thru to avoid backend
# specific behavior. Catch errors liberally because errors in cleanup
# code aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
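# Typical call path (a sketch of how Django hands these wrappers out): the
# object returned by connection.cursor() is a CursorWrapper, so application
# code like
#
#     with connection.cursor() as cursor:
#         cursor.execute("SELECT 1")
#
# goes through execute() above, which first checks the transaction state and
# then translates backend-specific exceptions via wrap_database_errors.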
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super(CursorDebugWrapper, self).execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, params),
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super(CursorDebugWrapper, self).executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug('(%.3f) %s; args=%s' % (duration, sql, param_list),
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int(float('.' + microseconds) * 1000000))
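def _demo_typecast_time():
    # A small sanity sketch (not part of Django): '.5' of a second becomes
    # 500000 microseconds, which float arithmetic represents exactly.
    assert typecast_time('12:30:45.5') == datetime.time(12, 30, 45, 500000)
    assert typecast_time('') is None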
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo)
def typecast_decimal(s):
if s is None or s == '':
return None
return decimal.Decimal(s)
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def truncate_name(name, length=None, hash_len=4):
"""Shortens a string to a repeatable mangled version with the given length.
"""
if length is None or len(name) <= length:
return name
hsh = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
return '%s%s' % (name[:length - hash_len], hsh)
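def _demo_truncate_name():
    # Sketch: names within the limit pass through unchanged; longer names are
    # cut to (length - hash_len) characters plus a 4-hex-digit MD5 suffix, so
    # the result is always exactly `length` characters and repeatable.
    assert truncate_name('short', 30) == 'short'
    mangled = truncate_name('a_very_long_database_table_name', 20)
    assert len(mangled) == 20
    assert mangled.startswith('a_very_long_data')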
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
if decimal_places is not None:
return "%.*f" % (decimal_places, value)
return "{:f}".format(value)
| mit |
ZenDevelopmentSystems/scikit-learn | benchmarks/bench_plot_incremental_pca.py | 374 | 6430 | """
========================
IncrementalPCA benchmark
========================
Benchmarks for IncrementalPCA
"""
import numpy as np
import gc
from time import time
from collections import defaultdict
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_lfw_people
from sklearn.decomposition import IncrementalPCA, RandomizedPCA, PCA
def plot_results(X, y, label):
plt.plot(X, y, label=label, marker='o')
def benchmark(estimator, data):
gc.collect()
print("Benching %s" % estimator)
t0 = time()
estimator.fit(data)
training_time = time() - t0
data_t = estimator.transform(data)
data_r = estimator.inverse_transform(data_t)
reconstruction_error = np.mean(np.abs(data - data_r))
return {'time': training_time, 'error': reconstruction_error}
def plot_feature_times(all_times, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_times['pca'], label="PCA")
plot_results(all_components, all_times['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_times['rpca'], label="RandomizedPCA")
plt.legend(loc="upper left")
plt.suptitle("Algorithm runtime vs. n_components\n \
LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Time (seconds)")
def plot_feature_errors(all_errors, batch_size, all_components, data):
plt.figure()
plot_results(all_components, all_errors['pca'], label="PCA")
plot_results(all_components, all_errors['ipca'],
label="IncrementalPCA, bsize=%i" % batch_size)
plot_results(all_components, all_errors['rpca'], label="RandomizedPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. n_components\n"
"LFW, size %i x %i" % data.shape)
plt.xlabel("Number of components (out of max %i)" % data.shape[1])
plt.ylabel("Mean absolute error")
def plot_batch_times(all_times, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_times['pca'], label="PCA")
plot_results(all_batch_sizes, all_times['rpca'], label="RandomizedPCA")
plot_results(all_batch_sizes, all_times['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm runtime vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Time (seconds)")
def plot_batch_errors(all_errors, n_features, all_batch_sizes, data):
plt.figure()
plot_results(all_batch_sizes, all_errors['pca'], label="PCA")
plot_results(all_batch_sizes, all_errors['ipca'], label="IncrementalPCA")
plt.legend(loc="lower left")
plt.suptitle("Algorithm error vs. batch_size for n_components %i\n \
LFW, size %i x %i" % (
n_features, data.shape[0], data.shape[1]))
plt.xlabel("Batch size")
plt.ylabel("Mean absolute error")
def fixed_batch_size_comparison(data):
all_features = [i.astype(int) for i in np.linspace(data.shape[1] // 10,
data.shape[1], num=5)]
batch_size = 1000
# Compare runtimes and error for fixed batch size
all_times = defaultdict(list)
all_errors = defaultdict(list)
for n_components in all_features:
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
ipca = IncrementalPCA(n_components=n_components, batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('ipca', ipca),
('rpca', rpca)]}
for k in sorted(results_dict.keys()):
all_times[k].append(results_dict[k]['time'])
all_errors[k].append(results_dict[k]['error'])
plot_feature_times(all_times, batch_size, all_features, data)
plot_feature_errors(all_errors, batch_size, all_features, data)
def variable_batch_size_comparison(data):
batch_sizes = [i.astype(int) for i in np.linspace(data.shape[0] // 10,
data.shape[0], num=10)]
for n_components in [i.astype(int) for i in
np.linspace(data.shape[1] // 10,
data.shape[1], num=4)]:
all_times = defaultdict(list)
all_errors = defaultdict(list)
pca = PCA(n_components=n_components)
rpca = RandomizedPCA(n_components=n_components, random_state=1999)
results_dict = {k: benchmark(est, data) for k, est in [('pca', pca),
('rpca', rpca)]}
# Create flat baselines to compare the variation over batch size
all_times['pca'].extend([results_dict['pca']['time']] *
len(batch_sizes))
all_errors['pca'].extend([results_dict['pca']['error']] *
len(batch_sizes))
all_times['rpca'].extend([results_dict['rpca']['time']] *
len(batch_sizes))
all_errors['rpca'].extend([results_dict['rpca']['error']] *
len(batch_sizes))
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=n_components,
batch_size=batch_size)
results_dict = {k: benchmark(est, data) for k, est in [('ipca',
ipca)]}
all_times['ipca'].append(results_dict['ipca']['time'])
all_errors['ipca'].append(results_dict['ipca']['error'])
plot_batch_times(all_times, n_components, batch_sizes, data)
# RandomizedPCA error is always worse (approx 100x) than other PCA
# tests
plot_batch_errors(all_errors, n_components, batch_sizes, data)
faces = fetch_lfw_people(resize=.2, min_faces_per_person=5)
# limit dataset to 5000 people (don't care who they are!)
X = faces.data[:5000]
n_samples, h, w = faces.images.shape
n_features = X.shape[1]
X -= X.mean(axis=0)
X /= X.std(axis=0)
fixed_batch_size_comparison(X)
variable_batch_size_comparison(X)
plt.show()
| bsd-3-clause |
switchkiller/ProjDjanko | lib/python2.7/site-packages/django/conf/locale/en/formats.py | 394 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
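# A quick sketch of how Django consumes these patterns: a candidate string is
# tried against each format in order with strptime, e.g.
#
#     >>> import datetime
#     >>> datetime.datetime.strptime('10/25/2006 14:30', '%m/%d/%Y %H:%M')
#     datetime.datetime(2006, 10, 25, 14, 30)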
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| gpl-2.0 |
richardliaw/ray | python/ray/tune/suggest/bayesopt.py | 1 | 16596 | from collections import defaultdict
import logging
import pickle
import json
from typing import Dict, Optional, Tuple
from ray.tune import ExperimentAnalysis
from ray.tune.result import DEFAULT_METRIC
from ray.tune.sample import Domain, Float, Quantized
from ray.tune.suggest.suggestion import UNRESOLVED_SEARCH_SPACE, \
UNDEFINED_METRIC_MODE, UNDEFINED_SEARCH_SPACE
from ray.tune.suggest.variant_generator import parse_spec_vars
from ray.tune.utils.util import is_nan_or_inf, unflatten_dict
try: # Python 3 only -- needed for lint test.
import bayes_opt as byo
except ImportError:
byo = None
from ray.tune.suggest import Searcher
from ray.tune.utils import flatten_dict
logger = logging.getLogger(__name__)
def _dict_hash(config, precision):
flatconfig = flatten_dict(config)
for param, value in flatconfig.items():
if isinstance(value, float):
flatconfig[param] = "{:.{digits}f}".format(value, digits=precision)
hashed = json.dumps(flatconfig, sort_keys=True, default=str)
return hashed
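# Hypothetical sketch of _dict_hash: assuming flatten_dict joins nested keys
# with "/", {"a": {"b": 0.123456789}} flattens to {"a/b": 0.123456789}, the
# float is rendered as "0.12346" at precision 5, and the JSON dump of the
# sorted result becomes the hash key -- so two configs differing only past
# the fifth decimal place deliberately collide.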
class BayesOptSearch(Searcher):
"""Uses fmfn/BayesianOptimization to optimize hyperparameters.
fmfn/BayesianOptimization is a library for Bayesian Optimization. More
info can be found here: https://github.com/fmfn/BayesianOptimization.
This searcher will automatically filter out any NaN, inf or -inf
results.
You will need to install fmfn/BayesianOptimization via the following:
.. code-block:: bash
pip install bayesian-optimization
This algorithm requires setting a search space using the
`BayesianOptimization search space specification`_.
Args:
space (dict): Continuous search space. Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute. If None
but a mode was passed, the anonymous metric `_metric` will be used
by default.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
The default value is a dictionary with three keys:
- kind: ucb (Upper Confidence Bound)
- kappa: 2.576
- xi: 0.0
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
Tune automatically converts search spaces to BayesOptSearch's format:
.. code-block:: python
from ray import tune
from ray.tune.suggest.bayesopt import BayesOptSearch
config = {
"width": tune.uniform(0, 20),
"height": tune.uniform(-100, 100)
}
bayesopt = BayesOptSearch(metric="mean_loss", mode="min")
tune.run(my_func, config=config, search_alg=bayesopt)
If you would like to pass the search space manually, the code would
look like this:
.. code-block:: python
from ray import tune
from ray.tune.suggest.bayesopt import BayesOptSearch
space = {
'width': (0, 20),
'height': (-100, 100),
}
bayesopt = BayesOptSearch(space, metric="mean_loss", mode="min")
tune.run(my_func, search_alg=bayesopt)
"""
# bayes_opt.BayesianOptimization: Optimization object
optimizer = None
def __init__(self,
space: Optional[Dict] = None,
metric: Optional[str] = None,
mode: Optional[str] = None,
utility_kwargs: Optional[Dict] = None,
random_state: int = 42,
random_search_steps: int = 10,
verbose: int = 0,
patience: int = 5,
skip_duplicate: bool = True,
analysis: Optional[ExperimentAnalysis] = None,
max_concurrent: Optional[int] = None,
use_early_stopped_trials: Optional[bool] = None):
"""Instantiate new BayesOptSearch object.
Args:
space (dict): Continuous search space.
Parameters will be sampled from
this space which will be used to run trials.
metric (str): The training result objective value attribute.
mode (str): One of {min, max}. Determines whether objective is
minimizing or maximizing the metric attribute.
utility_kwargs (dict): Parameters to define the utility function.
Must provide values for the keys `kind`, `kappa`, and `xi`.
random_state (int): Used to initialize BayesOpt.
random_search_steps (int): Number of initial random searches.
This is necessary to avoid initial local overfitting
of the Bayesian process.
patience (int): Must be > 0. If the optimizer suggests a set of
hyperparameters more than 'patience' times,
then the whole experiment will stop.
skip_duplicate (bool): If true, BayesOptSearch will not create
a trial with a previously seen set of hyperparameters. By
default, floating values will be reduced to a digit precision
of 5. You can override this by setting
``searcher.repeat_float_precision``.
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
verbose (int): Sets verbosity level for BayesOpt packages.
max_concurrent: Deprecated.
use_early_stopped_trials: Deprecated.
"""
assert byo is not None, (
"BayesOpt must be installed!. You can install BayesOpt with"
" the command: `pip install bayesian-optimization`.")
if mode:
assert mode in ["min", "max"], "`mode` must be 'min' or 'max'."
self.max_concurrent = max_concurrent
self._config_counter = defaultdict(int)
self._patience = patience
# int: Precision at which to hash values.
self.repeat_float_precision = 5
if self._patience <= 0:
raise ValueError("patience must be set to a value greater than 0!")
self._skip_duplicate = skip_duplicate
super(BayesOptSearch, self).__init__(
metric=metric,
mode=mode,
max_concurrent=max_concurrent,
use_early_stopped_trials=use_early_stopped_trials)
if utility_kwargs is None:
# The defaults arguments are the same
# as in the package BayesianOptimization
utility_kwargs = dict(
kind="ucb",
kappa=2.576,
xi=0.0,
)
if mode == "max":
self._metric_op = 1.
elif mode == "min":
self._metric_op = -1.
self._live_trial_mapping = {}
self._buffered_trial_results = []
self.random_search_trials = random_search_steps
self._total_random_search_trials = 0
self.utility = byo.UtilityFunction(**utility_kwargs)
# Registering the provided analysis, if given
if analysis is not None:
self.register_analysis(analysis)
if isinstance(space, dict) and space:
resolved_vars, domain_vars, grid_vars = parse_spec_vars(space)
if domain_vars or grid_vars:
logger.warning(
UNRESOLVED_SEARCH_SPACE.format(
par="space", cls=type(self)))
space = self.convert_search_space(space, join=True)
self._space = space
self._verbose = verbose
self._random_state = random_state
self.optimizer = None
if space:
self._setup_optimizer()
def _setup_optimizer(self):
if self._metric is None and self._mode:
# If only a mode was passed, use anonymous metric
self._metric = DEFAULT_METRIC
self.optimizer = byo.BayesianOptimization(
f=None,
pbounds=self._space,
verbose=self._verbose,
random_state=self._random_state)
def set_search_properties(self, metric: Optional[str], mode: Optional[str],
config: Dict) -> bool:
if self.optimizer:
return False
space = self.convert_search_space(config)
self._space = space
if metric:
self._metric = metric
if mode:
self._mode = mode
if self._mode == "max":
self._metric_op = 1.
elif self._mode == "min":
self._metric_op = -1.
self._setup_optimizer()
return True
def suggest(self, trial_id: str) -> Optional[Dict]:
"""Return new point to be explored by black box function.
Args:
trial_id (str): Id of the trial.
This is a short alphanumerical string.
Returns:
Either a dictionary describing the new point to explore or
None, when no new point is to be explored for the time being.
"""
if not self.optimizer:
raise RuntimeError(
UNDEFINED_SEARCH_SPACE.format(
cls=self.__class__.__name__, space="space"))
if not self._metric or not self._mode:
raise RuntimeError(
UNDEFINED_METRIC_MODE.format(
cls=self.__class__.__name__,
metric=self._metric,
mode=self._mode))
# If we have more active trials than the allowed maximum
total_live_trials = len(self._live_trial_mapping)
if self.max_concurrent and self.max_concurrent <= total_live_trials:
# we stop the suggestion and return None.
return None
# We compute the new point to explore
config = self.optimizer.suggest(self.utility)
config_hash = _dict_hash(config, self.repeat_float_precision)
# Check if already computed
already_seen = config_hash in self._config_counter
self._config_counter[config_hash] += 1
top_repeats = max(self._config_counter.values())
# If patience is set and we've repeated a trial numerous times,
# we terminate the experiment.
if self._patience is not None and top_repeats > self._patience:
return Searcher.FINISHED
# If we have seen a value before, we'll skip it.
if already_seen and self._skip_duplicate:
logger.info("Skipping duplicated config: {}.".format(config))
return None
# If we are still in the random search part and we are waiting for
# trials to complete
if len(self._buffered_trial_results) < self.random_search_trials:
# We check if we have already maxed out the number of requested
# random search trials
if self._total_random_search_trials == self.random_search_trials:
# If so we stop the suggestion and return None
return None
# Otherwise we increase the total number of random search trials
if config:
self._total_random_search_trials += 1
# Save the new trial to the trial mapping
self._live_trial_mapping[trial_id] = config
# Return an unflattened copy of the config
return unflatten_dict(config)
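# Flow sketch for suggest(): ask the optimizer for a point, hash it at
# repeat_float_precision digits, finish the whole experiment once any config
# has been suggested more than `patience` times, skip exact repeats when
# skip_duplicate is set, and hold back new points while the initial
# random_search_steps trials are still outstanding.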
def register_analysis(self, analysis: ExperimentAnalysis):
"""Integrate the given analysis into the gaussian process.
Args:
analysis (ExperimentAnalysis): Optionally, the previous analysis
to integrate.
"""
for (_, report), params in zip(
analysis.dataframe(metric=self._metric,
mode=self._mode).iterrows(),
analysis.get_all_configs().values()):
# We add the obtained results to the
# gaussian process optimizer
self._register_result(params, report)
def on_trial_complete(self,
trial_id: str,
result: Optional[Dict] = None,
error: bool = False):
"""Notification for the completion of trial.
Args:
trial_id (str): Id of the trial.
This is a short alphanumerical string.
result (dict): Dictionary of result.
May be none when some error occurs.
error (bool): Boolean representing a previous error state.
The result should be None when error is True.
"""
# We try to get the parameters used for this trial
params = self._live_trial_mapping.pop(trial_id, None)
# The results may be None if some exception is raised during the trial.
# Also, if the parameters are None (were already processed)
# we interrupt the following procedure.
# Additionally, if error is True but the other values are
# not None, we also bail out.
if result is None or params is None or error:
return
# If we don't have to execute some random search steps
if len(self._buffered_trial_results) >= self.random_search_trials:
# we simply register the obtained result
self._register_result(params, result)
return
# We store the results into a temporary cache
self._buffered_trial_results.append((params, result))
# If the random search finished,
# we update the BO with all the computed points.
if len(self._buffered_trial_results) == self.random_search_trials:
for params, result in self._buffered_trial_results:
self._register_result(params, result)
def _register_result(self, params: Tuple[str], result: Dict):
"""Register given tuple of params and results."""
if is_nan_or_inf(result[self.metric]):
return
self.optimizer.register(params, self._metric_op * result[self.metric])
def save(self, checkpoint_path: str):
"""Storing current optimizer state."""
with open(checkpoint_path, "wb") as f:
pickle.dump(
(self.optimizer, self._buffered_trial_results,
self._total_random_search_trials, self._config_counter), f)
def restore(self, checkpoint_path: str):
"""Restoring current optimizer state."""
with open(checkpoint_path, "rb") as f:
(self.optimizer, self._buffered_trial_results,
self._total_random_search_trials,
self._config_counter) = pickle.load(f)
@staticmethod
def convert_search_space(spec: Dict, join: bool = False) -> Dict:
spec = flatten_dict(spec, prevent_delimiter=True)
resolved_vars, domain_vars, grid_vars = parse_spec_vars(spec)
if grid_vars:
raise ValueError(
"Grid search parameters cannot be automatically converted "
"to a BayesOpt search space.")
def resolve_value(domain: Domain) -> Tuple[float, float]:
sampler = domain.get_sampler()
if isinstance(sampler, Quantized):
logger.warning(
"BayesOpt search does not support quantization. "
"Dropped quantization.")
sampler = sampler.get_sampler()
if isinstance(domain, Float):
if domain.sampler is not None:
logger.warning(
"BayesOpt does not support specific sampling methods. "
"The {} sampler will be dropped.".format(sampler))
return (domain.lower, domain.upper)
raise ValueError("BayesOpt does not support parameters of type "
"`{}`".format(type(domain).__name__))
# Parameter name is e.g. "a/b/c" for nested dicts
bounds = {
"/".join(path): resolve_value(domain)
for path, domain in domain_vars
}
if join:
spec.update(bounds)
bounds = spec
return bounds
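# Sketch of the conversion (using the tune API shown in the class docstring):
# {"width": tune.uniform(0, 20), "height": tune.uniform(-100, 100)} becomes
# the BayesOpt pbounds dict {"width": (0, 20), "height": (-100, 100)};
# grid_search entries raise, and quantized or custom samplers are dropped
# with a warning.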
| apache-2.0 |
jakobzhao/wbcrawler3 | wbcrawler/parallel.py | 1 | 8020 | # !/usr/bin/python
# -*- coding: utf-8 -*-
#
# Created on Oct 16, 2015
# @author: Bo Zhao
# @email: bo_zhao@hks.harvard.edu
# @website: http://yenching.org
# @organization: Harvard Kennedy School
import sys
from multiprocessing.dummy import Pool as ThreadPool
# from multiprocessing.dummy import Lock
import socket
from pymongo import MongoClient, DESCENDING, ASCENDING
from selenium.common.exceptions import StaleElementReferenceException, TimeoutException, WebDriverException
from wbcrawler.robot import register, unregister
from wbcrawler.parser import parse_repost, parse_path, parse_info
from wbcrawler.log import *
from wbcrawler.settings import UTC
from httplib import BadStatusLine
from urllib2 import URLError
reload(sys)
sys.setdefaultencoding('utf-8')
start = datetime.datetime.now()
# lock = Lock()
# compute the total number of robots and tag each one with its crawler type
def create_robots(rr, pr, ir, settings):
num_of_robots = rr + pr + ir
robots = []
for robot_id in range(0, num_of_robots):
robot = {}
for i in range(robot_id, num_of_robots):
if robot == {}:
# with lock:
robot = register(settings)
else:
break
if robot != {}:
if robot_id < rr: # repost
robot['type'] = 'repost'
robots.append(robot)
elif robot_id in range(rr, rr + pr): # path
robot['type'] = 'path'
robots.append(robot)
elif robot_id >= rr + pr: # info
robot['type'] = 'info'
robots.append(robot)
nrr, npr, nir = 0, 0, 0
for robot in robots:
if robot['type'] == 'repost':
robot['id'] = nrr
nrr += 1
elif robot['type'] == 'path':
robot['id'] = npr
npr += 1
elif robot['type'] == 'info':
robot['id'] = nir
nir += 1
for robot in robots:
if robot['type'] == 'repost':
robot['count'] = nrr
elif robot['type'] == 'path':
robot['count'] = npr
elif robot['type'] == 'info':
robot['count'] = nir
return robots
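# Sketch: create_robots(2, 1, 1, settings) registers four browser robots and
# tags them repost/repost/path/info; each robot also carries its per-type
# 'id' and the per-type 'count', which the crawlers below use to slice the
# MongoDB cursors into equal shares.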
def crawling_job(robot):
if robot['type'] == 'repost':
repost_crawling(robot)
elif robot['type'] == 'path':
path_crawling(robot)
elif robot['type'] == 'info':
info_crawling(robot)
else:
pass
def repost_crawling(rbt):
utc_now = datetime.datetime.utcnow() - datetime.timedelta(days=rbt['settings']['replies_control_days'])
rr = rbt['count']
client = MongoClient(rbt['settings']['address'], rbt['settings']['port'])
db = client[rbt['settings']['project']]
try:
round_start = datetime.datetime.now()
utc_end = datetime.datetime(2015, 10, 29, 0, 0, 0, 0, tzinfo=UTC)
# search_json = {"timestamp": {"$gt": utc_now}, "timestamp": {"$lt": utc_end}, 'keyword': {'$ne': '五中全会'}, "fwd_count": {"$gt": rbt['settings']['min_fwd_times']}, "deleted": {"$eq": None}}
# search_json = {"timestamp": {"$gt": utc_now}, {"$and": ["fwd_count": {"$gt": rbt['settings']['min_fwd_times']}, "fwd_count": {"$gt": 200000}]}, "deleted": {"$eq": None}}
search_json = {"timestamp": {"$gt": utc_end}, '$and': [{"fwd_count": {"$gt": 99}}, {"fwd_count": {"$lt": 100000000}}], "replies": {"$eq": []}, "deleted": {"$eq": None}}
# search_json = {"keyword": u'新型城镇化', "fwd_count": {"$gt": rbt['settings']['min_fwd_times']}}
count = db.posts.find(search_json).count()
slc = count / rr
posts = db.posts.find(search_json).skip(slc * rbt['id']).limit(slc) # .sort({'fwd_count': -1}). .sort([('mid', DESCENDING), ('fwd_count', ASCENDING)])
parse_repost(posts, rbt, db)
log(NOTICE, "Time per round: %d mins." % int((datetime.datetime.now() - round_start).seconds / 60))
except KeyboardInterrupt:
log(ERROR, "prorgam is interrupted.", "repost_crawling")
finally:
unregister(rbt)
log(NOTICE, "Time: %d mins." % int((datetime.datetime.now() - start).seconds / 60))
def path_crawling(rbt):
# utc_now = datetime.datetime.utcnow() - datetime.timedelta(days=rbt['settings']['replies_control_days'])
pr = rbt['count']
client = MongoClient(rbt['settings']['address'], rbt['settings']['port'])
db = client[rbt['settings']['project']]
try:
round_start = datetime.datetime.now()
# search_json = {'latlng': [0, 0]}
# search_json = {}
search_json = {'path': []}
count = db.users.find(search_json).count()
slc = count / pr
users = db.users.find(search_json).skip(slc * rbt['id']).limit(slc)
parse_path(users, rbt, db)
log(NOTICE, "Time: %d mins." % int((datetime.datetime.now() - round_start).seconds / 60))
except KeyboardInterrupt:
log(ERROR, "Program is interrupted.", 'path_crawling')
finally:
unregister(rbt)
log(NOTICE, "Time: %d mins." % int((datetime.datetime.now() - start).seconds / 60))
def info_crawling(rbt):
ir = rbt['count']
client = MongoClient(rbt['settings']['address'], rbt['settings']['port'])
db = client[rbt['settings']['project']]
try:
round_start = datetime.datetime.now()
search_json = {'$or': [{'latlng': [0, 0]}, {'latlng': [-1, -1]}]}
# search_json = {'latlng': [0, 0]}
# search_json = {'msg': {'$exists': False}}
count = db.users.find(search_json).count()
slc = count / ir
users = db.users.find(search_json).skip(slc * rbt['id']).limit(slc)
parse_info(users, rbt, db)
log(NOTICE, "Time: %d mins." % int((datetime.datetime.now() - round_start).seconds / 60))
except KeyboardInterrupt:
log(ERROR, "Program is interrupted.", 'info_crawling')
finally:
unregister(rbt)
log(NOTICE, "Time: %d min(s)." % int((datetime.datetime.now() - start).seconds / 60))
def parallel_crawling(rr, pr, ir, settings):
# Make the Pool of workers
pool = ThreadPool(rr + pr + ir)
# Open the urls in their own threads
# and return the results
try:
robots = create_robots(rr, pr, ir, settings)
pool.map(crawling_job, robots)
except NameError, e:
log(FATALITY, 'NameError: ' + e.message, 'parallel_crawling')
except OSError, e:
log(FATALITY, 'OSError: ' + e.message, 'parallel_crawling')
except TypeError, e:
log(FATALITY, e.message, 'parallel_crawling')  # AttributeError: 'str' object has no attribute 'device_iden'
except StaleElementReferenceException:
log(FATALITY, "StaleElementReferenceException: Too many robots", 'parallel_crawling')
except TimeoutException:
log(FATALITY, "TimeoutException: Too many robots", 'parallel_crawling')
except socket.error:
log(FATALITY, "SocketError: The browser is forced to close", 'parallel_crawling')
except URLError, e:
log(FATALITY, "urllib2.URLError", 'parallel_crawling')
except ValueError, e:
log(FATALITY, "ValueError: could not convert string to float", 'parallel_crawling')
except IndexError, e:
log(FATALITY, "IndexError: List index out of range", 'parallel_crawling')
# except WindowsError, e:
# # [Error 32] The process cannot access the file because it is being used by another process:
# # 'c:\\users\\bo\\appdata\\local\\temp\\tmphrg3yv.webdriver.xpi\\resource\\modules\\web-element-cache.js'
# print e.message, str(e)
# log(FATALITY, "WindowsError: The browser is forced to close", 'parallel_crawling')
except WebDriverException:
log(FATALITY, "WebDriverError: The browser is forced to close.", 'parallel_crawling')
except BadStatusLine, e:
log(FATALITY, "BadStatusLine: The browser is forced to close. " + e.message, 'parallel_crawling')
# close the pool and wait for the work to finish
pool.close()
pool.join()
return
| mit |
bregman-arie/ansible | lib/ansible/modules/network/cumulus/_cl_ports.py | 33 | 2580 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Cumulus Networks <ce-ceng@cumulusnetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cl_ports
version_added: "2.1"
author: "Cumulus Networks (@CumulusNetworks)"
short_description: Configure Cumulus Switch port attributes (ports.conf)
deprecated:
removed_in: "2.5"
why: The M(nclu) module is designed to be easier to use for individuals who are new to Cumulus Linux by exposing the NCLU interface in an automatable way.
alternative: Use M(nclu) instead.
description:
- Set the initial port attributes defined in the Cumulus Linux ports.conf
file. This module does not do any error checking at the moment. Be careful
to not include ports that do not exist on the switch. Carefully read the
original ports.conf file for any exceptions or limitations.
For more details go the Configure Switch Port Attribute Documentation at
U(http://docs.cumulusnetworks.com).
options:
speed_10g:
description:
- List of ports to initially run at 10G.
speed_40g:
description:
- List of ports to initially run at 40G.
speed_4_by_10g:
description:
- List of 40G ports that will be unganged to run as 4 10G ports.
speed_40g_div_4:
description:
- List of 10G ports that will be ganged to form a 40G port.
'''
EXAMPLES = '''
# Use cl_ports module to manage the switch attributes defined in the
# ports.conf file on Cumulus Linux
## Unganged port configuration on certain ports
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1
- swp32
speed_40g:
- swp2-31
## Unganged port configuration using port ranges
- name: configure ports.conf setup
cl_ports:
speed_4_by_10g:
- swp1-3
- swp6
speed_40g:
- swp4-5
- swp7-32
'''
RETURN = '''
changed:
description: whether the interface was changed
returned: changed
type: bool
sample: True
msg:
description: human-readable report of success or failure
returned: always
type: string
sample: "interface bond0 config updated"
'''
from ansible.module_utils.common.removed import removed_module
if __name__ == '__main__':
removed_module()
| gpl-3.0 |
google-code-export/pyglet | experimental/modeswitch/win32device.py | 28 | 4045 | #!/usr/bin/python
# $Id:$
import ctypes
import pyglet
from pyglet.window.win32 import _user32
from pyglet.window.win32.constants import *
from pyglet.window.win32.types import *
WCHAR = ctypes.c_wchar
BCHAR = ctypes.c_wchar
class MONITORINFOEX(ctypes.Structure):
_fields_ = (
('cbSize', DWORD),
('rcMonitor', RECT),
('rcWork', RECT),
('dwFlags', DWORD),
('szDevice', WCHAR * CCHDEVICENAME)
)
class DEVMODE(ctypes.Structure):
_fields_ = (
('dmDeviceName', BCHAR * CCHDEVICENAME),
('dmSpecVersion', WORD),
('dmDriverVersion', WORD),
('dmSize', WORD),
('dmDriverExtra', WORD),
('dmFields', DWORD),
# Just using largest union member here
('dmOrientation', ctypes.c_short),
('dmPaperSize', ctypes.c_short),
('dmPaperLength', ctypes.c_short),
('dmPaperWidth', ctypes.c_short),
('dmScale', ctypes.c_short),
('dmCopies', ctypes.c_short),
('dmDefaultSource', ctypes.c_short),
('dmPrintQuality', ctypes.c_short),
# End union
('dmColor', ctypes.c_short),
('dmDuplex', ctypes.c_short),
('dmYResolution', ctypes.c_short),
('dmTTOption', ctypes.c_short),
('dmCollate', ctypes.c_short),
('dmFormName', BCHAR * CCHFORMNAME),
('dmLogPixels', WORD),
('dmBitsPerPel', DWORD),
('dmPelsWidth', DWORD),
('dmPelsHeight', DWORD),
('dmDisplayFlags', DWORD), # union with dmNup
('dmDisplayFrequency', DWORD),
('dmICMMethod', DWORD),
('dmICMIntent', DWORD),
('dmDitherType', DWORD),
('dmReserved1', DWORD),
('dmReserved2', DWORD),
('dmPanningWidth', DWORD),
('dmPanningHeight', DWORD),
)
class Win32Mode(object):
def __init__(self, devmode):
self._devmode = devmode
self.width = devmode.dmPelsWidth
self.height = devmode.dmPelsHeight
self.rate = devmode.dmDisplayFrequency
# Find available modes for each screen
screens = pyglet.window.get_platform().get_default_display().get_screens()
for screen in screens:
handle = screen._handle
info = MONITORINFOEX()
info.cbSize = ctypes.sizeof(MONITORINFOEX)
_user32.GetMonitorInfoW(handle, ctypes.byref(info))
screen.device_name = info.szDevice
screen.modes = []
i = 0
while True:
mode = DEVMODE()
mode.dmSize = ctypes.sizeof(DEVMODE)
r = _user32.EnumDisplaySettingsW(screen.device_name, i,
ctypes.byref(mode))
if not r:
break
screen.modes.append(Win32Mode(mode))
i += 1
def set_mode(screen, width, height, rate=None):
# Find best matching mode. Should factor out common with X11 mode select
best_mode = None
for mode in screen.modes:
if width > mode.width or height > mode.height:
continue
if not best_mode:
best_mode = mode
continue
if mode.width == best_mode.width:
if mode.height < best_mode.height:
if (rate is not None and
abs(rate - mode.rate) <
abs(rate - best_mode.rate)):
best_mode = mode
elif mode.height < best_mode.height:
best_mode = mode
elif mode.width < best_mode.width:
best_mode = mode
if best_mode is None:
raise Exception('No mode is in range of requested resolution.')
_user32.ChangeDisplaySettingsExW(screen.device_name,
ctypes.byref(best_mode._devmode),
None,
CDS_FULLSCREEN,
None)
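# Rough sketch of the selection above: modes smaller than the request are
# skipped, and among the rest the loop works toward the smallest width and
# height, using the refresh rate closest to `rate` as a tie-breaker -- e.g.
# asking for 800x600 when both 800x600 and 1024x768 are available picks
# 800x600.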
window = pyglet.window.Window()
set_mode(screens[1], 800, 600)
pyglet.app.run()
| bsd-3-clause |
Kazade/NeHe-Website | google_appengine/lib/cherrypy/cherrypy/test/test_conn.py | 53 | 26382 | """Tests for TCP connection handling, including proper and timely close."""
import socket
import sys
import time
timeout = 1
import cherrypy
from cherrypy._cpcompat import HTTPConnection, HTTPSConnection, NotConnected, BadStatusLine
from cherrypy._cpcompat import ntob, urlopen, unicodestr
from cherrypy.test import webtest
from cherrypy import _cperror
pov = 'pPeErRsSiIsStTeEnNcCeE oOfF vViIsSiIoOnN'
def setup_server():
def raise500():
raise cherrypy.HTTPError(500)
class Root:
def index(self):
return pov
index.exposed = True
page1 = index
page2 = index
page3 = index
def hello(self):
return "Hello, world!"
hello.exposed = True
def timeout(self, t):
return str(cherrypy.server.httpserver.timeout)
timeout.exposed = True
def stream(self, set_cl=False):
if set_cl:
cherrypy.response.headers['Content-Length'] = 10
def content():
for x in range(10):
yield str(x)
return content()
stream.exposed = True
stream._cp_config = {'response.stream': True}
def error(self, code=500):
raise cherrypy.HTTPError(code)
error.exposed = True
def upload(self):
if not cherrypy.request.method == 'POST':
raise AssertionError("'POST' != request.method %r" %
cherrypy.request.method)
return "thanks for '%s'" % cherrypy.request.body.read()
upload.exposed = True
def custom(self, response_code):
cherrypy.response.status = response_code
return "Code = %s" % response_code
custom.exposed = True
def err_before_read(self):
return "ok"
err_before_read.exposed = True
err_before_read._cp_config = {'hooks.on_start_resource': raise500}
def one_megabyte_of_a(self):
return ["a" * 1024] * 1024
one_megabyte_of_a.exposed = True
def custom_cl(self, body, cl):
cherrypy.response.headers['Content-Length'] = cl
if not isinstance(body, list):
body = [body]
newbody = []
for chunk in body:
if isinstance(chunk, unicodestr):
chunk = chunk.encode('ISO-8859-1')
newbody.append(chunk)
return newbody
custom_cl.exposed = True
# Turn off the encoding tool so it doesn't collapse
# our response body and recalculate the Content-Length.
custom_cl._cp_config = {'tools.encode.on': False}
cherrypy.tree.mount(Root())
cherrypy.config.update({
'server.max_request_body_size': 1001,
'server.socket_timeout': timeout,
})
from cherrypy.test import helper
class ConnectionCloseTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_HTTP11(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage("/")
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader("Connection")
# Make another request on the same connection.
self.getPage("/page1")
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader("Connection")
# Test client-side close.
self.getPage("/page2", headers=[("Connection", "close")])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader("Connection", "close")
# Make another request on the same connection, which should error.
self.assertRaises(NotConnected, self.getPage, "/")
def test_Streaming_no_len(self):
self._streaming(set_cl=False)
def test_Streaming_with_len(self):
self._streaming(set_cl=True)
def _streaming(self, set_cl):
if cherrypy.server.protocol_version == "HTTP/1.1":
self.PROTOCOL = "HTTP/1.1"
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage("/")
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader("Connection")
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should stream
# without closing the connection.
self.getPage("/stream?set_cl=Yes")
self.assertHeader("Content-Length")
self.assertNoHeader("Connection", "close")
self.assertNoHeader("Transfer-Encoding")
self.assertStatus('200 OK')
self.assertBody('0123456789')
else:
# When no Content-Length response header is provided,
# streamed output will either close the connection, or use
# chunked encoding, to determine transfer-length.
self.getPage("/stream")
self.assertNoHeader("Content-Length")
self.assertStatus('200 OK')
self.assertBody('0123456789')
chunked_response = False
for k, v in self.headers:
if k.lower() == "transfer-encoding":
if str(v) == "chunked":
chunked_response = True
if chunked_response:
self.assertNoHeader("Connection", "close")
else:
self.assertHeader("Connection", "close")
# Make another request on the same connection, which should error.
self.assertRaises(NotConnected, self.getPage, "/")
# Try HEAD. See http://www.cherrypy.org/ticket/864.
self.getPage("/stream", method='HEAD')
self.assertStatus('200 OK')
self.assertBody('')
self.assertNoHeader("Transfer-Encoding")
else:
self.PROTOCOL = "HTTP/1.0"
self.persistent = True
# Make the first request and assert Keep-Alive.
self.getPage("/", headers=[("Connection", "Keep-Alive")])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader("Connection", "Keep-Alive")
# Make another, streamed request on the same connection.
if set_cl:
# When a Content-Length is provided, the content should
# stream without closing the connection.
self.getPage("/stream?set_cl=Yes",
headers=[("Connection", "Keep-Alive")])
self.assertHeader("Content-Length")
self.assertHeader("Connection", "Keep-Alive")
self.assertNoHeader("Transfer-Encoding")
self.assertStatus('200 OK')
self.assertBody('0123456789')
else:
# When a Content-Length is not provided,
# the server should close the connection.
self.getPage("/stream", headers=[("Connection", "Keep-Alive")])
self.assertStatus('200 OK')
self.assertBody('0123456789')
self.assertNoHeader("Content-Length")
self.assertNoHeader("Connection", "Keep-Alive")
self.assertNoHeader("Transfer-Encoding")
# Make another request on the same connection, which should error.
self.assertRaises(NotConnected, self.getPage, "/")
def test_HTTP10_KeepAlive(self):
self.PROTOCOL = "HTTP/1.0"
if self.scheme == "https":
self.HTTP_CONN = HTTPSConnection
else:
self.HTTP_CONN = HTTPConnection
# Test a normal HTTP/1.0 request.
self.getPage("/page2")
self.assertStatus('200 OK')
self.assertBody(pov)
# Apache, for example, may emit a Connection header even for HTTP/1.0
## self.assertNoHeader("Connection")
# Test a keep-alive HTTP/1.0 request.
self.persistent = True
self.getPage("/page3", headers=[("Connection", "Keep-Alive")])
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertHeader("Connection", "Keep-Alive")
# Remove the keep-alive header again.
self.getPage("/page3")
self.assertStatus('200 OK')
self.assertBody(pov)
# Apache, for example, may emit a Connection header even for HTTP/1.0
## self.assertNoHeader("Connection")
class PipelineTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_HTTP11_Timeout(self):
# If we timeout without sending any data,
# the server will close the conn with a 408.
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Connect but send nothing.
self.persistent = True
conn = self.HTTP_CONN
conn.auto_open = False
conn.connect()
# Wait for our socket timeout
time.sleep(timeout * 2)
# The request should have returned 408 already.
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 408)
conn.close()
# Connect but send half the headers only.
self.persistent = True
conn = self.HTTP_CONN
conn.auto_open = False
conn.connect()
conn.send(ntob('GET /hello HTTP/1.1'))
conn.send(("Host: %s" % self.HOST).encode('ascii'))
# Wait for our socket timeout
time.sleep(timeout * 2)
# The conn should have already sent 408.
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 408)
conn.close()
def test_HTTP11_Timeout_after_request(self):
# If we timeout after at least one request has succeeded,
# the server will close the conn without 408.
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Make an initial request
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/timeout?t=%s" % timeout, skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(str(timeout))
# Make a second request on the same socket
conn._output(ntob('GET /hello HTTP/1.1'))
conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
conn._send_output()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody("Hello, world!")
# Wait for our socket timeout
time.sleep(timeout * 2)
# Make another request on the same socket, which should error
conn._output(ntob('GET /hello HTTP/1.1'))
conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
conn._send_output()
response = conn.response_class(conn.sock, method="GET")
try:
response.begin()
except:
if not isinstance(sys.exc_info()[1],
(socket.error, BadStatusLine)):
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" % sys.exc_info()[1])
else:
if response.status != 408:
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" %
response.read())
conn.close()
# Make another request on a new socket, which should work
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(pov)
# Make another request on the same socket,
# but timeout on the headers
conn.send(ntob('GET /hello HTTP/1.1'))
# Wait for our socket timeout
time.sleep(timeout * 2)
response = conn.response_class(conn.sock, method="GET")
try:
response.begin()
except:
if not isinstance(sys.exc_info()[1],
(socket.error, BadStatusLine)):
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" % sys.exc_info()[1])
else:
self.fail("Writing to timed out socket didn't fail"
" as it should have: %s" %
response.read())
conn.close()
# Retry the request on a new connection, which should work
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.assertEqual(response.status, 200)
self.body = response.read()
self.assertBody(pov)
conn.close()
def test_HTTP11_pipelining(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Test pipelining. httplib doesn't support this directly.
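        # The trick: write raw request bytes with httplib's private _output/
        # _send_output helpers while each response is read manually through
        # response_class, so several requests can be in flight at once.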
self.persistent = True
conn = self.HTTP_CONN
# Put request 1
conn.putrequest("GET", "/hello", skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
for trial in range(5):
# Put next request
conn._output(ntob('GET /hello HTTP/1.1'))
conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
conn._send_output()
# Retrieve previous response
response = conn.response_class(conn.sock, method="GET")
response.begin()
body = response.read(13)
self.assertEqual(response.status, 200)
self.assertEqual(body, ntob("Hello, world!"))
# Retrieve final response
response = conn.response_class(conn.sock, method="GET")
response.begin()
body = response.read()
self.assertEqual(response.status, 200)
self.assertEqual(body, ntob("Hello, world!"))
conn.close()
def test_100_Continue(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
self.persistent = True
conn = self.HTTP_CONN
# Try a page without an Expect request header first.
# Note that httplib's response.begin automatically ignores
# 100 Continue responses, so we must manually check for it.
conn.putrequest("POST", "/upload", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader("Content-Type", "text/plain")
conn.putheader("Content-Length", "4")
conn.endheaders()
conn.send(ntob("d'oh"))
response = conn.response_class(conn.sock, method="POST")
version, status, reason = response._read_status()
self.assertNotEqual(status, 100)
conn.close()
# Now try a page with an Expect header...
conn.connect()
conn.putrequest("POST", "/upload", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader("Content-Type", "text/plain")
conn.putheader("Content-Length", "17")
conn.putheader("Expect", "100-continue")
conn.endheaders()
response = conn.response_class(conn.sock, method="POST")
# ...assert and then skip the 100 response
version, status, reason = response._read_status()
self.assertEqual(status, 100)
while True:
line = response.fp.readline().strip()
if line:
self.fail("100 Continue should not output any headers. Got %r" % line)
else:
break
# ...send the body
body = ntob("I am a small file")
conn.send(body)
# ...get the final response
response.begin()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(200)
self.assertBody("thanks for '%s'" % body)
conn.close()
class ConnectionTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_readall_or_close(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
if self.scheme == "https":
self.HTTP_CONN = HTTPSConnection
else:
self.HTTP_CONN = HTTPConnection
# Test a max of 0 (the default) and then reset to what it was above.
old_max = cherrypy.server.max_request_body_size
for new_max in (0, old_max):
cherrypy.server.max_request_body_size = new_max
self.persistent = True
conn = self.HTTP_CONN
# Get a POST page with an error
conn.putrequest("POST", "/err_before_read", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader("Content-Type", "text/plain")
conn.putheader("Content-Length", "1000")
conn.putheader("Expect", "100-continue")
conn.endheaders()
response = conn.response_class(conn.sock, method="POST")
# ...assert and then skip the 100 response
version, status, reason = response._read_status()
self.assertEqual(status, 100)
while True:
skip = response.fp.readline().strip()
if not skip:
break
# ...send the body
conn.send(ntob("x" * 1000))
# ...get the final response
response.begin()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(500)
# Now try a working page with an Expect header...
conn._output(ntob('POST /upload HTTP/1.1'))
conn._output(ntob("Host: %s" % self.HOST, 'ascii'))
conn._output(ntob("Content-Type: text/plain"))
conn._output(ntob("Content-Length: 17"))
conn._output(ntob("Expect: 100-continue"))
conn._send_output()
response = conn.response_class(conn.sock, method="POST")
# ...assert and then skip the 100 response
version, status, reason = response._read_status()
self.assertEqual(status, 100)
while True:
skip = response.fp.readline().strip()
if not skip:
break
# ...send the body
body = ntob("I am a small file")
conn.send(body)
# ...get the final response
response.begin()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(200)
self.assertBody("thanks for '%s'" % body)
conn.close()
def test_No_Message_Body(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Set our HTTP_CONN to an instance so it persists between requests.
self.persistent = True
# Make the first request and assert there's no "Connection: close".
self.getPage("/")
self.assertStatus('200 OK')
self.assertBody(pov)
self.assertNoHeader("Connection")
# Make a 204 request on the same connection.
self.getPage("/custom/204")
self.assertStatus(204)
self.assertNoHeader("Content-Length")
self.assertBody("")
self.assertNoHeader("Connection")
# Make a 304 request on the same connection.
self.getPage("/custom/304")
self.assertStatus(304)
self.assertNoHeader("Content-Length")
self.assertBody("")
self.assertNoHeader("Connection")
def test_Chunked_Encoding(self):
if cherrypy.server.protocol_version != "HTTP/1.1":
return self.skip()
if (hasattr(self, 'harness') and
"modpython" in self.harness.__class__.__name__.lower()):
# mod_python forbids chunked encoding
return self.skip()
self.PROTOCOL = "HTTP/1.1"
# Set our HTTP_CONN to an instance so it persists between requests.
self.persistent = True
conn = self.HTTP_CONN
# Try a normal chunked request (with extensions)
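        # Chunk framing: size "8" with extension ";key=value", 8 bytes of data
        # ("xx\r\nxxxx"), a 5-byte chunk ("yyyyy"), the terminating zero-size
        # chunk, and a "Content-Type" trailer header.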
body = ntob("8;key=value\r\nxx\r\nxxxx\r\n5\r\nyyyyy\r\n0\r\n"
"Content-Type: application/json\r\n"
"\r\n")
conn.putrequest("POST", "/upload", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader("Transfer-Encoding", "chunked")
conn.putheader("Trailer", "Content-Type")
# Note that this is somewhat malformed:
# we shouldn't be sending Content-Length.
# RFC 2616 says the server should ignore it.
conn.putheader("Content-Length", "3")
conn.endheaders()
conn.send(body)
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus('200 OK')
self.assertBody("thanks for '%s'" % ntob('xx\r\nxxxxyyyyy'))
# Try a chunked request that exceeds server.max_request_body_size.
# Note that the delimiters and trailer are included.
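        # (0x3e3 == 995 bytes of "x"; with the chunk delimiters counted, the
        # request is meant to exceed the suite's configured limit.)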
body = ntob("3e3\r\n" + ("x" * 995) + "\r\n0\r\n\r\n")
conn.putrequest("POST", "/upload", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader("Transfer-Encoding", "chunked")
conn.putheader("Content-Type", "text/plain")
# Chunked requests don't need a content-length
## conn.putheader("Content-Length", len(body))
conn.endheaders()
conn.send(body)
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(413)
conn.close()
def test_Content_Length_in(self):
# Try a non-chunked request where Content-Length exceeds
# server.max_request_body_size. Assert error before body send.
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("POST", "/upload", skip_host=True)
conn.putheader("Host", self.HOST)
conn.putheader("Content-Type", "text/plain")
conn.putheader("Content-Length", "9999")
conn.endheaders()
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(413)
self.assertBody("The entity sent with the request exceeds "
"the maximum allowed bytes.")
conn.close()
def test_Content_Length_out_preheaders(self):
# Try a non-chunked response where Content-Length is less than
# the actual bytes in the response body.
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/custom_cl?body=I+have+too+many+bytes&cl=5",
skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(500)
self.assertBody(
"The requested resource returned more bytes than the "
"declared Content-Length.")
conn.close()
def test_Content_Length_out_postheaders(self):
# Try a non-chunked response where Content-Length is less than
# the actual bytes in the response body.
self.persistent = True
conn = self.HTTP_CONN
conn.putrequest("GET", "/custom_cl?body=I+too&body=+have+too+many&cl=5",
skip_host=True)
conn.putheader("Host", self.HOST)
conn.endheaders()
response = conn.getresponse()
self.status, self.headers, self.body = webtest.shb(response)
self.assertStatus(200)
self.assertBody("I too")
conn.close()
def test_598(self):
remote_data_conn = urlopen('%s://%s:%s/one_megabyte_of_a/' %
(self.scheme, self.HOST, self.PORT,))
buf = remote_data_conn.read(512)
time.sleep(timeout * 0.6)
remaining = (1024 * 1024) - 512
while remaining:
data = remote_data_conn.read(remaining)
if not data:
break
else:
buf += data
remaining -= len(data)
self.assertEqual(len(buf), 1024 * 1024)
self.assertEqual(buf, ntob("a" * 1024 * 1024))
self.assertEqual(remaining, 0)
remote_data_conn.close()
class BadRequestTests(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_No_CRLF(self):
self.persistent = True
conn = self.HTTP_CONN
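        # Request line terminated with bare LF instead of CRLF; the server
        # must reject this, since RFC 2616 requires CRLF line terminators.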
conn.send(ntob('GET /hello HTTP/1.1\n\n'))
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.body = response.read()
self.assertBody("HTTP requires CRLF terminators")
conn.close()
conn.connect()
conn.send(ntob('GET /hello HTTP/1.1\r\n\n'))
response = conn.response_class(conn.sock, method="GET")
response.begin()
self.body = response.read()
self.assertBody("HTTP requires CRLF terminators")
conn.close()
| bsd-3-clause |
scrollback/kuma | kuma/contentflagging/migrations/0002_unique_hash_index.py | 5 | 6338 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
try:
# Removing unique constraint on 'ContentFlag', fields ['ip', 'object_pk', 'user_agent', 'content_type', 'session_key', 'user']
db.delete_unique('contentflagging_contentflag', ['ip', 'object_pk', 'user_agent', 'content_type_id', 'session_key', 'user_id'])
        except Exception:
# This constraint may have already been removed, so ignore exceptions
pass
# Deleting field 'ContentFlag.session_key'
db.delete_column('contentflagging_contentflag', 'session_key')
# Adding field 'ContentFlag.unique_hash'
db.add_column('contentflagging_contentflag', 'unique_hash', self.gf('django.db.models.fields.CharField')(max_length=32, unique=True, null=True, db_index=True), keep_default=False)
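        # unique=True with null=True lets existing rows keep a NULL hash while
        # still enforcing uniqueness on new values (most backends, e.g. MySQL,
        # permit multiple NULLs under a unique index)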
def backwards(self, orm):
# Adding field 'ContentFlag.session_key'
db.add_column('contentflagging_contentflag', 'session_key', self.gf('django.db.models.fields.CharField')(max_length=40, null=True, blank=True), keep_default=False)
# Deleting field 'ContentFlag.unique_hash'
db.delete_column('contentflagging_contentflag', 'unique_hash')
# Adding unique constraint on 'ContentFlag', fields ['ip', 'object_pk', 'user_agent', 'content_type', 'session_key', 'user']
db.create_unique('contentflagging_contentflag', ['ip', 'object_pk', 'user_agent', 'content_type_id', 'session_key', 'user_id'])
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contentflagging.contentflag': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ContentFlag'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_contentflag'", 'to': "orm['contenttypes.ContentType']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'explanation': ('django.db.models.fields.TextField', [], {'max_length': '255', 'blank': 'True'}),
'flag_status': ('django.db.models.fields.CharField', [], {'default': "'flagged'", 'max_length': '16'}),
'flag_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'object_pk': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'unique_hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['contentflagging']
| mpl-2.0 |
Michal-Fularz/codingame_solutions | codingame_solutions/easy/easy_Chuck_Norris.py | 1 | 1322 | __author__ = 'Amin'
import sys
import math
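# Chuck Norris encoding (CodinGame puzzle): the message is rewritten in unary
# using only zeros. Each run of identical bits becomes two blocks: "0" if the
# run consists of 1-bits, "00" if it consists of 0-bits, then one "0" per bit
# in the run. Worked example with 7-bit ASCII: 'C' = 1000011 ->
# "0 0" (one 1) + "00 0000" (four 0s) + "0 00" (two 1s).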
def prepare_answer(bit_type, count, flag_without_trailing_space=False):
answer = ""
if bit_type == 1:
answer += "0"
else:
answer += "00"
answer += " "
for i in xrange(0, count):
answer += "0"
if not flag_without_trailing_space:
answer += " "
return answer
MESSAGE = raw_input()
characters = list(MESSAGE)
bits = []
for char in characters:
    # iterate over the 7 bits of the ASCII code, most significant bit first
for i in reversed(xrange(0, 7)):
bit = (ord(char) >> i) & 0x01
bits.append(bit)
answer = ""
bit_type = 0
count = 0
for bit in bits:
if count == 0:
if bit == 1:
bit_type = 1
else:
bit_type = 0
count += 1
else:
if bit != bit_type:
# the sign has changed
answer += prepare_answer(bit_type, count)
bit_type = bit
count = 1
else:
count += 1
# add the last part (accumulated but not added to answer)
answer += prepare_answer(bit_type, count, flag_without_trailing_space=True)
# instead of using the flag in the function, it is possible to just remove
# the last character (the trailing space) like this:
#answer = answer[:-1]
# Write an action using print
# To debug: print >> sys.stderr, "Debug messages..."
print answer
| mit |
start-jsk/jsk_apc | demos/grasp_data_generator/grasp_data_generator/visualizations/vis_occluded_grasp_instance_segmentation.py | 1 | 7143 | from __future__ import division
import numpy as np
from chainercv.visualizations.colormap import voc_colormap
from chainercv.visualizations import vis_image
from grasp_data_generator.models.occluded_grasp_mask_rcnn.utils \
import rot_lbl_to_rot
def vis_occluded_grasp_instance_segmentation(
img, ins_label, label=None, bbox=None, score=None,
sg_label=None, dg_label=None, label_names=None, rotate_angle=None,
instance_colors=None, alpha=0.7, linewidth=1., fontsize=8, prefix=None,
axes=None,
):
from matplotlib import pyplot as plt
    if bbox is not None and len(bbox) != len(ins_label):
        raise ValueError('The length of ins_label must be the same as that of bbox')
    if label is not None and len(bbox) != len(label):
        raise ValueError('The length of label must be the same as that of bbox')
    if score is not None and len(bbox) != len(score):
        raise ValueError('The length of score must be the same as that of bbox')
n_inst = len(bbox)
if instance_colors is None:
instance_colors = voc_colormap(list(range(1, n_inst + 1)))
instance_colors = np.array(instance_colors)
if axes is None:
f, axes = plt.subplots(1, 5, sharey=True)
else:
f = None
ins_names = ['background', 'visible', 'occluded']
for ins_id, ax in enumerate(axes[:3]):
if prefix is None:
ax.set_title(ins_names[ins_id])
else:
ax.set_title('{0} : {1}'.format(prefix, ins_names[ins_id]))
ax = vis_image(img, ax=ax)
_, H, W = img.shape
canvas_img = np.zeros((H, W, 4), dtype=np.uint8)
for i, (bb, ins_lbl) in enumerate(zip(bbox, ins_label)):
        # The length of `instance_colors` can be smaller than the number of
        # instances if a non-default value is used.
color = instance_colors[i % len(instance_colors)]
rgba = np.append(color, alpha * 255)
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max > y_min and x_max > x_min:
ins_mask = ins_lbl[y_min:y_max, x_min:x_max] == ins_id
canvas_img[y_min:y_max, x_min:x_max][ins_mask] = rgba
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
ax.add_patch(plt.Rectangle(
xy, width, height, fill=False,
edgecolor=color / 255, linewidth=linewidth, alpha=alpha))
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if len(caption) > 0:
ax.text((x_max + x_min) / 2, y_min,
': '.join(caption),
style='italic',
bbox={'facecolor': color / 255, 'alpha': alpha},
fontsize=fontsize, color='white')
ax.imshow(canvas_img)
ax3, ax4 = axes[3:5]
if prefix is None:
ax3.set_title('single grasp')
else:
ax3.set_title('{0} : single grasp'.format(prefix))
ax3 = vis_image(img, ax=ax3)
_, H, W = img.shape
canvas_img = np.zeros((H, W, 4), dtype=np.uint8)
for i, (bb, sg_lbl) in enumerate(zip(bbox, sg_label)):
count = np.bincount(sg_lbl.flatten(), minlength=1)
# no grasp mask
if len(count) == 1:
continue
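        # pick the most frequent rotation label for this instance,
        # skipping the background bin (label 0)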
rot_id = np.argmax(count[1:]) + 1
        # The length of `instance_colors` can be smaller than the number of
        # instances if a non-default value is used.
color = instance_colors[i % len(instance_colors)]
rgba = np.append(color, alpha * 255)
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max > y_min and x_max > x_min:
canvas_img[sg_lbl == rot_id] = rgba
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
ax3.add_patch(plt.Rectangle(
xy, width, height, fill=False,
edgecolor=color / 255, linewidth=linewidth, alpha=alpha))
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if rotate_angle is not None:
rot = rot_lbl_to_rot(rot_id, rotate_angle)
caption.append('{} degree'.format(rot))
if len(caption) > 0:
ax3.text((x_max + x_min) / 2, y_min,
': '.join(caption),
style='italic',
bbox={'facecolor': color / 255, 'alpha': alpha},
fontsize=fontsize, color='white')
ax3.imshow(canvas_img)
if prefix is None:
ax4.set_title('dual grasp')
else:
ax4.set_title('{0} : dual grasp'.format(prefix))
ax4 = vis_image(img, ax=ax4)
_, H, W = img.shape
canvas_img = np.zeros((H, W, 4), dtype=np.uint8)
for i, (bb, dg_lbl) in enumerate(zip(bbox, dg_label)):
count = np.bincount(dg_lbl.flatten(), minlength=1)
# no grasp mask
if len(count) == 1:
continue
rot_id = np.argmax(count[1:]) + 1
        # The length of `instance_colors` can be smaller than the number of
        # instances if a non-default value is used.
color = instance_colors[i % len(instance_colors)]
rgba = np.append(color, alpha * 255)
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max > y_min and x_max > x_min:
canvas_img[dg_lbl == rot_id] = rgba
xy = (bb[1], bb[0])
height = bb[2] - bb[0]
width = bb[3] - bb[1]
ax4.add_patch(plt.Rectangle(
xy, width, height, fill=False,
edgecolor=color / 255, linewidth=linewidth, alpha=alpha))
caption = []
if label is not None and label_names is not None:
lb = label[i]
if not (0 <= lb < len(label_names)):
raise ValueError('No corresponding name is given')
caption.append(label_names[lb])
if score is not None:
sc = score[i]
caption.append('{:.2f}'.format(sc))
if rotate_angle is not None and dg_lbl.max() > 0:
rot = rot_lbl_to_rot(rot_id, rotate_angle)
caption.append('{} degree'.format(rot))
if len(caption) > 0:
ax4.text((x_max + x_min) / 2, y_min,
': '.join(caption),
style='italic',
bbox={'facecolor': color / 255, 'alpha': alpha},
fontsize=fontsize, color='white')
ax4.imshow(canvas_img)
return f, axes
| bsd-3-clause |
wangyum/mxnet | example/gluon/super_resolution.py | 23 | 7631 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse, tarfile
import math
import os
import numpy as np
import mxnet as mx
import mxnet.ndarray as F
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd as ag
from mxnet.test_utils import download
from mxnet.image import CenterCropAug, ResizeAug
from mxnet.io import PrefetchingIter
from data import ImagePairIter
# CLI
parser = argparse.ArgumentParser(description='Super-resolution using an efficient sub-pixel convolution neural network.')
parser.add_argument('--upscale_factor', type=int, default=3, help="super resolution upscale factor. default is 3.")
parser.add_argument('--batch_size', type=int, default=4, help='training batch size, per device. default is 4.')
parser.add_argument('--test_batch_size', type=int, default=100, help='test batch size')
parser.add_argument('--epochs', type=int, default=30, help='number of training epochs')
parser.add_argument('--lr', type=float, default=0.001, help='learning rate. default is 0.001.')
parser.add_argument('--use-gpu', action='store_true', help='whether to use GPU.')
parser.add_argument('--seed', type=int, default=123, help='random seed to use. Default=123')
parser.add_argument('--resolve_img', type=str, help='input image to use')
opt = parser.parse_args()
print(opt)
upscale_factor = opt.upscale_factor
batch_size, test_batch_size = opt.batch_size, opt.test_batch_size
color_flag = 0
# get data
dataset_path = "dataset"
dataset_url = "http://www2.eecs.berkeley.edu/Research/Projects/CS/vision/bsds/BSDS300-images.tgz"
def get_dataset(prefetch=False):
image_path = os.path.join(dataset_path, "BSDS300/images")
if not os.path.exists(image_path):
os.makedirs(dataset_path)
file_name = download(dataset_url)
with tarfile.open(file_name) as tar:
for item in tar:
tar.extract(item, dataset_path)
os.remove(file_name)
crop_size = 256
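    # make the HR crop size divisible by the upscale factor so the LR input
    # (crop_size // upscale_factor) scales back to exactly crop_size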
crop_size -= crop_size % upscale_factor
input_crop_size = crop_size // upscale_factor
input_transform = [CenterCropAug((crop_size, crop_size)), ResizeAug(input_crop_size)]
target_transform = [CenterCropAug((crop_size, crop_size))]
iters = (ImagePairIter(os.path.join(image_path, "train"),
(input_crop_size, input_crop_size),
(crop_size, crop_size),
batch_size, color_flag, input_transform, target_transform),
ImagePairIter(os.path.join(image_path, "test"),
(input_crop_size, input_crop_size),
(crop_size, crop_size),
test_batch_size, color_flag,
input_transform, target_transform))
return [PrefetchingIter(i) for i in iters] if prefetch else iters
train_data, val_data = get_dataset()
mx.random.seed(opt.seed)
ctx = [mx.gpu(0)] if opt.use_gpu else [mx.cpu()]
# define model
def _rearrange(raw, F, upscale_factor):
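    # Pixel shuffle (sub-pixel convolution, Shi et al. 2016): rearranges a
    # (N, C*r^2, H, W) tensor into (N, C, H*r, W*r); e.g. with r=3 an input
    # of shape (N, 9, H, W) becomes (N, 1, 3*H, 3*W).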
# (N, C * r^2, H, W) -> (N, C, r^2, H, W)
splitted = F.reshape(raw, shape=(0, -4, -1, upscale_factor**2, 0, 0))
# (N, C, r^2, H, W) -> (N, C, r, r, H, W)
unflatten = F.reshape(splitted, shape=(0, 0, -4, upscale_factor, upscale_factor, 0, 0))
# (N, C, r, r, H, W) -> (N, C, H, r, W, r)
swapped = F.transpose(unflatten, axes=(0, 1, 4, 2, 5, 3))
# (N, C, H, r, W, r) -> (N, C, H*r, W*r)
return F.reshape(swapped, shape=(0, 0, -3, -3))
class SuperResolutionNet(gluon.Block):
def __init__(self, upscale_factor):
super(SuperResolutionNet, self).__init__()
with self.name_scope():
self.conv1 = nn.Conv2D(64, (5, 5), strides=(1, 1), padding=(2, 2))
self.conv2 = nn.Conv2D(64, (3, 3), strides=(1, 1), padding=(1, 1))
self.conv3 = nn.Conv2D(32, (3, 3), strides=(1, 1), padding=(1, 1))
self.conv4 = nn.Conv2D(upscale_factor ** 2, (3, 3), strides=(1, 1), padding=(1, 1))
self.upscale_factor = upscale_factor
def forward(self, x):
x = F.Activation(self.conv1(x), act_type='relu')
x = F.Activation(self.conv2(x), act_type='relu')
x = F.Activation(self.conv3(x), act_type='relu')
return _rearrange(self.conv4(x), F, self.upscale_factor)
net = SuperResolutionNet(upscale_factor)
metric = mx.metric.MSE()
def test(ctx):
val_data.reset()
avg_psnr = 0
batches = 0
for batch in val_data:
batches += 1
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
outputs = []
for x in data:
outputs.append(net(x))
metric.update(label, outputs)
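        # PSNR = 10 * log10(MAX^2 / MSE); assuming pixel values normalized to
        # [0, 1] (MAX = 1), the formula reduces to 10 * log10(1 / MSE)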
avg_psnr += 10 * math.log10(1/metric.get()[1])
metric.reset()
avg_psnr /= batches
print('validation avg psnr: %f'%avg_psnr)
def train(epoch, ctx):
if isinstance(ctx, mx.Context):
ctx = [ctx]
net.initialize(mx.init.Orthogonal(), ctx=ctx)
# re-initialize conv4's weight to be Orthogonal
net.conv4.collect_params().initialize(mx.init.Orthogonal(scale=1), force_reinit=True, ctx=ctx)
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate': opt.lr})
loss = gluon.loss.L2Loss()
for i in range(epoch):
train_data.reset()
for batch in train_data:
data = gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch.label[0], ctx_list=ctx, batch_axis=0)
outputs = []
with ag.record():
for x, y in zip(data, label):
z = net(x)
L = loss(z, y)
L.backward()
outputs.append(z)
trainer.step(batch.data[0].shape[0])
metric.update(label, outputs)
name, acc = metric.get()
metric.reset()
print('training mse at epoch %d: %s=%f'%(i, name, acc))
test(ctx)
net.save_params('superres.params')
def resolve(ctx):
from PIL import Image
if isinstance(ctx, list):
ctx = [ctx[0]]
net.load_params('superres.params', ctx=ctx)
img = Image.open(opt.resolve_img).convert('YCbCr')
y, cb, cr = img.split()
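    # the network super-resolves only the luma (Y) channel; the chroma (Cb,
    # Cr) channels are upscaled below with plain bicubic interpolation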
data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()
out_img_y = out_img_y.clip(0, 255)
out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')
out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
out_img.save('resolved.png')
if opt.resolve_img:
resolve(ctx)
else:
train(opt.epochs, ctx)
| apache-2.0 |