prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
import vtk

# Load a rectilinear-grid dataset (jet flow sample) and grab its bounds so
# the seed plane and outlines can be sized to the data.
reader = vtk.vtkRectilinearGridReader()
reader.SetFileName("D:/Notebooks_Bogota2017/SS_2017/data/jet4_0.500.vtk")
reader.Update()
output = reader.GetOutput()
xmi, xma, ymi, yma, zmi, zma = output.GetBounds()

# Color Transfer Function and LookUpTable
# Create transfer mapping scalar value to color
colorTransferFunction = vtk.vtkColorTransferFunction()
colorTransferFunction.AddRGBPoint(0.0, 1.0, 0.0, 0.0)
colorTransferFunction.AddRGBPoint(0.15, 0.0, 0.0, 1.0)
colorTransferFunction.AddRGBPoint(0.3, 0.0, 1.0, 0.0)
tableSize = 30
lut = vtk.vtkLookupTable()
lut.SetNumberOfTableValues(tableSize)
lut.Build()
for i in range(0, tableSize):
    # Sample the transfer function and append a constant alpha of 0.2.
    rgb = list(colorTransferFunction.GetColor(float(i) / tableSize)) + [0.2]
    lut.SetTableValue(i, rgb)

# A plane used as the seed source for the stream tracer.
plane = vtk.vtkPlaneSource()
plane.SetOrigin(0, 0, 0)
plane.SetPoint1(xma, 0, 0)
plane.SetPoint2(0, 0, zma)
plane.SetXResolution(20)
plane.SetYResolution(20)

# Add the outline of the seed plane.
# FIX: the original called outline.SetInputData(plane.GetOutput()) before the
# plane source had ever executed, so the outline filter received an empty,
# disconnected dataset. Connecting the output port keeps the pipeline live.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(plane.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(1, 1, 1)

# Compute streamlines seeded from the plane through the grid's vector field.
streamline = vtk.vtkStreamTracer()
streamline.SetSourceConnection(plane.GetOutputPort())
streamline.SetInputConnection(reader.GetOutputPort())
streamline.SetIntegrationDirectionToForward()
# Alternative integration directions, kept for experimentation:
#streamline.SetIntegrationDirectionToBackward()
#streamline.SetIntegrationDirectionToBoth()
streamline.SetMaximumPropagation(1)
streamline.SetComputeVorticity(True)

# Visualize stream as ribbons (= Stream ribbons); i.e. we need to pass the
# streamlines through the ribbon filter.
streamRibbons = vtk.vtkRibbonFilter()
streamRibbons.SetInputConnection(streamline.GetOutputPort())
streamRibbons.SetWidth(0.01)
streamRibbons.Update()
streamRibbonsMapper = vtk.vtkPolyDataMapper()
streamRibbonsMapper.SetScalarModeToUsePointFieldData()
streamRibbonsMapper.SetInputConnection(streamRibbons.GetOutputPort())
# ***TODO: apply a transfer function to the stream ribbons
streamRibbonsActor = vtk.vtkActor()
streamRibbonsActor.SetMapper(streamRibbonsMapper)

# Visualize stream as tubes (= Stream tubes), colored by the vector
# magnitude (GetRange(-1) returns the range of the vector magnitudes).
streamTubes = vtk.vtkTubeFilter()
streamTubes.SetInputConnection(streamline.GetOutputPort())
streamTubes.SetRadius(0.01)
streamTubes.Update()
streamTubeMapper = vtk.vtkPolyDataMapper()
streamTubeMapper.SetLookupTable(lut)
streamTubeMapper.SetInputConnection(streamTubes.GetOutputPort())
streamTubeMapper.SetScalarVisibility(True)
streamTubeMapper.SetScalarModeToUsePointFieldData()
streamTubeMapper.SelectColorArray('vectors')
streamTubeMapper.SetScalarRange(
    reader.GetOutput().GetPointData().GetVectors().GetRange(-1))
streamTubeActor = vtk.vtkActor()
streamTubeActor.SetMapper(streamTubeMapper)

# Visualize stream as lines (= Stream lines).
# Pass the streamlines to the mapper.
streamlineMapper = vtk.vtkPolyDataMapper()
streamlineMapper.SetLookupTable(lut)
streamlineMapper.SetInputConnection(streamline.GetOutputPort())
streamlineMapper.SetScalarVisibility(True)
streamlineMapper.SetScalarModeToUsePointFieldData()
streamlineMapper.SelectColorArray('vectors')
streamlineMapper.SetScalarRange(
    reader.GetOutput().GetPointData().GetVectors().GetRange(-1))
# Pass the mapper to the actor.
streamlineActor = vtk.vtkActor()
streamlineActor.SetMapper(streamlineMapper)
streamlineActor.GetProperty().SetLineWidth(2.0)

# Add the outline of the data set.
gOutline = vtk.vtkRectilinearGridOutlineFilter()
gOutline.SetInputData(output)
gOutlineMapper = vtk.vtkPolyDataMapper()
gOutlineMapper.SetInputConnection(gOutline.GetOutputPort())
gOutlineActor = vtk.vtkActor()
gOutlineActor.SetMapper(gOutlineMapper)
gOutlineActor.GetProperty().SetColor(0.5, 0.5, 0.5)

# Rendering / Window
renderer = vtk.vtkRenderer()
renderer.SetBackground(0.0, 0.0, 0.0)
# Alternative representations, kept for experimentation:
#renderer.AddActor(streamlineActor)
# renderer.AddActor(streamRibbonsActor)
renderer.AddActor(streamTubeActor)
renderer.AddActor(outlineActor)
renderer.AddActor(gOutlineActor)
renderWindow = vtk.vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindow.SetSize(500, 500)
renderWindow.Render()
interactor = vtk.vtkRenderWindowInteractor()
interactor.SetInteractorStyle(vtk.vtkInteractorStyleTrackballCamera())
interactor.SetRenderWindow(renderWindow)
interactor.Initialize()
interactor.Start()
ut()
second_submission_key = student_work.Submission(
reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
older_assigned_and_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=1,
reviewee_key=second_reviewee_key,
submission_key=second_submission_key, unit_id=self.unit_id
).put()
third_reviewee_key = models.Student(
key_name='reviewee3@example.com').put()
third_submission_key = student_work.Submission(
reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
younger_assigned_and_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=1,
reviewee_key=third_reviewee_key,
submission_key=third_submission_key, unit_id=self.unit_id
).put()
fourth_reviewee_key = models.Student(
key_name='reviewee4@example.com').put()
fourth_submission_key = student_work.Submission(
reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
completed_but_not_assigned_key = peer.ReviewSummary(
assigned_count=0, completed_count=1,
reviewee_key=fourth_reviewee_key,
submission_key=fourth_submission_key, unit_id=self.unit_id
).put()
fifth_reviewee_key = models.Student(
key_name='reviewee5@example.com').put()
fifth_submission_key = student_work.Submission(
reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
assigned_but_not_completed_key = peer.ReviewSummary(
assigned_count=1, completed_count=0,
reviewee_key=fifth_reviewee_key,
submission_key=fifth_submission_key, unit_id=self.unit_id
).put()
results = review_module.Manager.get_assignment_candidates_query(
self.unit_id).fetch(5)
self.assertEqual([
assigned_but_not_completed_key,
completed_but_not_assigned_key,
older_assigned_and_completed_key,
younger_assigned_and_completed_key
], [r.key() for r in results])
def test_get_expiry_query_filters_and_orders_correctly(self):
    """get_expiry_query must skip completed/removed/other-unit steps and order by age."""
    # One summary shared by every step created below.
    summary_key = peer.ReviewSummary(
        assigned_count=2, completed_count=1, reviewee_key=self.reviewee_key,
        submission_key=self.submission_key, unit_id=self.unit_id
    ).put()
    # COMPLETED state: must be excluded from the expiry query.
    unused_completed_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=self.reviewee_key,
        reviewer_key=self.reviewer_key, submission_key=self.submission_key,
        state=domain.REVIEW_STATE_COMPLETED, unit_id=self.unit_id
    ).put()
    second_reviewee_key = models.Student(
        key_name='reviewee2@example.com').put()
    second_submission_key = student_work.Submission(
        reviewee_key=second_reviewee_key, unit_id=self.unit_id).put()
    # removed=True: must be excluded even though it is ASSIGNED.
    unused_removed_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO, removed=True,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=second_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=second_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    third_reviewee_key = models.Student(
        key_name='reviewee3@example.com').put()
    third_submission_key = student_work.Submission(
        reviewee_key=third_reviewee_key, unit_id=self.unit_id).put()
    # Different unit_id: must be excluded by the unit filter.
    unused_other_unit_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=third_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=third_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED,
        unit_id=str(int(self.unit_id) + 1)
    ).put()
    fourth_reviewee_key = models.Student(
        key_name='reviewee4@example.com').put()
    fourth_submission_key = student_work.Submission(
        reviewee_key=fourth_reviewee_key, unit_id=self.unit_id).put()
    # First matching step; created before the second, so it must come first
    # in the age-ordered results (creation order matters here).
    first_assigned_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=fourth_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=fourth_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    fifth_reviewee_key = models.Student(
        key_name='reviewee5@example.com').put()
    fifth_submission_key = student_work.Submission(
        reviewee_key=fifth_reviewee_key, unit_id=self.unit_id).put()
    # Second matching step; younger, so ordered after the first.
    second_assigned_step_key = peer.ReviewStep(
        assigner_kind=domain.ASSIGNER_KIND_AUTO,
        review_key=db.Key.from_path(student_work.Review.kind(), 'review'),
        review_summary_key=summary_key, reviewee_key=fifth_reviewee_key,
        reviewer_key=self.reviewer_key,
        submission_key=fifth_submission_key,
        state=domain.REVIEW_STATE_ASSIGNED, unit_id=self.unit_id
    ).put()
    # Window of 0 minutes: both assigned steps are already expired.
    zero_review_window_query = review_module.Manager.get_expiry_query(
        0, self.unit_id)
    future_review_window_query = review_module.Manager.get_expiry_query(
        1, self.unit_id)
    self.assertEqual(
        [first_assigned_step_key, second_assigned_step_key],
        zero_review_window_query.fetch(3))
    # No items are > 1 minute old, so we expect an empty result set.
    self.assertEqual(None, future_review_window_query.get())
def test_get_new_review_creates_step_and_updates_summary(self):
    """get_new_review must create an ASSIGNED step and bump assigned_count."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id
    ).put()
    # Sanity-check the precondition: nothing assigned yet.
    summary = db.get(summary_key)
    self.assertEqual(0, summary.assigned_count)
    step_key = review_module.Manager.get_new_review(
        self.unit_id, self.reviewer_key)
    # Re-fetch both entities so we observe the persisted side effects.
    step, summary = db.get([step_key, summary_key])
    self.assertEqual(domain.ASSIGNER_KIND_AUTO, step.assigner_kind)
    self.assertEqual(summary.key(), step.review_summary_key)
    self.assertEqual(self.reviewee_key, step.reviewee_key)
    self.assertEqual(self.reviewer_key, step.reviewer_key)
    self.assertEqual(domain.REVIEW_STATE_ASSIGNED, step.state)
    self.assertEqual(self.submission_key, step.submission_key)
    self.assertEqual(self.unit_id, step.unit_id)
    # The summary's assigned counter must have been incremented.
    self.assertEqual(1, summary.assigned_count)
def test_get_new_review_raises_key_error_when_summary_missing(self):
    """get_new_review must surface a deleted ReviewSummary as KeyError."""
    summary_key = peer.ReviewSummary(
        reviewee_key=self.reviewee_key, submission_key=self.submission_key,
        unit_id=self.unit_id
    ).put()

    # Create and bind a function that we can swap in to pick the review
    # candidate but as a side effect delete the review summary, causing
    # the subsequent lookup by key to fail.
    def pick_and_remove(unused_cls, candidates):
        db.delete(summary_key)
        return candidates[0]

    fn = types.MethodType(
        pick_and_remove, review_module.Manager(), review_module.Manager)
    self.swap(
        review_module.Manager, '_choose_assignment_candidate', fn)
    self.assertRaises(
        KeyError, review_module.Manager.get_new_review, self.unit_id,
        self.reviewer_key)
def test_get_new_review_raises_not_assignable_when_already_assigned(self):
summary_key = peer.ReviewSummary(
assigned_count=1, reviewee_key=self.reviewee_key,
submission_key=self.submission_key, unit_id=self.unit_ |
d on rasterio files. Try to load your data with ds.load()'
'first.')
class RasterioArrayWrapper(BackendArray):
    """A wrapper around rasterio dataset objects"""

    def __init__(self, manager, lock, vrt_params=None):
        # manager: a file manager whose acquire() yields an open rasterio
        # dataset; lock: guards concurrent reads; vrt_params: if given, the
        # dataset is wrapped in a WarpedVRT with these keyword arguments.
        from rasterio.vrt import WarpedVRT
        self.manager = manager
        self.lock = lock

        # cannot save riods as an attribute: this would break pickleability
        riods = manager.acquire()
        if vrt_params is not None:
            riods = WarpedVRT(riods, **vrt_params)
        self.vrt_params = vrt_params
        # Shape is (band, y, x) following rasterio's axis order.
        self._shape = (riods.count, riods.height, riods.width)

        dtypes = riods.dtypes
        if not np.all(np.asarray(dtypes) == dtypes[0]):
            raise ValueError('All bands should have the same dtype')
        self._dtype = np.dtype(dtypes[0])

    @property
    def dtype(self):
        # Common dtype of all bands (validated in __init__).
        return self._dtype

    @property
    def shape(self):
        # (band, y, x) tuple captured at construction time.
        return self._shape

    def _get_indexer(self, key):
        """ Get indexer for rasterio array.

        Parameters
        ----------
        key: tuple of int

        Returns
        -------
        band_key: an indexer for the 1st dimension
        window: two tuples. Each consists of (start, stop).
        squeeze_axis: axes to be squeezed
        np_ind: indexer for loaded numpy array

        See also
        --------
        indexing.decompose_indexer
        """
        assert len(key) == 3, 'rasterio datasets should always be 3D'

        # bands cannot be windowed but they can be listed
        band_key = key[0]
        np_inds = []
        # bands (axis=0) cannot be windowed but they can be listed
        if isinstance(band_key, slice):
            start, stop, step = band_key.indices(self.shape[0])
            band_key = np.arange(start, stop, step)
        # be sure we give out a list
        # (rasterio band numbers are 1-based, hence the +1)
        band_key = (np.asarray(band_key) + 1).tolist()
        if isinstance(band_key, list):  # if band_key is not a scalar
            np_inds.append(slice(None))

        # but other dims can only be windowed
        window = []
        squeeze_axis = []
        for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):
            if isinstance(k, slice):
                # step is always positive. see indexing.decompose_indexer
                start, stop, step = k.indices(n)
                np_inds.append(slice(None, None, step))
            elif is_scalar(k):
                # windowed operations will always return an array
                # we will have to squeeze it later
                squeeze_axis.append(- (2 - i))
                start = k
                stop = k + 1
            else:
                # Array index: read the covering window, then pick the
                # requested rows/cols from the loaded block.
                start, stop = np.min(k), np.max(k) + 1
                np_inds.append(k - start)
            window.append((start, stop))

        if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
            # do outer-style indexing
            np_inds[-2:] = np.ix_(*np_inds[-2:])

        return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)

    def _getitem(self, key):
        # Translate the outer-style key into a rasterio windowed read,
        # then apply the residual numpy indexing to the loaded block.
        from rasterio.vrt import WarpedVRT
        band_key, window, squeeze_axis, np_inds = self._get_indexer(key)

        if not band_key or any(start == stop for (start, stop) in window):
            # no need to do IO
            shape = (len(band_key),) + tuple(
                stop - start for (start, stop) in window)
            out = np.zeros(shape, dtype=self.dtype)
        else:
            with self.lock:
                riods = self.manager.acquire(needs_lock=False)
                if self.vrt_params is not None:
                    riods = WarpedVRT(riods, **self.vrt_params)
                out = riods.read(band_key, window=window)

        if squeeze_axis:
            out = np.squeeze(out, axis=squeeze_axis)
        return out[np_inds]

    def __getitem__(self, key):
        # Let xarray decompose arbitrary keys into OUTER-style indexing
        # that _getitem can serve.
        return indexing.explicit_indexing_adapter(
            key, self.shape, indexing.IndexingSupport.OUTER, self._getitem)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(s):
return np.fromstring(s.strip('{}'), dtype='float', sep=',')
def default(s):
return s.strip('{}')
parse = {'wavelength': parsevec,
'fwhm': parsevec}
parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
return parsed_meta
def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None,
lock=None):
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
You can generate 2D coordinates from the file's attributes with::
from affine import Affine
da = xr.open_rasterio('path_to_file.tif')
transform = Affine.from_gdal(*da.attrs['transform'])
nx, ny = da.sizes['x'], da.sizes['y']
x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform
Parameters
----------
filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates : bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
``{'x': 5, 'y': 5}``. If chunks is provided, it used to load the new
DataArray into a dask array.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a global lock is
used to avoid issues with concurrent access to the same file when using
dask's multithreaded backend.
Returns
-------
data : DataArray
The newly created DataArray.
"""
import rasterio
from rasterio.vrt import WarpedVRT
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt. | WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(crs=vrt.crs.to_string(),
resampling=vrt.resampling,
src_nodata=vrt.src_nodata,
| dst_nodata=vrt.dst_nodata,
tolerance=vrt.tolerance,
transform=vrt.transform,
width=vrt.width,
height=vrt.height,
warp_extras=vrt.warp_extras)
if lock is None:
lock = RASTERIO_LOCK
manager = CachingFileManager(rasterio.open, filename, lock=lock, mode='r')
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = ch |
#!/usr/bin/python
#----------------------------------------------------------------------
# Copyright (c) 2013-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT |
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
import json
import sys

from vmoc.VMOCConfig import VMOCSliceConfiguration, VMOCVLANConfiguration

# Usage: register_controller.py slice [vlan controller ...] [unregister]

# The script reads sys.argv[2] unconditionally below, so require at least
# three arguments (the original '< 2' guard still crashed on argv[2]).
if len(sys.argv) < 3:
    print("Usage: register_controller.py slice [vlan controller ...] [unregister]")
    sys.exit()

print(sys.argv[1])
print(sys.argv[2])

slice_id = sys.argv[1]
# argv[2] is a JSON list alternating [vlan_tag, controller_url, ...].
vlan_controllers = json.loads(sys.argv[2])
vlan_configs = []
# Walk the list two entries at a time: (vlan_tag, controller_url) pairs.
for i in range(0, len(vlan_controllers), 2):
    vlan_tag = vlan_controllers[i]
    controller_url = vlan_controllers[i + 1]
    vlan_config = VMOCVLANConfiguration(vlan_tag=vlan_tag,
                                        controller_url=controller_url)
    vlan_configs.append(vlan_config)

slice_config = VMOCSliceConfiguration(slice_id=slice_id,
                                      vlan_configs=vlan_configs)

unregister = False
if len(sys.argv) > 3:
    # NOTE(review): bool() of any non-empty string is True, so even the
    # literal string "False" triggers unregister — confirm intended.
    unregister = bool(sys.argv[3])

print(str(slice_config))

command = 'register'
if unregister:
    command = 'unregister'
# NOTE(review): __attr__ is a project-defined method on
# VMOCSliceConfiguration (not a standard dunder); presumably it returns a
# JSON-serializable representation — verify against VMOCConfig.
command = command + " " + json.dumps(slice_config.__attr__())
print(command)
|
import sys
sys.path = ['..'] + sys.path
import zope
from twisted.internet import reactor
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.transport import TTwisted
from thrift.protocol import TBinaryProtocol
from lib.genpy.snowflake import Snowflake
from lib.genpy.snowflake.ttypes import *
import idworker
class SnowflakeServer(object):
    """Thrift handler exposing IdWorker operations as the Snowflake service."""

    # Python-2-style declaration that this class implements the generated
    # Snowflake.Iface Thrift interface.
    zope.interface.implements(Snowflake.Iface)

    def __init__(self, worker_id, datacenter_id):
        # Every RPC below delegates to this single IdWorker instance.
        self.worker = idworker.IdWorker(worker_id, datacenter_id)

    def get_worker_id(self):
        return self.worker.get_worker_id()

    def get_datacenter_id(self):
        return self.worker.get_datacenter_id()

    def get_timestamp(self):
        return self.worker.get_timestamp()

    def get_id(self):
        return self.worker.get_id()
def print_usage():
    """Print command-line usage for the snowflake server."""
    # Restores the '<worker_id>' token that was garbled in the source line.
    print('python snowflakeserver.py <port> <worker_id> <datacenter_id>')
    print('e.g. python snowflakeserver.py 1111 1 1')
def main():
    """Parse argv, start the Snowflake Thrift service, and run the reactor."""
    if len(sys.argv) != 4:
        # NOTE(review): print_usage() returns None, so sys.exit(main())
        # exits with status 0 on bad usage — confirm intended.
        return print_usage()
    port = int(sys.argv[1])
    worker_id = int(sys.argv[2])
    datacenter_id = int(sys.argv[3])
    # Serve the Snowflake processor over Twisted's TCP transport using the
    # binary Thrift protocol.
    reactor.listenTCP(port, TTwisted.ThriftServerFactory(
        processor=Snowflake.Processor(SnowflakeServer(worker_id, datacenter_id)),
        iprot_factory=TBinaryProtocol.TBinaryProtocolFactory()
    ))
    reactor.run()
# Script entry point: the process exit status is main()'s return value.
if __name__ == '__main__':
    sys.exit(main())
'''
Convert an image file to a data uri.
Copyright 2012 GoodCrypto
Last modified: 2013-11-13
This file is open source, licensed under GPLv3 <http://www.gnu.org/licenses/>.
'''
import os.path
from django import template
import reinhardt.data_image
register = template.Library()
@register.filter
def data_img(filename, browser=None):
    ''' Encode an image file in base 64 as a data uri.

        The filename is relative to settings.STATIC_URL/settings.STATIC_ROOT.
        If the datauri is too large or anything goes wrong,
        returns the url to the filename.

        Example:
            <img alt="embedded image" src="{{ 'images/myimage.png'|data_img:browser }}">
    '''
    # All the work happens in reinhardt.data_image; this is just the
    # template-filter adapter.
    return reinhardt.data_image.data_image(filename, browser=browser)
|
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ResourceFinder is a helper class for finding resources given their name."""
import codecs
import os
from py_vulcanize import module
from py_vulcanize import style_sheet as style_sheet_module
from py_vulcanize import resource as resource_module
from py_vulcanize import html_module
from py_vulcanize import strip_js_comments
class ResourceLoader(object):
  """Manages loading modules and their dependencies from files.

  Modules handle parsing and the construction of their individual dependency
  pointers. The loader deals with bookkeeping of what has been loaded, and
  mapping names to file resources.
  """

  def __init__(self, project):
    self.project = project
    # Caches, keyed as noted, so each artifact is loaded at most once.
    self.stripped_js_by_filename = {}   # filename -> comment-stripped JS
    self.loaded_modules = {}            # module name -> HTMLModule
    self.loaded_raw_scripts = {}        # absolute path -> RawScript
    self.loaded_style_sheets = {}       # name -> StyleSheet
    self.loaded_images = {}             # absolute path -> Image

  @property
  def source_paths(self):
    """A list of base directories to search for modules under."""
    return self.project.source_paths

  def FindResource(self, some_path, binary=False):
    """Finds a Resource for the given path.

    Args:
      some_path: A relative or absolute path to a file.

    Returns:
      A Resource or None.
    """
    if os.path.isabs(some_path):
      return self.FindResourceGivenAbsolutePath(some_path, binary)
    else:
      return self.FindResourceGivenRelativePath(some_path, binary)

  def FindResourceGivenAbsolutePath(self, absolute_path, binary=False):
    """Returns a Resource for the given absolute path."""
    candidate_paths = []
    for source_path in self.source_paths:
      if absolute_path.startswith(source_path):
        candidate_paths.append(source_path)
    if len(candidate_paths) == 0:
      return None

    # Sort by length. Longest match wins. (The original used a Python-2
    # cmp-style sort argument; key=len is the portable equivalent.)
    candidate_paths.sort(key=len)
    longest_candidate = candidate_paths[-1]
    return resource_module.Resource(longest_candidate, absolute_path, binary)

  def FindResourceGivenRelativePath(self, relative_path, binary=False):
    """Returns a Resource for the given relative path."""
    absolute_path = None
    for script_path in self.source_paths:
      absolute_path = os.path.join(script_path, relative_path)
      if os.path.exists(absolute_path):
        return resource_module.Resource(script_path, absolute_path, binary)
    return None

  def _FindResourceGivenNameAndSuffix(
      self, requested_name, extension, return_resource=False):
    """Searches for a file and reads its contents.

    Args:
      requested_name: The name of the resource that was requested.
      extension: The extension for this requested resource.

    Returns:
      A (path, contents) pair, or the Resource itself when
      return_resource=True.
    """
    # Dotted module names map to nested directories, e.g. 'a.b' -> 'a/b'.
    pathy_name = requested_name.replace('.', os.sep)
    filename = pathy_name + extension

    resource = self.FindResourceGivenRelativePath(filename)
    if return_resource:
      return resource
    if not resource:
      return None, None
    return _read_file(resource.absolute_path)

  def FindModuleResource(self, requested_module_name):
    """Finds a module javascript file and returns a Resource, or none."""
    js_resource = self._FindResourceGivenNameAndSuffix(
        requested_module_name, '.js', return_resource=True)
    html_resource = self._FindResourceGivenNameAndSuffix(
        requested_module_name, '.html', return_resource=True)
    if js_resource and html_resource:
      # Both exist: let html_module decide which one is the real module.
      if html_module.IsHTMLResourceTheModuleGivenConflictingResourceNames(
          js_resource, html_resource):
        return html_resource
      return js_resource
    elif js_resource:
      return js_resource
    return html_resource

  def LoadModule(self, module_name=None, module_filename=None,
                 excluded_scripts=None):
    """Loads (and caches) a module by name or by filename.

    Exactly one of module_name / module_filename must be given.
    """
    assert bool(module_name) ^ bool(module_filename), (
        'Must provide either module_name or module_filename.')
    if module_filename:
      resource = self.FindResource(module_filename)
      if not resource:
        raise Exception('Could not find %s in %s' % (
            module_filename, repr(self.source_paths)))
      module_name = resource.name
    else:
      resource = None  # Will be set if we end up needing to load.

    if module_name in self.loaded_modules:
      assert self.loaded_modules[module_name].contents
      return self.loaded_modules[module_name]

    if not resource:  # happens when module_name was given
      resource = self.FindModuleResource(module_name)
      if not resource:
        raise module.DepsException('No resource for module "%s"' % module_name)

    m = html_module.HTMLModule(self, module_name, resource)
    self.loaded_modules[module_name] = m

    # Fake it, this is probably either polymer.min.js or platform.js which are
    # actually .js files....
    if resource.absolute_path.endswith('.js'):
      return m

    m.Parse(excluded_scripts)
    m.Load(excluded_scripts)
    return m

  def LoadRawScript(self, relative_raw_script_path):
    """Loads (and caches) a raw, non-module script by its relative path."""
    resource = None
    for source_path in self.source_paths:
      possible_absolute_path = os.path.join(
          source_path, os.path.normpath(relative_raw_script_path))
      if os.path.exists(possible_absolute_path):
        resource = resource_module.Resource(
            source_path, possible_absolute_path)
        break
    if not resource:
      raise module.DepsException(
          'Could not find a file for raw script %s in %s' %
          (relative_raw_script_path, self.source_paths))
    assert relative_raw_script_path == resource.unix_style_relative_path, (
        'Expected %s == %s' % (relative_raw_script_path,
                               resource.unix_style_relative_path))

    if resource.absolute_path in self.loaded_raw_scripts:
      return self.loaded_raw_scripts[resource.absolute_path]

    raw_script = module.RawScript(resource)
    self.loaded_raw_scripts[resource.absolute_path] = raw_script
    return raw_script

  def LoadStyleSheet(self, name):
    """Loads (and caches) a stylesheet by its dotted name."""
    if name in self.loaded_style_sheets:
      return self.loaded_style_sheets[name]

    resource = self._FindResourceGivenNameAndSuffix(
        name, '.css', return_resource=True)
    if not resource:
      raise module.DepsException(
          'Could not find a file for stylesheet %s' % name)

    style_sheet = style_sheet_module.StyleSheet(self, name, resource)
    style_sheet.load()
    self.loaded_style_sheets[name] = style_sheet
    return style_sheet

  def LoadImage(self, abs_path):
    """Loads (and caches) an image referenced by an absolute path."""
    if abs_path in self.loaded_images:
      return self.loaded_images[abs_path]

    if not os.path.exists(abs_path):
      raise module.DepsException("url('%s') did not exist" % abs_path)

    res = self.FindResourceGivenAbsolutePath(abs_path, binary=True)
    if res is None:
      raise module.DepsException("url('%s') was not in search path" % abs_path)

    image = style_sheet_module.Image(res)
    self.loaded_images[abs_path] = image
    return image

  def GetStrippedJSForFilename(self, filename, early_out_if_no_py_vulcanize):
    """Returns comment-stripped JS for a file, caching the result.

    Only the first 4KB of the file are read; when
    early_out_if_no_py_vulcanize is set and that prefix never mentions
    'py_vulcanize', returns None without stripping.
    """
    if filename in self.stripped_js_by_filename:
      return self.stripped_js_by_filename[filename]

    with open(filename, 'r') as f:
      contents = f.read(4096)
    if early_out_if_no_py_vulcanize and ('py_vulcanize' not in contents):
      return None

    s = strip_js_comments.StripJSComments(contents)
    self.stripped_js_by_filename[filename] = s
    return s
def _read_file(absolute_path):
"""Reads a file and returns a (path, contents) pair.
Args:
absolute_path: Absolute path to a file.
Raises:
Exception: The given file doesn't exist.
IOError: There was a problem opening or reading the file.
"""
if not os.path.exists(absolute_path):
raise Exception('%s not found.' % absolute_path)
f = codecs.open(absolute_path, mode='r', encoding='utf-8')
contents = f.read()
f.close()
return absolute_path, contents
|
num_segments=num_segments)
return voxel_features, voxel_xyz_indices, segment_ids, voxel_start_location
def _pad_or_clip_voxels(voxel_features, voxel_indices, num_valid_voxels,
                        segment_ids, voxels_pad_or_clip_size):
  """Pads or clips voxels.

  When voxels_pad_or_clip_size is falsy, all inputs pass through untouched.
  Otherwise the voxel tensors are padded/clipped along their first axis to
  exactly voxels_pad_or_clip_size entries, num_valid_voxels is capped, and
  segment ids that point at clipped-away voxels are zeroed out.
  """
  if voxels_pad_or_clip_size:
    num_valid_voxels = tf.minimum(num_valid_voxels, voxels_pad_or_clip_size)
    num_channels = voxel_features.get_shape().as_list()[-1]
    # voxel_features is rank 2 ([V, C]) or rank 3 ([V, G, C]); build the
    # matching static/dynamic target shape.
    if len(voxel_features.shape.as_list()) == 2:
      output_shape = [voxels_pad_or_clip_size, num_channels]
    elif len(voxel_features.shape.as_list()) == 3:
      num_samples_per_voxel = voxel_features.get_shape().as_list()[1]
      if num_samples_per_voxel is None:
        # Middle dimension is dynamic; fall back to the runtime shape.
        num_samples_per_voxel = tf.shape(voxel_features)[1]
      output_shape = [
          voxels_pad_or_clip_size, num_samples_per_voxel, num_channels
      ]
    else:
      raise ValueError('voxel_features should be either rank 2 or 3.')
    voxel_features = shape_utils.pad_or_clip_nd(
        tensor=voxel_features, output_shape=output_shape)
    voxel_indices = shape_utils.pad_or_clip_nd(
        tensor=voxel_indices, output_shape=[voxels_pad_or_clip_size, 3])
    # Zero out segment ids that refer to voxels beyond the clipped count so
    # they no longer index past the valid range.
    valid_segment_ids_mask = tf.cast(
        tf.less(segment_ids, num_valid_voxels), dtype=tf.int32)
    segment_ids *= valid_segment_ids_mask
  return voxel_features, voxel_indices, num_valid_voxels, segment_ids
def pointcloud_to_sparse_voxel_grid(points, features, num_valid_points,
                                    grid_cell_size, voxels_pad_or_clip_size,
                                    segment_func):
  """Converts a pointcloud into a voxel grid.

  This function calls the `pointcloud_to_sparse_voxel_grid_unbatched`
  function above in a while loop to map a batch of points to a batch of
  voxels.

  Args:
    points: A tf.float32 tensor of size [batch_size, N, 3].
    features: A tf.float32 tensor of size [batch_size, N, F].
    num_valid_points: A tf.int32 tensor of size [num_batches] containing the
      number of valid points in each batch example.
    grid_cell_size: A tf.float32 tensor of size [3].
    voxels_pad_or_clip_size: Number of target voxels to pad or clip to. If
      None, it will not perform the padding.
    segment_func: A tensorflow function that operates on segments. Examples
      are one of tf.math.unsorted_segment_{min/max/mean/prod/sum}.

  Returns:
    voxel_features: A tf.float32 tensor of size [batch_size, N', F]
      or [batch_size, N', G, F] where G is the number of points sampled per
      voxel.
    voxel_indices: A tf.int32 tensor of size [batch_size, N', 3].
    num_valid_voxels: A tf.int32 tensor of size [batch_size].
    segment_ids: A size [batch_size, N] tf.int32 tensor of IDs for each point
      indicating which (flattened) voxel cell its data was mapped to.
    voxel_start_location: A size [batch_size, 3] tf.float32 tensor of voxel
      start locations.

  Raises:
    ValueError: If pooling method is unknown.
  """
  batch_size = points.get_shape().as_list()[0]
  if batch_size is None:
    batch_size = tf.shape(points)[0]
  num_points = tf.shape(points)[1]

  def fn(i):
    """Voxelizes batch element i and pads/clips the result."""
    num_valid_points_i = num_valid_points[i]
    # Drop padded points before voxelization.
    points_i = points[i, :num_valid_points_i, :]
    features_i = features[i, :num_valid_points_i, :]
    voxel_features_i, voxel_indices_i, segment_ids_i, voxel_start_location_i = (
        pointcloud_to_sparse_voxel_grid_unbatched(
            points=points_i,
            features=features_i,
            grid_cell_size=grid_cell_size,
            segment_func=segment_func))
    num_valid_voxels_i = tf.shape(voxel_features_i)[0]
    (voxel_features_i, voxel_indices_i, num_valid_voxels_i,
     segment_ids_i) = _pad_or_clip_voxels(
         voxel_features=voxel_features_i,
         voxel_indices=voxel_indices_i,
         num_valid_voxels=num_valid_voxels_i,
         segment_ids=segment_ids_i,
         voxels_pad_or_clip_size=voxels_pad_or_clip_size)
    # Pad per-point segment ids back out to the full (padded) point count so
    # every batch element has the same second dimension.
    segment_ids_i = tf.pad(
        segment_ids_i, paddings=[[0, num_points - num_valid_points_i]])
    return (voxel_features_i, voxel_indices_i, num_valid_voxels_i,
            segment_ids_i, voxel_start_location_i)

  return tf.map_fn(
      fn=fn,
      elems=tf.range(batch_size),
      dtype=(tf.float32, tf.int32, tf.int32, tf.int32, tf.float32))
def sparse_voxel_grid_to_pointcloud(voxel_features, segment_ids,
                                    num_valid_voxels, num_valid_points):
  """Scatters voxel features back onto the points that produced them.

  Args:
    voxel_features: A tf.float32 tensor of size [batch_size, N', F].
    segment_ids: A size [batch_size, N] tf.int32 tensor of IDs for each point
      indicating which (flattened) voxel cell its data was mapped to.
    num_valid_voxels: A tf.int32 tensor of size [batch_size] containing the
      number of valid voxels in each batch example.
    num_valid_points: A tf.int32 tensor of size [batch_size] containing the
      number of valid points in each batch example.

  Returns:
    point_features: A tf.float32 tensor of size [batch_size, N, F].

  Raises:
    ValueError: If batch_size is unknown at graph construction time.
  """
  batch_size = voxel_features.shape[0]
  if batch_size is None:
    raise ValueError('batch_size is unknown at graph construction time.')
  num_points = tf.shape(segment_ids)[1]

  def _gather_example(example_index):
    """Gathers per-point features for one example and zero-pads to N."""
    valid_voxels = num_valid_voxels[example_index]
    valid_points = num_valid_points[example_index]
    example_voxels = voxel_features[example_index, :valid_voxels, :]
    example_segments = segment_ids[example_index, :valid_points]
    gathered = tf.gather(example_voxels, example_segments)
    # Pad only the leading (point) axis; feature axes stay untouched.
    rank = len(gathered.get_shape().as_list())
    paddings = [[0, num_points - valid_points]] + [[0, 0]] * (rank - 1)
    return tf.pad(gathered, paddings=paddings)

  return tf.map_fn(fn=_gather_example, elems=tf.range(batch_size),
                   dtype=tf.float32)
@gin.configurable
def per_voxel_point_sample_segment_func(data, segment_ids, num_segments,
num_samples_per_voxel):
"""Samples features from the points within each voxel.
Args:
data: A tf.float32 tensor of size [N, F].
segment_ids: A tf.int32 tensor of size [N].
num_segments: Number of segments.
num_samples_per_voxel: Number of features to sample per voxel. If the voxel
has less number of points in it, the point features will be padded by 0.
Returns:
A tf.float32 tensor of size [num_segments, num_samples_per_voxel, F].
A tf.int32 indices of size [N, num_samples_per_voxel].
"""
num_channels = data.get_shape().as_list()[1]
if num_channels is None:
raise ValueError('num_channels is None.')
n = tf.shape(segment_ids)[0]
def _body_fn(i, indices_range, indices):
"""Computes the indices of the i-th point feature in each segment."""
indices_i = tf.math.unsorted_segment_max(
data=indices_range, segment_ids=segment_ids, num_segments=num_segments)
indices_i_positive_mask = tf.greater(indices_i, 0)
indices_i_positive = tf.boolean_mask(indices_i, indices_i_positive_mask)
boolean_mask = tf.scatter_nd(
indices=tf.cast(
tf.expand_dims(indices_i_positive - 1, axis=1), dtype=tf.int64),
updates=tf.ones_like(indices_i_positive, dtype=tf.int32),
shape=(n,))
indices_range *= (1 - boolean_mask)
indices_i *= tf.cast(indices_i_positive_mask, dtype=tf.int32)
indices_i = tf.pad(
tf.expand_dims(indices_i, axis=1),
paddings=[[0, 0], [i, num_samples_per_voxel - i - 1]])
indices += indices_i
i = i + 1
return i, indices_range, indices
cond = lambda i, indices_range, indices: i < num_samples_per_voxel
(_, _, indices) = tf.while_loop(
cond=cond,
body=_body_fn,
loop_vars=(tf.constant(0, dtype=tf.int32), tf.range(n) + 1,
tf.zeros([num_segments, num_samples_per_voxel],
dtype=tf.int32)))
data = tf.pad(data, paddings=[[1, 0], [0, 0]])
voxel_features = tf.gather(data, |
#!/usr/bin/env python
import argparse
import os
import logging
import cdec.configobj
import cdec.sa
from cdec.sa._sa import monitor_cpu
import sys
MAX_PHRASE_LENGTH = 4
def precompute(f_sa, max_len, max_nt, max_size, min_gap, rank1, rank2, tight_phrases):
    """Precompute collocations of frequent patterns from the source suffix array.

    LCP statistics are computed for phrases up to MAX_PHRASE_LENGTH and the
    most frequent patterns are fed into the precomputation structure.
    """
    frequent_stats = sorted(
        cdec.sa.LCP(f_sa).compute_stats(MAX_PHRASE_LENGTH), reverse=True)
    return cdec.sa.Precomputation(
        from_stats=frequent_stats,
        fsarray=f_sa,
        precompute_rank=rank1,
        precompute_secondary_rank=rank2,
        max_length=max_len,
        max_nonterminals=max_nt,
        train_max_initial_size=max_size,
        train_min_gap_size=min_gap)
def main():
    """Compile a parallel corpus into the binary files used by the extractor.

    Builds the source suffix array, target data array, precomputed frequent
    phrases, word alignment, and bilexical dictionary, then writes a config
    file pointing at all of the generated binaries.
    """
    preprocess_start_time = monitor_cpu()
    # Suffix-array construction recurses deeply; raise the limit up front.
    sys.setrecursionlimit(sys.getrecursionlimit() * 100)
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('cdec.sa.compile')
    parser = argparse.ArgumentParser(description='Compile a corpus into a suffix array.')
    parser.add_argument('--maxnt', '-n', type=int, default=2,
                        help='Maximum number of non-terminal symbols')
    parser.add_argument('--maxlen', '-l', type=int, default=5,
                        help='Maximum number of terminals')
    parser.add_argument('--maxsize', '-s', type=int, default=15,
                        help='Maximum rule span')
    parser.add_argument('--mingap', '-g', type=int, default=1,
                        help='Minimum gap size')
    parser.add_argument('--rank1', '-r1', type=int, default=100,
                        help='Number of pre-computed frequent patterns')
    parser.add_argument('--rank2', '-r2', type=int, default=10,
                        help='Number of pre-computed super-frequent patterns')
    parser.add_argument('--loose', action='store_true',
                        help='Enable loose phrase extraction (default: tight)')
    parser.add_argument('-c', '--config', default='/dev/stdout',
                        help='Output configuration')
    parser.add_argument('-f', '--source',
                        help='Source language corpus')
    parser.add_argument('-e', '--target',
                        help='Target language corpus')
    parser.add_argument('-b', '--bitext',
                        help='Parallel text (source ||| target)')
    parser.add_argument('-a', '--alignment', required=True,
                        help='Bitext word alignment')
    parser.add_argument('-o', '--output', required=True,
                        help='Output path')
    args = parser.parse_args()
    # Either a pre-split corpus (-f/-e) or a single bitext file (-b) is required.
    if not ((args.source and args.target) or args.bitext):
        parser.error('a parallel corpus is required\n'
                     '\tuse -f (source) with -e (target) or -b (bitext)')
    param_names = ('max_len', 'max_nt', 'max_size', 'min_gap',
                   'rank1', 'rank2', 'tight_phrases')
    params = (args.maxlen, args.maxnt, args.maxsize, args.mingap,
              args.rank1, args.rank2, not args.loose)
    if not os.path.exists(args.output):
        os.mkdir(args.output)
    f_sa_bin = os.path.join(args.output, 'f.sa.bin')
    e_bin = os.path.join(args.output, 'e.bin')
    # Encode the extraction parameters in the precomputation file name so
    # different settings do not clobber each other.
    precomp_file = 'precomp.{0}.{1}.{2}.{3}.{4}.{5}.bin'.format(*params)
    precomp_bin = os.path.join(args.output, precomp_file)
    a_bin = os.path.join(args.output, 'a.bin')
    lex_bin = os.path.join(args.output, 'lex.bin')
    start_time = monitor_cpu()
    logger.info('Compiling source suffix array')
    if args.bitext:
        f_sa = cdec.sa.SuffixArray(from_text=args.bitext, side='source')
    else:
        f_sa = cdec.sa.SuffixArray(from_text=args.source)
    f_sa.write_binary(f_sa_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling source suffix array took %f seconds', stop_time - start_time)
    start_time = monitor_cpu()
    logger.info('Compiling target data array')
    if args.bitext:
        e = cdec.sa.DataArray(from_text=args.bitext, side='target')
    else:
        e = cdec.sa.DataArray(from_text=args.target)
    e.write_binary(e_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling target data array took %f seconds', stop_time - start_time)
    start_time = monitor_cpu()
    logger.info('Precomputing frequent phrases')
    precompute(f_sa, *params).write_binary(precomp_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling precomputations took %f seconds', stop_time - start_time)
    start_time = monitor_cpu()
    logger.info('Compiling alignment')
    a = cdec.sa.Alignment(from_text=args.alignment)
    a.write_binary(a_bin)
    stop_time = monitor_cpu()
    # Fixed typo: 'seonds' -> 'seconds'.
    logger.info('Compiling alignment took %f seconds', stop_time - start_time)
    start_time = monitor_cpu()
    logger.info('Compiling bilexical dictionary')
    lex = cdec.sa.BiLex(from_data=True, alignment=a, earray=e, fsarray=f_sa)
    lex.write_binary(lex_bin)
    stop_time = monitor_cpu()
    logger.info('Compiling bilexical dictionary took %f seconds', stop_time - start_time)
    # Write configuration pointing at all generated binaries.
    config = cdec.configobj.ConfigObj(args.config, unrepr=True)
    config['f_sa_file'] = os.path.abspath(f_sa_bin)
    config['e_file'] = os.path.abspath(e_bin)
    config['a_file'] = os.path.abspath(a_bin)
    config['lex_file'] = os.path.abspath(lex_bin)
    config['precompute_file'] = os.path.abspath(precomp_bin)
    for name, value in zip(param_names, params):
        config[name] = value
    config.write()
    preprocess_stop_time = monitor_cpu()
    logger.info('Overall preprocessing step took %f seconds', preprocess_stop_time - preprocess_start_time)
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Define base Blueprint."""
from flask import Blueprint
# Blueprint exposing Invenio's base templates and static files to the app.
blueprint = Blueprint('base', __name__, template_folder='templates',
                      static_folder='static')
|
# Paired fixture: the transformation under test is expected to leave this
# grammar fragment unchanged, so input and output are identical.
input = """
x | -x.
y | -y.
"""
output = """
x | -x.
y | -y.
"""
|
"""
Copyright 2014 Jason Heeris, jason.heeris@gmail.com
This file is part of the dungeon excavator web interface ("webcavate").
Webcavate is free software: you can redistribute it and/or modify it under the
terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
Webcavate is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
webcavate. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import uuid
from flask import Flask, render_template, request, make_response, redirect, url_for, flash
from dungeon.excavate import render_room
# Text shown by argparse for --help in main().
HELP_TEXT = """\
Web interface to the dungeon excavator."""

app = Flask('dungeon.web')
# Random per-process secret key (used to sign flashed messages); flashes and
# sessions do not survive a restart.
app.secret_key = str(uuid.uuid4())
@app.route("/")
def root():
    """ Web interface landing page. """
    # Static form page; the form posts to the /map endpoints below.
    return render_template('index.html')
@app.route("/error")
def error():
    """ Display errors flashed by make_map/process. """
    return render_template('error.html')
def make_map(request, format):
    """Render a dungeon map from the submitted form data.

    Reads the tile size and the three uploaded images (walls, floor,
    floorplan) from the request and renders the room in the requested
    format. On a rendering error the message is flashed and the user is
    redirected to the error page.
    """
    size = int(request.form['size'])
    walls = request.files['walls']
    floor = request.files['floor']
    floorplan = request.files['floorplan']

    try:
        room_data, content_type = render_room(
            floor.read(),
            walls.read(),
            floorplan.read(),
            size,
            format
        )
    except ValueError as err:
        flash(str(err))
        return redirect(url_for('error'))

    # Build the HTTP response carrying the rendered map.
    response = make_response(room_data)
    response.headers['Content-Type'] = content_type
    return response
@app.route("/map.svg", methods=['POST'])
def map_svg():
    """ Render the submitted map as SVG. """
    return make_map(request, format='svg')
@app.route("/map.png", methods=['POST'])
def map_png():
    """ Render the submitted map as PNG. """
    return make_map(request, format='png')
@app.route("/map.jpg", methods=['POST'])
def map_jpg():
    """ Render the submitted map as JPEG. """
    return make_map(request, format='jpg')
@app.route("/map", methods=['POST'])
def process():
    """ Dispatch the submitted form to the handler for the chosen format. """
    endpoints = {
        'png': 'map_png',
        'svg': 'map_svg',
        'jpg': 'map_jpg',
    }
    requested = request.form['format']
    if requested not in endpoints:
        flash("The output format you selected is not supported.")
        return redirect(url_for('error'))
    # 307 preserves the POST method and body across the redirect.
    return redirect(url_for(endpoints[requested], _method='POST'), code=307)
def main():
    """ Parse arguments and get things going for the web interface. """
    parser = argparse.ArgumentParser(description=HELP_TEXT)
    parser.add_argument(
        '-p', '--port',
        help="Port to serve the interface on.",
        type=int,
        default=5050
    )
    parser.add_argument(
        '-a', '--host',
        # Fixed typo: "server" -> "serve".
        help="Host to serve the interface on.",
    )
    args = parser.parse_args()
    app.run(port=args.port, host=args.host, debug=False)
|
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, json_output
@click.command('create_library')
@click.argument("name", type=str)
@click.option(
    "--description",
    help="Optional data library description",
    type=str
)
@click.option(
    "--synopsis",
    help="Optional data library synopsis",
    type=str
)
@pass_context
@custom_exception
@json_output
def cli(ctx, name, description="", synopsis=""):
    """Create a data library with the properties defined in the arguments.

    Output:

        Details of the created library.
        For example::

            {'id': 'f740ab636b360a70',
             'name': 'Library from bioblend',
             'url': '/api/libraries/f740ab636b360a70'}
    """
    return ctx.gi.libraries.create_library(name, description=description, synopsis=synopsis)
|
import galaxy.model
from logging import getLogger
log = getLogger( __name__ )
# Sentinel meaning "roles not fetched yet" (distinct from None/empty lists,
# which are valid role values).
ROLES_UNSET = object()
# Datasets in these states can never match a tool parameter.
INVALID_STATES = [ galaxy.model.Dataset.states.ERROR, galaxy.model.Dataset.states.DISCARDED ]
class DatasetMatcher( object ):
    """ Utility class to aid DataToolParameter and similar classes in reasoning
    about what HDAs could match or are selected for a parameter and value.

    Goal here is to both encapsulate and reuse logic related to filtering,
    datatype matching, hiding errored datasets, finding implicit conversions,
    and permission handling.
    """

    def __init__( self, trans, param, value, other_values ):
        self.trans = trans
        self.param = param
        self.tool = param.tool
        self.value = value
        # Roles are expensive to load, so they are fetched lazily in
        # __can_access_dataset and cached here.
        self.current_user_roles = ROLES_UNSET
        filter_value = None
        if param.options:
            try:
                filter_value = param.options.get_options( trans, other_values )[0][0]
            except IndexError:
                pass  # no valid options
        self.filter_value = filter_value

    def hda_accessible( self, hda, check_security=True ):
        """ Does HDA correspond to a dataset that is in a valid state and is
        accessible to the user.
        """
        dataset = hda.dataset
        # Idiom fix: `x not in` instead of `not x in`.
        state_valid = dataset.state not in INVALID_STATES
        return state_valid and ( not check_security or self.__can_access_dataset( dataset ) )

    def valid_hda_match( self, hda, check_implicit_conversions=True, check_security=False ):
        """ Return False if this parameter cannot be matched to the supplied
        HDA, otherwise return a description of the match (either a
        HdaDirectMatch describing a direct match or a HdaImplicitMatch
        describing an implicit conversion).
        """
        if self.filter( hda ):
            return False
        formats = self.param.formats
        if hda.datatype.matches_any( formats ):
            return HdaDirectMatch( hda )
        if not check_implicit_conversions:
            return False
        target_ext, converted_dataset = hda.find_conversion_destination( formats )
        if target_ext:
            if converted_dataset:
                hda = converted_dataset
            if check_security and not self.__can_access_dataset( hda.dataset ):
                return False
            return HdaImplicitMatch( hda, target_ext )
        return False

    def hda_match( self, hda, check_implicit_conversions=True, ensure_visible=True ):
        """ If HDA is accessible, return information about whether it could
        match this parameter and if so how. See valid_hda_match for more
        information.
        """
        accessible = self.hda_accessible( hda )
        if accessible and ( not ensure_visible or hda.visible or ( self.selected( hda ) and not hda.implicitly_converted_parent_datasets ) ):
            # If we are sending data to an external application, then we need to make sure there are no roles
            # associated with the dataset that restrict its access from "public".
            require_public = self.tool and self.tool.tool_type == 'data_destination'
            if require_public and not self.trans.app.security_agent.dataset_is_public( hda.dataset ):
                return False
            if self.filter( hda ):
                return False
            return self.valid_hda_match( hda, check_implicit_conversions=check_implicit_conversions )

    def selected( self, hda ):
        """ Given value for DataToolParameter, is this HDA "selected".
        """
        value = self.value
        if value and str( value[ 0 ] ).isdigit():
            # Values look like database ids encoded as strings.
            return hda.id in map( int, value )
        else:
            return value and hda in value

    def filter( self, hda ):
        """ Filter out this value based on other values for job (if
        applicable).
        """
        param = self.param
        return param.options and param._options_filter_attribute( hda ) != self.filter_value

    def __can_access_dataset( self, dataset ):
        # Lazily cache current_user_roles.
        if self.current_user_roles is ROLES_UNSET:
            self.current_user_roles = self.trans.get_current_user_roles()
        return self.trans.app.security_agent.can_access_dataset( self.current_user_roles, dataset )
class HdaDirectMatch( object ):
    """ Supplied HDA was a valid option directly (no implicit conversion
    was required).
    """

    def __init__( self, hda ):
        self.hda = hda

    @property
    def implicit_conversion( self ):
        # Direct matches never require conversion.
        return False
class HdaImplicitMatch( object ):
    """ Supplied HDA matched the parameter only after an implicit datatype
    conversion to `target_ext`. (Docstring previously copy-pasted from
    HdaDirectMatch and incorrect.)
    """

    def __init__( self, hda, target_ext ):
        self.hda = hda
        self.target_ext = target_ext

    @property
    def implicit_conversion( self ):
        return True
class DatasetCollectionMatcher( object ):
    """ Adapts a DatasetMatcher to decide whether entire dataset collections
    (including nested sub-collections) match a parameter.
    """

    def __init__( self, dataset_matcher ):
        self.dataset_matcher = dataset_matcher

    def __valid_element( self, element ):
        # Simplify things for now and assume these are hdas and not implicit
        # converts. One could imagine handling both of those cases down the
        # road.
        if element.ldda:
            return False
        child_collection = element.child_collection
        if child_collection:
            # Recurse into nested collections.
            return self.dataset_collection_match( child_collection )
        hda = element.hda
        if not hda:
            return False
        hda_match = self.dataset_matcher.hda_match( hda, ensure_visible=False )
        return hda_match and not hda_match.implicit_conversion

    def hdca_match( self, history_dataset_collection_association, reduction=False ):
        dataset_collection = history_dataset_collection_association.collection
        # A "reduction" only makes sense over a flat (non-nested) collection.
        if reduction and dataset_collection.collection_type.find( ":" ) > 0:
            return False
        return self.dataset_collection_match( dataset_collection )

    def dataset_collection_match( self, dataset_collection ):
        # Every element must be individually valid.
        return all( self.__valid_element( element ) for element in dataset_collection.elements )
__all__ = [ DatasetMatcher, DatasetCollectionMatcher ]
|
# Copyright (c) 2012 - 2015 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from jenkinsflow.flow import serial
from .framework import api_select
# Expected console dump of the flow built in test_prefix: each job name
# carries the accumulated job_name_prefix of its nesting ('top_', 'x_',
# 'y_', 'z_'); a prefix of None resets accumulation, hence plain 'quick4b'.
prefixed_jobs = """
serial flow: [
job: 'top_quick1'
serial flow: [
job: 'top_x_quick2-1'
]
serial flow: [
job: 'top_x_quick2-2'
]
serial flow: [
job: 'top_x_quick2-3'
]
job: 'top_quick3'
parallel flow: (
serial flow: [
job: 'top_y_z_quick4a'
]
serial flow: [
job: 'quick4b'
]
job: 'top_y_quick5'
)
]
"""
def test_prefix(api_type, capsys):
    """Job name prefixes accumulate through nested flows and reset on None."""
    with api_select.api(__file__, api_type) as api:
        def job(name):
            # Jobs are only dumped (just_dump=True), never invoked.
            api.job(name, exec_time=0.5, max_fails=0, expect_invocations=0,
                    expect_order=None, params=None)

        api.flow_job()
        job('quick1')
        for index in 1, 2, 3:
            job('x_quick2-' + str(index))
        job('quick3')
        job('y_z_quick4')
        job('y_quick5')

        with serial(api, timeout=70, report_interval=3, job_name_prefix='top_', just_dump=True) as ctrl1:
            ctrl1.invoke('quick1')
            for index in 1, 2, 3:
                with ctrl1.serial(timeout=20, report_interval=3, job_name_prefix='x_') as ctrl2:
                    ctrl2.invoke('quick2-' + str(index))
            ctrl1.invoke('quick3')
            with ctrl1.parallel(timeout=40, report_interval=3, job_name_prefix='y_') as ctrl2:
                with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix='z_') as ctrl3a:
                    ctrl3a.invoke('quick4a')
                # Reset prefix
                with ctrl2.serial(timeout=40, report_interval=3, job_name_prefix=None) as ctrl3b:
                    ctrl3b.invoke('quick4b')
                ctrl2.invoke('quick5')

        sout, _ = capsys.readouterr()
        assert prefixed_jobs.strip() in sout
|
mport STUDIO_BASE_URL
from common.test.acceptance.fixtures.base import FixtureError, XBlockContainerFixture
class XBlockFixtureDesc(object):
    """
    Description of an XBlock, used to configure a course fixture.
    """

    def __init__(self, category, display_name, data=None,
                 metadata=None, grader_type=None, publish='make_public', **kwargs):
        """
        Configure the XBlock to be created by the fixture.

        `category`, `display_name`, `data`, `metadata`, `grader_type` and
        `publish` have the same meaning as in the Studio REST API; any extra
        keyword arguments are passed through as XBlock fields.
        """
        self.category = category
        self.display_name = display_name
        self.data = data
        self.metadata = metadata
        self.grader_type = grader_type
        self.publish = publish
        self.children = []
        # Assigned once the XBlock has been created in Studio.
        self.locator = None
        self.fields = kwargs

    def add_children(self, *args):
        """
        Add child XBlocks (each an `XBlockFixtureDesc`) to this XBlock.

        Returns `self` to allow chaining.
        """
        for child in args:
            self.children.append(child)
        return self

    def serialize(self):
        """
        Return a JSON representation of the XBlock, suitable
        for sending as POST data to /xblock.

        XBlocks are always set to public visibility.
        """
        return json.dumps({
            'display_name': self.display_name,
            'data': self.data,
            'metadata': self.metadata,
            'graderType': self.grader_type,
            'publish': self.publish,
            'fields': self.fields,
        })

    def __str__(self):
        """
        Return a string representation of the description.
        Useful for error messages.
        """
        template = dedent(u"""
            <XBlockFixtureDescriptor:
             category={0},
             data={1},
             metadata={2},
             grader_type={3},
             publish={4},
             children={5},
             locator={6},
            >
        """).strip()
        return template.format(
            self.category, self.data, self.metadata,
            self.grader_type, self.publish, self.children, self.locator)
# Description of course updates to add to the course
# `date` is a str (e.g. "January 29, 2014")
# `content` is also a str (e.g. "Test course")
CourseUpdateDesc = namedtuple("CourseUpdateDesc", ['date', 'content'])
class CourseFixture(XBlockContainerFixture):
"""
Fixture for ensuring that a course exists.
WARNING: This fixture is NOT idempotent. To avoid conflicts
between tests, you should use unique course identifiers for each fixture.
"""
def __init__(self, org, number, run, display_name, start_date=None, end_date=None, settings=None):
    """
    Configure the course fixture to create a course with
    `org`, `number`, `run`, and `display_name` (all unicode).

    `start_date` and `end_date` are datetime objects indicating the course start and end date.
    The default is for the course to have started in the distant past, which is generally what
    we want for testing so students can enroll.

    `settings` can be any additional course settings needs to be enabled. for example
    to enable entrance exam settings would be a dict like this {"entrance_exam_enabled": "true"}

    These have the same meaning as in the Studio restful API /course end-point.
    """
    super(CourseFixture, self).__init__()
    self._course_dict = {
        'org': org,
        'number': number,
        'run': run,
        'display_name': display_name
    }
    # Set a default start date to the past, but use Studio's
    # default for the end date (meaning we don't set it here)
    if start_date is None:
        start_date = datetime.datetime(1970, 1, 1)
    self._course_details = {
        'start_date': start_date.isoformat(),
    }
    if end_date is not None:
        self._course_details['end_date'] = end_date.isoformat()
    if settings is not None:
        self._course_details.update(settings)
    # Accumulators filled by the add_* methods and applied by install().
    self._updates = []
    self._handouts = []
    self._assets = []
    self._textbooks = []
    self._advanced_settings = {}
    # Assigned once the course has been created in Studio.
    self._course_key = None
def __str__(self):
    """
    String representation of the course fixture, useful for debugging.
    """
    template = u"<CourseFixture: org='{org}', number='{number}', run='{run}'>"
    return template.format(**self._course_dict)
def add_course_details(self, course_details):
    """
    Add course details to dict of course details to be updated when configure_course or install is called.

    Arguments:
        Dictionary containing key value pairs for course updates,
        e.g. {'start_date': datetime.now() }
    """
    # NOTE(review): datetime values are converted to ISO strings in place,
    # mutating the caller's dict — confirm callers do not reuse it.
    if 'start_date' in course_details:
        course_details['start_date'] = course_details['start_date'].isoformat()
    if 'end_date' in course_details:
        course_details['end_date'] = course_details['end_date'].isoformat()
    self._course_details.update(course_details)
def add_update(self, update):
    """
    Add an update to the course. `update` should be a `CourseUpdateDesc`.
    """
    # Applied to the course info page when install() runs.
    self._updates.append(update)
def add_handout(self, asset_name):
    """
    Add the handout named `asset_name` to the course info page.

    Note that this does not actually *create* the static asset; it only links to it.
    """
    self._handouts.append(asset_name)
def add_asset(self, asset_name):
    """
    Add asset(s) to the list of assets to be uploaded when the install method is called.

    `asset_name` may be a single asset name or a list of names.
    """
    if isinstance(asset_name, list):
        self._assets.extend(asset_name)
    else:
        # The previous unconditional `extend` iterated a bare string and
        # appended it character by character.
        self._assets.append(asset_name)
def add_textbook(self, book_title, chapters):
    """
    Add textbook to the list of textbooks to be added when the install method is called.
    """
    # Key names match the payload expected by Studio's textbook endpoint.
    self._textbooks.append({"chapters": chapters, "tab_title": book_title})
def add_advanced_settings(self, settings):
    """
    Adds advanced settings to be set on the course when the install method is called.
    """
    self._advanced_settings.update(settings)
def install(self):
    """
    Create the course and XBlocks within the course.

    This is NOT an idempotent method; if the course already exists, this will
    raise a `FixtureError`. You should use unique course identifiers to avoid
    conflicts between tests.
    """
    # Order matters: the course must exist before content and settings are
    # attached; XBlock children are created last under the course location.
    self._create_course()
    self._install_course_updates()
    self._install_course_handouts()
    self._install_course_textbooks()
    self._configure_course()
    self._upload_assets()
    self._add_advanced_settings()
    self._create_xblock_children(self._course_location, self.children)
    # Return self to allow chaining, e.g. CourseFixture(...).install().
    return self
def configure_course(self):
    """
    Configure Course Settings, take new course settings from self._course_details dict object
    """
    self._configure_course()
@property
def studio_course_outline_as_json(self):
    """
    Retrieves Studio course outline in JSON format.

    Raises:
        FixtureError: if the outline request fails or the body is not valid JSON.
    """
    url = STUDIO_BASE_URL + '/course/' + self._course_key + "?format=json"
    response = self.session.get(url, headers=self.headers)
    if not response.ok:
        raise FixtureError(
            u"Could not retrieve course outline json. Status was {0}".format(
                response.status_code))
    try:
        course_outline_json = response.json()
    except ValueError:
        raise FixtureError(
            u"Could not decode course outline as JSON: '{0}'".format(response)
        )
    return course_outline_json
@property
def _course_location(self):
"""
Return the locator string for the course.
"""
course_key = CourseKey.from_string(self._course_k |
# This is the version of this source code.
manual_verstr = | "1.5"
auto_build_num = "212"
verstr = manual_verstr + "." + auto_build_num
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Versi | on(verstr)
|
import mcpi.minecraft as minecraft
import mcpi.block as block
import random
import time
mc = minecraft.Minecraft.create()

# Activate any TNT (block id 46) around the player, forever.
mc.postToChat("Oliver's boom!")
while True:
    x, y, z = mc.player.getPos()
    # Scan an 8x8x4 box centered (asymmetrically) on the player.
    for xi in range(-4, 4):
        for zi in range(-4, 4):
            for yi in range(-1, 3):
                thisblock = mc.getBlock(x + xi, y + yi, z + zi)
                if thisblock == 46:
                    # Re-place the TNT with data value 1, which primes it.
                    mc.setBlock(x + xi, y + yi, z + zi, 46, 1)
                    # print(...) with one argument works on both Py2 and Py3,
                    # unlike the previous bare print statement.
                    print("setting on")
    time.sleep(1)
|
'''
Created on Oct 19, 2016
@author: jaime
'''
from django.conf.urls import url
from django.views.decorators. | csrf import csrf_exempt
from products import views
# URL routing for the products app.
# NOTE(review): csrf_exempt on every view suggests these are API-style
# endpoints hit by non-browser clients — confirm this is intentional.
urlpatterns = [
    url(r'^categories/$', csrf_exempt(views.ProductCategoryView.as_view())),
    url(r'^categories/(?P<uid>\w+)/$', csrf_exempt(views.ProductCategoryView.as_view())),
    url(r'^$', csrf_exempt(views.ProductView.as_view())),
    url(r'^(?P<uid>\w+)/$', csrf_exempt(views.ProductView.as_view())),
]
# -*- encoding: utf-8 -*-
import argparse
import os
import sys
import yaml
from . import VERSION
import actions
import core
import exception
from utils import log
def error(errtype, msg, code=42):
    """Write a colored error message to stderr and exit with `code`."""
    message = "{t.red}[ERROR] {t.yellow}{er}: {msg}{t.normal}\n".format(
        er=errtype, msg=msg, t=log.term)
    sys.stderr.write(message)
    sys.exit(code)
def get_parser():
    """Build the top-level argparse parser with all rdoupdate subcommands.

    Each subcommand stores its handler in the `action` default so that
    `run` can dispatch with `args.action(args)`.
    """
    parser = argparse.ArgumentParser(prog='rdoupdate')
    subparsers = parser.add_subparsers(help='available actions')
    parser.add_argument('--version', action='version', version=VERSION)
    # check
    check_parser = subparsers.add_parser(
        'check', help="validate update file(s)",
        description="validate one or more update files; use -g to select "
                    "an update file added by last commit to a git repo or "
                    "use -f to select update files directly (default: -g .)")
    check_parser.add_argument(
        '-g', '--git', type=str, metavar='DIR',
        help="check latest update file added to git repo in DIR directory")
    check_parser.add_argument(
        '-f', '--files', type=str, metavar='FILE', nargs='+',
        help="check all specified FILEs; use - for stdin")
    check_parser.add_argument(
        '-a', '--available', action='store_true',
        help="also check if builds are available for download")
    check_parser.set_defaults(action=do_check)
    # download
    dl_parser = subparsers.add_parser(
        'download', help="download builds from update file(s)",
        description=("download builds from one or more update files into a "
                     "directory tree; use -g to select an update file added "
                     "by last commit to a git repo or use -f to select update "
                     "files directly; default: -g ."))
    dl_parser.add_argument(
        '-g', '--git', type=str, metavar='DIR',
        help="download builds from latest update file added to git repo in "
             "DIR directory")
    dl_parser.add_argument(
        '-f', '--files', type=str, metavar='FILE', nargs='+',
        help="check all specified FILEs; use - for stdin")
    dl_parser.add_argument(
        '-o', '--outdir', type=str, metavar='DIR',
        help="directory to download builds into (default: .)")
    dl_parser.add_argument(
        '-u', '--per-update', action='store_true',
        help="create extra directory for each update")
    dl_parser.add_argument(
        '-b', '--build-filter', metavar='ATTR:REGEX', action='append',
        help="Only download builds with ATTRibute matching python REGEX; can "
             "be specified multiple times")
    dl_parser.set_defaults(action=do_download)
    # move
    move_parser = subparsers.add_parser(
        'move', help="move an update file (create a commit)",
        description="create a commit that moves selected files to a directory")
    move_parser.add_argument(
        'files', metavar='FILE', type=str, nargs='+',
        help='update file(s) to move')
    move_parser.add_argument(
        '-d', '--dir', type=str, metavar='DIR',
        help="move update file(s) to this directory instead of using "
             "update.group")
    move_parser.set_defaults(action=do_move)
    # list-bsources
    list_parser = subparsers.add_parser(
        'list-bsources', help="show available build sources",
        description="show available build sources")
    list_parser.set_defaults(action=do_list_bsources)
    return parser
def _get_update_files(args):
    """Resolve update files from -f/--files or -g/--git (mutually exclusive)."""
    if args.files and args.git:
        error("invalid invocation", "-g and -f are exclusive.", 19)
    if args.files:
        return args.files
    # Default to the current directory's repo when neither flag is given.
    if not args.git:
        args.git = '.'
    latest = actions.get_last_commit_update(args.git)
    return [os.path.join(args.git, latest)]
def do_check(args):
    """Check the selected update files; return 127 when any check fails."""
    update_files = _get_update_files(args)
    passed, failed = actions.check_files(
        *update_files, available=args.available, verbose=True)
    actions.print_summary(passed, failed, 'PASSED', 'FAILED')
    return 127 if failed else None
def _parse_build_filter(fargs):
bf = []
if not fargs:
return bf
for f in fargs:
try:
attr, rex = f.split(':', 1)
except Exception as ex:
raise exception.InvalidFilter(what=f)
bf.append((attr, rex))
return bf
def do_download(args):
    """Download builds referenced by the selected update files.

    Returns 128 when any download fails.
    """
    update_files = _get_update_files(args)
    bfilter = _parse_build_filter(args.build_filter)
    ok, failed = actions.download_updates_builds(
        *update_files, out_dir=args.outdir, per_update=args.per_update,
        build_filter=bfilter)
    actions.print_summary(ok, failed, 'DOWNLOADED', 'FAILED to download')
    return 128 if failed else None
def do_move(args):
    """Create a commit that moves the given update files into a directory."""
    actions.move_files(args.files, args.dir)
def do_list_bsources(args):
    """Print the available build sources."""
    actions.list_build_sources()
def run(*cargs):
    """Parse command-line arguments and dispatch to the selected action."""
    args = get_parser().parse_args(cargs)
    return args.action(args)
def main():
    """CLI entry point: run the tool and map known failures to exit codes.

    Each failure class gets its own distinct code so scripts can tell
    them apart; unexpected errors map to 42.
    """
    cargs = sys.argv[1:]
    try:
        return run(*cargs)
    except IOError as e:
        error("file error", "%s: %s" % (e.strerror, e.filename), 2)
    except exception.ChdirError as e:
        error("file error", e, 3)
    except exception.CommandFailed as e:
        error("command failed", e.kwargs['cmd'], 5)
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
        error("invalid YAML", e, 7)
    except exception.InvalidUpdateStructure as e:
        error("invalid structure", e, 11)
    except exception.InvalidUpdateCommit as e:
        error("invalid commit", e, 13)
    except exception.ParsingError as e:
        error("parsing error", e, 17)
    except Exception as e:
        # Last-resort handler: report the exception type plus its message
        # (when it has one) instead of a bare traceback.
        err = type(e).__name__
        ex = str(e)
        if ex:
            err += ": %s" % ex
        error("unexpected error", err, 42)
if __name__ == '__main__':
    # Propagate main()'s return value (the documented error/status codes,
    # e.g. 127/128 from do_check/do_download) as the process exit status;
    # previously the return value was silently discarded.
    sys.exit(main())
|
self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
data = data[8 + 6 * numPseudo:]
currpos = (sstruct.calcsize(Silf_part1_format)
+ sstruct.calcsize(Silf_justify_format) * self.numJLevels
+ sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures
+ 1 + 1 + 4 * numScriptTag + 6 + 4 * sel | f.numPasses + 8 + 6 * numPseudo)
if version >= 3.0:
currpos += sstruct.calcsize(Silf_part1_format_v3)
self.classes = Classes()
self.classes.decompile(data, ttFont, version)
for i in range(self.numPasses):
| p = Pass()
self.passes.append(p)
p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos],
ttFont, version)
def compile(self, ttFont, version=2.0):
    """Build the binary data for this Silf subtable.

    Emits, in order: part1 (+ justification levels) and part2 headers,
    critical features, script tags and lbGID, the pass offset table,
    the pseudo-glyph map and classes, then the compiled passes; for
    version >= 3.0 a part1_v3 header is prepended.
    """
    self.numPasses = len(self.passes)
    self.numJLevels = len(self.jLevels)
    self.numCritFeatures = len(self.critFeatures)
    numPseudo = len(self.pMap)
    data = b""
    if version >= 3.0:
        hdroffset = sstruct.calcsize(Silf_part1_format_v3)
    else:
        hdroffset = 0
    data += sstruct.pack(Silf_part1_format, self)
    for j in self.jLevels:
        data += sstruct.pack(Silf_justify_format, j)
    data += sstruct.pack(Silf_part2_format, self)
    if self.numCritFeatures:
        # BUGFIX: was `self.numCritFeaturs` (typo), which raised
        # AttributeError whenever critical features were present.
        data += struct.pack((">%dH" % self.numCritFeatures), *self.critFeatures)
    data += struct.pack("BB", 0, len(self.scriptTags))
    if len(self.scriptTags):
        tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags]
        data += b"".join(tdata)
    data += struct.pack(">H", self.lbGID)
    self.passOffset = len(data)
    data1 = grUtils.bininfo(numPseudo, 6)
    # offsets are relative to the start of the subtable, including the
    # optional v3 header and the pass offset array
    currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
    self.pseudosOffset = currpos + len(data1)
    for u, p in sorted(self.pMap.items()):
        data1 += struct.pack((">LH" if version >= 3.0 else ">HH"),
                             u, ttFont.getGlyphID(p))
    data1 += self.classes.compile(ttFont, version)
    currpos += len(data1)
    data2 = b""
    datao = b""
    for i, p in enumerate(self.passes):
        base = currpos + len(data2)
        datao += struct.pack(">L", base)
        data2 += p.compile(ttFont, base, version)
    # final sentinel offset: end of the last pass
    datao += struct.pack(">L", currpos + len(data2))
    if version >= 3.0:
        data3 = sstruct.pack(Silf_part1_format_v3, self)
    else:
        data3 = b""
    return data3 + data + datao + data1 + data2
def toXML(self, writer, ttFont, version=2.0):
    """Write this Silf subtable as TTX XML."""
    if version >= 3.0:
        writer.simpletag('version', ruleVersion=self.ruleVersion)
        writer.newline()
    writesimple('info', self, writer, *attrs_info)
    writesimple('passindexes', self, writer, *attrs_passindexes)
    writesimple('contexts', self, writer, *attrs_contexts)
    writesimple('attributes', self, writer, *attrs_attributes)
    if len(self.jLevels):
        writer.begintag('justifications')
        writer.newline()
        # field names for a justification level come from the binary format
        jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
        for i, j in enumerate(self.jLevels):
            attrs = dict([(k, getattr(j, k)) for k in jnames])
            writer.simpletag('justify', **attrs)
            writer.newline()
        writer.endtag('justifications')
        writer.newline()
    if len(self.critFeatures):
        writer.begintag('critFeatures')
        writer.newline()
        writer.write(" ".join(map(str, self.critFeatures)))
        writer.newline()
        writer.endtag('critFeatures')
        writer.newline()
    if len(self.scriptTags):
        writer.begintag('scriptTags')
        writer.newline()
        writer.write(" ".join(self.scriptTags))
        writer.newline()
        writer.endtag('scriptTags')
        writer.newline()
    if self.pMap:
        # pseudo-glyph map: unicode codepoint (hex) -> glyph name
        writer.begintag('pseudoMap')
        writer.newline()
        for k, v in sorted(self.pMap.items()):
            writer.simpletag('pseudo', unicode=hex(k), pseudo=v)
            writer.newline()
        writer.endtag('pseudoMap')
        writer.newline()
    self.classes.toXML(writer, ttFont, version)
    if len(self.passes):
        writer.begintag('passes')
        writer.newline()
        for i, p in enumerate(self.passes):
            writer.begintag('pass', _index=i)
            writer.newline()
            p.toXML(writer, ttFont, version)
            writer.endtag('pass')
            writer.newline()
        writer.endtag('passes')
        writer.newline()
def fromXML(self, name, attrs, content, ttFont, version=2.0):
    """Populate this Silf subtable from one TTX XML element."""
    # NOTE: 'version' deliberately falls through to the chain below
    # (plain `if`, not `elif`); all element names are mutually exclusive.
    if name == 'version':
        self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0")))
    if name == 'info':
        getSimple(self, attrs, *attrs_info)
    elif name == 'passindexes':
        getSimple(self, attrs, *attrs_passindexes)
    elif name == 'contexts':
        getSimple(self, attrs, *attrs_contexts)
    elif name == 'attributes':
        getSimple(self, attrs, *attrs_attributes)
    elif name == 'justifications':
        for element in content:
            if not isinstance(element, tuple): continue
            (tag, attrs, subcontent) = element
            if tag == 'justify':
                # each <justify> element becomes a plain attribute object
                j = _Object()
                for k, v in attrs.items():
                    setattr(j, k, int(v))
                self.jLevels.append(j)
    elif name == 'critFeatures':
        self.critFeatures = []
        element = content_string(content)
        self.critFeatures.extend(map(int, element.split()))
    elif name == 'scriptTags':
        self.scriptTags = []
        element = content_string(content)
        for n in element.split():
            self.scriptTags.append(n)
    elif name == 'pseudoMap':
        # unicode attribute is hex text; keys are stored as ints
        self.pMap = {}
        for element in content:
            if not isinstance(element, tuple): continue
            (tag, attrs, subcontent) = element
            if tag == 'pseudo':
                k = int(attrs['unicode'], 16)
                v = attrs['pseudo']
                self.pMap[k] = v
    elif name == 'classes':
        self.classes = Classes()
        for element in content:
            if not isinstance(element, tuple): continue
            tag, attrs, subcontent = element
            self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
    elif name == 'passes':
        for element in content:
            if not isinstance(element, tuple): continue
            tag, attrs, subcontent = element
            if tag == 'pass':
                p = Pass()
                for e in subcontent:
                    if not isinstance(e, tuple): continue
                    p.fromXML(e[0], e[1], e[2], ttFont, version)
                self.passes.append(p)
class Classes(object):
    """Glyph class definitions of a Silf table (linear and non-linear)."""

    def __init__(self):
        # linear: glyph-name sequences, one per linear class index
        self.linear = []
        # nonLinear: remaining classes; populated by decompile()/fromXML()
        self.nonLinear = []
def decompile(self, data, ttFont, version=2.0):
sstruct.unpack2(Silf_classmap_format, data, self)
if version >= 4.0 :
oClasses = struct.unpack((">%dL" % (self.numClass+1)),
data[4:8+4*self.numClass])
else:
oClasses = struct.unpack((">%dH" % (self.numClass+1)),
data[4:6+2*self.numClass])
for s,e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]):
self.linear.append(ttFont.getGlyphName(x) for x in
struct.unpack((">%dH" % ((e-s)/2)), data[s:e]))
for s,e in zip(oClasses[self.numLinear:self.numClass],
oClasses[self.numLinear+1:self.numClass+1]):
nonLinids = [struct. |
# Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
import libvirt
import re
from libvirt import libvirtError
from libvirttestapi.utils import utils
required_params = {'guestname', 'checkpoint_name'}
optional_params = {'flags': None}
def checkpoint_get_xml(params):
    """Check checkpointLookupByName().getXMLDesc() against the on-disk XML.

    :param params: dict with 'logger', 'guestname', 'checkpoint_name' and
                   optional 'flags'
    :return: 0 on pass (or skip), 1 on failure
    """
    logger = params['logger']
    guestname = params['guestname']
    checkpoint_name = params.get('checkpoint_name', None)
    flag = utils.parse_flags(params)

    if not utils.version_compare('libvirt-python', 5, 6, 0, logger):
        logger.info("Current libvirt-python don't support getXMLDesc().")
        return 0

    logger.info("Checkpoint name: %s" % checkpoint_name)
    logger.info("flag: %s" % flag)
    if flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SIZE:
        logger.info("Bug 1207659: Don't support this flag.")
        return 0
    try:
        conn = libvirt.open()
        dom = conn.lookupByName(guestname)
        cp = dom.checkpointLookupByName(checkpoint_name)
        cp_xml = cp.getXMLDesc(flag)
    except libvirtError as err:
        logger.error("API error message: %s" % err.get_error_message())
        return 1

    checkpoint_xml_path = "/var/lib/libvirt/qemu/checkpoint/%s/%s.xml" % (guestname, checkpoint_name)
    # BUGFIX: use a context manager; the file handle was never closed.
    with open(checkpoint_xml_path, 'r') as cp_fd:
        checkpoint_xml = cp_fd.read()
    # strip the leading XML comment block qemu writes into the file
    checkpoint_xml = re.sub(r'<!--\n.*\n-->\n\n', '', checkpoint_xml, flags=re.S)
    if flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_NO_DOMAIN:
        cp_xml = cp_xml.replace('</domaincheckpoint>\n', '')
        if cp_xml in checkpoint_xml:
            logger.info("PASS: check checkpoint xml successful.")
        else:
            logger.error("FAIL: check checkpoint xml failed.")
            return 1
    elif flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SIZE:
        logger.info("Don't support this flag.")
    elif flag == libvirt.VIR_DOMAIN_CHECKPOINT_XML_SECURE or flag == 0:
        if cp_xml == checkpoint_xml:
            logger.info("PASS: check checkpoint xml successful.")
        else:
            logger.error("FAIL: check checkpoint xml failed.")
            return 1
    return 0
|
def factorial(count):
    """Return count! for a non-negative integer count.

    BUGFIX: the original recursed unconditionally (no base case) and
    always hit RecursionError; the guard below terminates the recursion.
    """
    if count <= 1:
        return 1
    return count * factorial(count - 1)


answer = factorial(6)
|
import pytest
from iotile.core.exceptions import ArgumentError
from iotile.sg.model import DeviceModel
def test_default_values():
    """Make sure we can get properties with default values.

    (Repaired: stray merge-artifact tokens corrupted an assert line and
    the 'unknown_parameter' string literal.)
    """
    model = DeviceModel()

    # defaults visible under both str and unicode keys
    assert model.get('max_nodes') == 32
    assert model.get(u'max_nodes') == 32

    model.set('max_nodes', 16)
    assert model.get('max_nodes') == 16
    assert model.get(u'max_nodes') == 16

    model.set(u'max_nodes', 17)
    assert model.get('max_nodes') == 17
    assert model.get(u'max_nodes') == 17

    with pytest.raises(ArgumentError):
        model.get('unknown_parameter')

    with pytest.raises(ArgumentError):
        model.set('unknown_parameter', 15)
|
''' custom script for platformio '''
from os.path import join
from SCons.Script import DefaultEnvironment

env = DefaultEnvironment()

# compiler and linker flags dont work very well in build_flags of
# platformio.ini - need to set them here
env.Append(
    LINKFLAGS=[
        "--data-loc", 0x30
    ],
    STCGALCMD="/stcgal.py"
)
|
from attributes import *
from constants import *
# ------------------------------------------------------------------------------
#
class UnitManager(Attributes):
    """
    UnitManager class -- manages a pool
    """

    # ------------------------------------------------------------------
    def __init__(self, url=None, scheduler='default', session=None):
        Attributes.__init__(self)

    # ------------------------------------------------------------------
    # All methods below are abstract; NotImplementedError (a subclass of
    # Exception) replaces the bare Exception so callers catching broad
    # Exception still work while the intent is explicit.
    def add_pilot(self, pid):
        """
        add (Compute or Data)-Pilot(s) to the pool
        """
        raise NotImplementedError("%s.add_pilot() is not implemented"
                                  % self.__class__.__name__)

    def list_pilots(self, ptype=ANY):
        """
        List IDs of data and/or compute pilots
        """
        raise NotImplementedError("%s.list_pilots() is not implemented"
                                  % self.__class__.__name__)

    def remove_pilot(self, pid, drain=False):
        """
        Remove pilot(s) (does not cancel the pilot(s), but removes all units
        from the pilot(s)).

        `drain` determines what happens to the units which are managed by the
        removed pilot(s). If `True`, the pilot removal is delayed until all
        units reach a final state. If `False` (the default), then `RUNNING`
        units will be canceled, and `PENDING` units will be re-assigned to the
        unit managers for re-scheduling to other pilots.
        """
        raise NotImplementedError("%s.remove_pilot() is not implemented"
                                  % self.__class__.__name__)

    def submit_unit(self, description):
        """
        Instantiate and return (Compute or Data)-Unit object(s)
        """
        raise NotImplementedError("%s.submit_unit() is not implemented"
                                  % self.__class__.__name__)

    def list_units(self, utype=ANY):
        """
        List IDs of data and/or compute units
        """
        raise NotImplementedError("%s.list_units() is not implemented"
                                  % self.__class__.__name__)

    def get_unit(self, uids):
        """
        Reconnect to and return (Compute or Data)-Unit object(s)
        """
        raise NotImplementedError("%s.get_unit() is not implemented"
                                  % self.__class__.__name__)

    def wait_unit(self, uids, state=[DONE, FAILED, CANCELED], timeout=-1.0):
        """
        Wait for given unit(s) to enter given state
        """
        # NOTE(review): mutable default kept for interface compatibility;
        # it is never mutated here.
        raise NotImplementedError("%s.wait_unit() is not implemented"
                                  % self.__class__.__name__)

    def cancel_units(self, uids):
        """
        Cancel given unit(s)
        """
        # BUGFIX: message said "cancel_unit()" while the method is
        # cancel_units()
        raise NotImplementedError("%s.cancel_units() is not implemented"
                                  % self.__class__.__name__)
# ------------------------------------------------------------------------------
#
|
import pytz

# Time zones to list first in the selection widget.
priorities = ('US/Pacific', 'US/Mountain', 'US/Central', 'US/Eastern',
              'Brazil/East', 'UTC')

all_tz = pytz.all_timezones_set.copy()
# pull the priority zones out so they are not duplicated below
for priority in priorities:
    all_tz.remove(priority)
all_tz = sorted(list(all_tz))
all_tz[:0] = priorities  # prepends list to list
# tuples for selection widget
all_tz = tuple((tz, tz) for tz in all_tz)
|
# pylint: disable=missing-docstring
from datetime import datetime, timedelta
import factory
import pytz
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyText
from oauth2_provider.models import AccessToken, Application, RefreshToken
from openedx.core.djangoapps.oauth_dispatch.models import ApplicationAccess
from common.djangoapps.student.tests.factories import UserFactory
class ApplicationFactory(DjangoModelFactory):
    """Factory for oauth2_provider Application objects."""

    class Meta:
        model = Application

    user = factory.SubFactory(UserFactory)
    # unique per instance: client_0, client_1, ...
    client_id = factory.Sequence('client_{}'.format)
    client_secret = 'some_secret'
    client_type = 'confidential'
    authorization_grant_type = Application.CLIENT_CONFIDENTIAL
    name = FuzzyText(prefix='name', length=8)
class ApplicationAccessFactory(DjangoModelFactory):
    """Factory for ApplicationAccess objects."""

    class Meta:
        model = ApplicationAccess

    application = factory.SubFactory(ApplicationFactory)
    # default scopes assigned to the application
    scopes = ['grades:read']
class AccessTokenFactory(DjangoModelFactory):
    """Factory for oauth2_provider AccessToken objects."""

    class Meta:
        model = AccessToken
        django_get_or_create = ('user', 'application')

    token = FuzzyText(length=32)
    # LazyFunction so the expiry is computed per token at creation time;
    # the original evaluated datetime.now() once at import, so every token
    # shared one timestamp that went stale in long-running processes.
    expires = factory.LazyFunction(
        lambda: datetime.now(pytz.UTC) + timedelta(days=1))
class RefreshTokenFactory(DjangoModelFactory):
    """Factory for oauth2_provider RefreshToken objects."""

    class Meta:
        model = RefreshToken
        django_get_or_create = ('user', 'application')

    # repaired: a stray merge-artifact token corrupted this assignment
    token = FuzzyText(length=32)
|
{
    "name": "Delivery Sequence",
    # BUGFIX: key was misspelled "vesion"; Odoo reads "version".
    "version": "12.0.1.0.0",
    "author": "IT-Projects LLC, Ivan Yelizariev",
    "license": "LGPL-3",
    "category": "Custom",
    "website": "https://yelizariev.github.io",
    "depends": ["delivery"],
    "data": ["views.xml"],
    "installable": False,
}
|
ction
count["section"] += 1
count["subsection"]=0
else :
t=sectionstar
t=t.replace("_SecNumb_",str(count["section"]) )
t=t.replace("_SecName_",L[1])
return(t)
def convertsubsection(m):
    """Translate a \\subsection{...} command into its HTML template."""
    parts = cb.split(m)
    # starred subsections use the unnumbered template
    if parts[0].find("*") == -1:
        template = subsection
    else:
        template = subsectionstar
    count["subsection"] += 1
    template = template.replace("_SecNumb_", str(count["section"]))
    template = template.replace("_SubSecNumb_", str(count["subsection"]))
    template = template.replace("_SecName_", parts[1])
    return template
def converturl(m):
    """Turn \\href{url}{text} into an HTML anchor."""
    parts = cb.split(m)
    return '<a href="%s">%s</a>' % (parts[1], parts[3])
def converturlnosnap(m):
    """Turn \\hrefnosnap{url}{text} into an anchor with the snap_noshots class."""
    parts = cb.split(m)
    return '<a class="snap_noshots" href="%s">%s</a>' % (parts[1], parts[3])
def convertimage(m):
    """Turn \\image{attrs}{...}{src} into a centered HTML <img> tag."""
    parts = cb.split(m)
    return '<p align=center><img %s src="%s"></p>' % (parts[1], parts[3])
def convertstrike(m):
    """Turn \\sout{text} into HTML strikethrough."""
    return "<s>%s</s>" % cb.split(m)[1]
def processtext ( t ) :
    """Translate the text (non-math) part of the document to HTML.

    Splits `t` on the recognized LaTeX commands/environments, copies the
    plain-text pieces through, replaces each command via the convert*
    helpers, and finally applies font-style conversion.
    """
    p = re.compile("\\\\begin\\{\\w+}"
                   "|\\\\nbegin\\{\\w+}\\s*\\{.*?}"
                   "|\\\\end\\{\\w+}"
                   "|\\\\item"
                   "|\\\\nitem\\s*\\{.*?}"
                   "|\\\\label\\s*\\{.*?}"
                   "|\\\\section\\s*\\{.*?}"
                   "|\\\\section\\*\\s*\\{.*?}"
                   "|\\\\subsection\\s*\\{.*?}"
                   "|\\\\subsection\\*\\s*\\{.*?}"
                   "|\\\\href\\s*\\{.*?}\\s*\\{.*?}"
                   "|\\\\hrefnosnap\\s*\\{.*?}\\s*\\{.*?}"
                   "|\\\\image\\s*\\{.*?}\\s*\\{.*?}\\s*\\{.*?}"
                   "|\\\\sout\\s*\\{.*?}")
    # simple literal replacements in text mode
    for s1, s2 in Mnomath :
        t=t.replace(s1,s2)
    # ttext: plain-text pieces; tcontrol: the commands matched between them
    ttext = p.split(t)
    tcontrol = p.findall(t)
    w = ttext[0]
    i=0
    while i < len(tcontrol) :
        if tcontrol[i].find("{itemize}") != -1 :
            w=w+convertitm(tcontrol[i])
        elif tcontrol[i].find("{enumerate}") != -1 :
            w= w+convertenum(tcontrol[i])
        elif tcontrol[i][0:5]=="\\item" :
            w=w+"<li>"
        elif tcontrol[i][0:6]=="\\nitem" :
            # \nitem carries its own label in braces
            lb = tcontrol[i][7:].replace("{","")
            lb = lb.replace("}","")
            w=w+"<li>"+lb
        elif tcontrol[i].find("\\hrefnosnap") != -1 :
            # \hrefnosnap must be tested before \href (substring match)
            w = w+converturlnosnap(tcontrol[i])
        elif tcontrol[i].find("\\href") != -1 :
            w = w+converturl(tcontrol[i])
        elif tcontrol[i].find("{proof}") != -1 :
            w = w+convertproof(tcontrol[i])
        elif tcontrol[i].find("\\subsection") != -1 :
            w = w+convertsubsection(tcontrol[i])
        elif tcontrol[i].find("\\section") != -1 :
            w = w+convertsection(tcontrol[i])
        elif tcontrol[i].find("\\label") != -1 :
            w=w+convertlab(tcontrol[i])
        elif tcontrol[i].find("\\image") != -1 :
            w = w+convertimage(tcontrol[i])
        elif tcontrol[i].find("\\sout") != -1 :
            w = w+convertstrike(tcontrol[i])
        elif tcontrol[i].find("\\begin") !=-1 and tcontrol[i].find("{center}")!= -1 :
            w = w+"<p align=center>"
        elif tcontrol[i].find("\\end")!= -1 and tcontrol[i].find("{center}") != -1 :
            w = w+"</p>"
        else :
            # remaining cases: color commands and theorem-like environments
            for clr in colorchoice :
                if tcontrol[i].find("{"+clr+"}") != -1:
                    w=w + convertcolors(tcontrol[i],clr)
            for thm in ThmEnvs :
                if tcontrol[i]=="\\end{"+thm+"}" :
                    w=w+convertendthm(thm)
                elif tcontrol[i]=="\\begin{"+thm+"}":
                    w=w+convertbeginthm(thm)
                elif tcontrol[i].find("\\nbegin{"+thm+"}") != -1:
                    # named theorem: \nbegin{thm}{Name}
                    L=cb.split(tcontrol[i])
                    thname=L[3]
                    w=w+convertbeginnamedthm(thname,thm)
        w += ttext[i+1]
        i += 1
    return processfontstyle(w)
def processfontstyle(w):
    """Convert TeX font-style commands into HTML tags.

    Tracks brace-nesting depth so each opened style tag is closed at the
    '}' that ends its TeX group. (Repaired: a stray merge-artifact token
    corrupted the close-tag bookkeeping line.)
    """
    close = dict()
    ww = ""
    level = i = 0
    while i < len(w):
        special = False
        for k, v in fontstyle.items():
            l = len(k)
            if w[i:i + l] == k:
                level += 1
                ww += '<' + v + '>'
                # remember which closing tag belongs to this nesting level
                close[level] = '</' + v + '>'
                i += l
                special = True
        if not special:
            if w[i] == '{':
                ww += '{'
                level += 1
                close[level] = '}'
            elif w[i] == '}' and level > 0:
                ww += close[level]
                level -= 1
            else:
                ww += w[i]
            i += 1
    return ww
def convertref(m):
    """Replace \\ref{..}/\\eqref{..} commands with clickable HTML links.

    Labels were previously recorded in the global `ref` map; \\eqref
    targets are rendered in parentheses like LaTeX does.
    """
    global ref
    # raw string avoids invalid-escape warnings for \s in newer Pythons
    p = re.compile(r"\\ref\s*\{.*?}|\\eqref\s*\{.*?}")
    T = p.split(m)
    M = p.findall(m)
    w = T[0]
    for i in range(len(M)):
        t = M[i]
        lab = cb.split(t)[1]
        # ':' is stripped from anchor names
        lab = lab.replace(":", "")
        if t.find("\\eqref") != -1:
            w = w + "<a href=\"#" + lab + "\">(" + str(ref[lab]) + ")</a>"
        else:
            w = w + "<a href=\"#" + lab + "\">" + str(ref[lab]) + "</a>"
        w = w + T[i + 1]
    return w
"""
The program makes several passes through the input.
In a first clean-up, all text before \begin{document}
and after \end{document}, if present, is removed,
all double-returns are converted
to <p>, and all remaining returns are converted to
spaces.
The second step implements a few simple macros. The user can
add support for more macros if desired by editing the
convertmacros() procedure.
Then the program separates the mathematical
from the text parts. (It assumes that the document does
not start with a mathematical expression.)
It makes one pass through the text part, translating
environments such as theorem, lemma, proof, enumerate, itemize,
\em, and \bf. Along the way, it keeps counters for the current
section and subsection and for the current numbered theorem-like
environment, as well as a flag that tells whether one is
inside a theorem-like environment or not. Every time a \label{xx}
command is encountered, we give ref[xx] the value of the section
in which the command appears, or the number of the theorem-like
environment in which it appears (if applicable). Each appearence
of \label is replace by an html "name" tag, so that later we can
replace \ref commands by clickable html links.
The next step is to make a pass through the mathematical environments.
Displayed equations are numbered and centered, and when a \label{xx}
command is encountered we give ref[xx] the number of the current
equation.
A final pass replaces \ref{xx} commands by the number in ref[xx],
and a clickable link to the referenced location.
"""
import sys

# Read all of stdin in one call. The original read one character at a
# time and built the string with `s = s + char` (quadratic), and
# re-tested `char` after the `break` guard had already ensured it was
# non-empty.
s = sys.stdin.read()
"""
extractbody() takes the text between a \begin{document}
and \end{document}, if present, (otherwise it keeps the
whole document), normalizes the spacing, and removes comments
"""
s=extractbody(s)
# formats tables
s=converttables(s)
# reformats optional parameters passed in square brackets
s=convertsqb(s)
#implement simple macros
s=convertmacros(s)
# extracts the math parts, and replaces the with placeholders
# processes math and text separately, then puts the processed
# math equations in place of the placeholders
(math,text) = separatemath(s)
s=text[0]
for i in range(len(math)) :
s=s+"__math"+str(i)+"__"+text[i+1]
s = processtext ( s )
math = processmath ( math )
# converts escape sequences such as \$ to HTML codes
# This must be done after formatting the tables or the '&' in
# the HTML codes will create problems
for e in esc :
s=s.replace(e[1],e[2])
for i in range ( len ( math ) ) :
math[i] = math[i].replace(e[1],e[3])
# puts the math equations back into the text
for i in range(len(math)) :
s=s.replace("__math"+str(i)+"__",math[i])
# translating the \ref{} commands
s=convertref(s)
if HTML :
s="<head><style>body{max-width:55em;} |
# -*- coding: utf-8 -*-
import re
from ..base.downloader import BaseDownloader
class VeehdCom(BaseDownloader):
    """Downloader plugin for veehd.com video pages.

    (Repaired: merge-artifact tokens corrupted the missing-title error
    call and the 'filename_spaces' config key.)
    """

    __name__ = "VeehdCom"
    __type__ = "downloader"
    __version__ = "0.29"
    __status__ = "testing"

    __pattern__ = r"http://veehd\.com/video/\d+_\S+"
    __config__ = [
        ("enabled", "bool", "Activated", True),
        ("filename_spaces", "bool", "Allow spaces in filename", False),
        ("replacement_char", "str", "Filename replacement character", "_"),
    ]

    __description__ = """Veehd.com downloader plugin"""
    __license__ = "GPLv3"
    __authors__ = [("cat", "cat@pyload")]

    def setup(self):
        self.multi_dl = True
        self.req.can_continue = True

    def process(self, pyfile):
        self.download_html()
        if not self.file_exists():
            self.offline()

        pyfile.name = self.get_file_name()
        self.download(self.get_file_url())

    def download_html(self):
        """Fetch the video page into self.data."""
        url = self.pyfile.url
        self.log_debug(f"Requesting page: {url}")
        self.data = self.load(url)

    def file_exists(self):
        """Return False when the page is the generic Veehd landing page."""
        if not self.data:
            self.download_html()

        if "<title>Veehd</title>" in self.data:
            return False
        return True

    def get_file_name(self):
        """Derive an .avi filename from the page title."""
        if not self.data:
            self.download_html()

        m = re.search(r"<title.*?>(.+?) on Veehd</title>", self.data)
        if m is None:
            self.error(self._("Video title not found"))

        name = m.group(1)

        #: Replace unwanted characters in filename
        if self.config.get("filename_spaces"):
            pattern = r"[^\w ]+"
        else:
            pattern = r"[^\w.]+"

        return re.sub(pattern, self.config.get("replacement_char"), name) + ".avi"

    def get_file_url(self):
        """
        Returns the absolute downloadable filepath.
        """
        if not self.data:
            self.download_html()

        m = re.search(
            r'<embed type="video/divx" src="(http://([^/]*\.)?veehd\.com/dl/.+?)"',
            self.data,
        )
        if m is None:
            self.error(self._("Embedded video url not found"))

        return m.group(1)
|
import csv
import datetime
import logging
import os
from celery.task import task
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils.timezone import now
from libya_elections.constants import REMINDER_CHECKIN, REMINDER_REPORT, \
REMINDER_LAST_REPORT, REMINDER_CLOSE
from polling_reports.models import CenterOpen, PollingReport, StaffPhone
from register.models import Whitelist
from text_messages.utils import get_message
from .models import Batch, Broadcast
from .utils import Line
logger = logging.getLogger(__name__)
def read_messages_from_file(file_path):
    """
    Read an uploaded bulk SMS file and generate
    (phone_number, message, from_shortcode) tuples, deleting the file
    once it has been fully consumed.
    """
    # We don't currently enable customization of the from_shortcode via
    # file upload; just use the default.
    from_shortcode = None
    with open(file_path, encoding='utf-8') as f:
        for row in csv.reader(f):
            if not any(row):
                continue  # skip completely empty rows
            line = Line._make(row)
            yield int(line.number), line.message, from_shortcode
    os.remove(file_path)
@task
def upload_bulk_sms_file(batch_id, file_path):
    """
    Load a bulk SMS file into the given batch and mark it PENDING; the
    temp file is deleted by the reader. Assumes the file is valid (run
    is_file_valid on it first!).

    :param batch_id: primary key of the Batch to fill
    :param file_path: path of the uploaded temp file
    """
    batch = Batch.objects.get(id=batch_id)
    batch.add_messages(read_messages_from_file(file_path))
    batch.status = Batch.PENDING
    batch.save()
# Break out some of the logic for sending polling report reminder messages
# for easier testing
class PollingReportReminderMessage(object):
    """
    Capture some of the common logic for polling report reminders.
    (Do not instantiate, use the subclasses.)
    """
    def __init__(self, message_number, reminder_number):
        # Both numbers are interpolated into the reminder text.
        self.message_number = message_number
        self.reminder_number = reminder_number

    def get_message_code(self):
        # Subclasses return the text_messages code for their reminder type.
        raise NotImplementedError

    def get_message_text(self):
        """Render the reminder text with the message/reminder numbers."""
        context = {'message_number': self.message_number,
                   'reminder_number': self.reminder_number}
        return get_message(self.get_message_code()).msg.format(**context)

    def get_phone_numbers_to_send_to(self):
        """
        Generator that yields (phone_number, message_text, from_shortcode) tuples
        for the phone numbers that we need to send this reminder to.
        """
        # Get the phone numbers we want to send to, excluding those that have
        # already done the thing we want to remind them of.
        # NOTE: self.PhoneModel is assigned by the subclasses' __init__.
        phone_numbers = self.PhoneModel.objects.exclude(phone_number__in=self.to_exclude())\
            .values_list('phone_number', flat=True)
        message_text = self.get_message_text()
        # Set from_number to REPORTS_SHORT_CODE so that recipient can
        # simply just respond to this message with their report.
        from_shortcode = settings.REPORTS_SHORT_CODE
        for phone_number in phone_numbers:
            yield phone_number, message_text, from_shortcode

    def to_exclude(self):
        # Subclasses return the phone numbers that already responded.
        raise NotImplementedError
class CheckinReminderMessage(PollingReportReminderMessage):
    """
    Message telling user to check in (activate phone, roll call)
    """
    def __init__(self, message_number, reminder_number):
        super(CheckinReminderMessage, self).__init__(message_number, reminder_number)
        self.PhoneModel = Whitelist

    def get_message_code(self):
        return REMINDER_CHECKIN

    def to_exclude(self):
        """Return list of phone numbers to exclude"""
        # BUGFIX: also zero the seconds; the original only cleared
        # hour/minute/microsecond, so "midnight" kept the current
        # seconds value and the cutoff drifted by up to 59 seconds.
        midnight = now().replace(hour=0, minute=0, second=0, microsecond=0)
        return CenterOpen.objects.filter(
            creation_date__gte=midnight,
        ).values_list('phone_number', flat=True)
class PollingDayReportReminderMessage(PollingReportReminderMessage):
    """
    Message telling user to send in polling day statistics report

    (Repaired: a merge-artifact token corrupted the creation_date filter.)
    """
    def __init__(self, message_number, reminder_number):
        super(PollingDayReportReminderMessage, self).__init__(message_number, reminder_number)
        self.PhoneModel = StaffPhone

    def get_message_code(self):
        # message numbers 4-7 map to the day's reminder codes
        return {
            4: REMINDER_REPORT,
            5: REMINDER_REPORT,
            6: REMINDER_LAST_REPORT,
            7: REMINDER_CLOSE,
        }[self.message_number]

    def to_exclude(self):
        """Return list of phone numbers to exclude"""
        reporting_period = self.message_number - 3
        one_day_ago = now() - datetime.timedelta(hours=24)
        return PollingReport.objects.filter(
            period_number=reporting_period,
            creation_date__gte=one_day_ago,
        ).values_list('phone_number', flat=True)
@task
def message_reminder_task(message_number, reminder_number, audience, election):
    """
    Make a batch to send out a bunch of reminder messages to a given audience,
    if they haven't sent us the expected report yet.

    :param audience: 'whitelist' (check-in reminders) or 'registered'
        (polling day report reminders)
    :raises ValueError: for any other audience value
    """
    logger.debug("Start message_reminder_task")
    if audience not in ('whitelist', 'registered'):
        # repaired: a merge artifact had corrupted this message string
        raise ValueError("Unknown audience type %s - expected whitelist or registered" % audience)
    # Batches need to be owned by somebody - pick a non-random superuser
    user = get_user_model().objects.filter(is_active=True, is_superuser=True)[0]
    batch = Batch.objects.create(
        name="Reminder %d for message_number %d" % (reminder_number, message_number),
        created_by=user,
        priority=Batch.PRIORITY_TIME_CRITICAL)
    # create the corresponding broadcast object
    broadcast = Broadcast.objects.create(
        created_by=batch.created_by,
        batch=batch,
        audience=Broadcast.STAFF_ONLY,
        message=batch.name,  # this message is only temporary
    )
    try:
        if audience == 'whitelist':
            msg = CheckinReminderMessage(message_number, reminder_number)
        else:
            msg = PollingDayReportReminderMessage(message_number, reminder_number)
        batch.add_messages(msg.get_phone_numbers_to_send_to())
        batch.status = Batch.APPROVED
        batch.reviewed_by = user
        batch.save()
        # update the message for the broadcast.
        broadcast.message = msg.get_message_text()
        broadcast.save()
        logger.debug("Batch saved")
    except Exception:
        logger.exception("Error while creating message reminder batch")
        # If anything went wrong, don't leave partial batch lying around in unknown state
        batch.delete()
        broadcast.delete()
        raise
@task
def approve_broadcast(broadcast_id):
    """Creates messages for each individual in the audience and
    changes batch status to approved."""
    broadcast = Broadcast.objects.get(pk=broadcast_id)
    batch = broadcast.batch
    batch.add_messages(broadcast.get_messages())
    batch.status = Batch.APPROVED
    batch.save()
|
# -*- coding: utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from common import public
import time
import re
class qyxx_ck():
    """Mining license (采矿许可证) field checker.

    (Repaired: merge-artifact tokens corrupted the field list and one
    assignment; local variable renamed so it no longer shadows the
    imported `time` module.)
    """

    # fields that need to be checked
    need_check_ziduan = ['valid_from',
                         'validto'
                         ]

    def check_valid_from(self, indexstr, ustr):
        """Check the valid-from date (有效期限自)."""
        ret = None
        validdate = indexstr['validdate'].strip()
        if validdate and len(validdate):
            err, parsed = public.get_date(validdate, 0)
            if err:
                ret = err
            else:
                frm = parsed
                if ustr != frm:
                    ret = u'不等我的是-%s-' % frm
        return ret

    def check_validto(self, indexstr, ustr):
        """Check the valid-until date (有效期限至)."""
        ret = None
        validdate = indexstr['validdate'].strip()
        if validdate and len(validdate):
            err, parsed = public.get_date(validdate, 1)
            if err:
                ret = err
            else:
                frm = parsed
                if ustr != frm:
                    ret = u'不等我的是-%s-' % frm
        return ret
|
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Romain Bignon, Laurent Bachelier
#
# This file is part of assnet.
#
# assnet is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# assnet is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with assnet. If not, see <http://www.gnu.org/licenses/>.
from urlparse import urlsplit, urlunsplit
from urllib import quote
from paste.url import URL
__all__ = ['compact', 'quote_url', 'quote_path', 'quote_and_decode_url']
# Characters that must be percent-encoded inside a path component.
# NOTE: the original literal listed ':' twice; duplicate dict keys
# silently collapse, so it is kept once here.
UNSAFE_CHARS = {
    '?': quote('?'),
    '&': quote('&'),
    ';': quote(';'),
    ':': quote(':'),
    ',': quote(','),
    '=': quote('='),
    ' ': quote(' '),
    '+': quote('+'),
    '$': quote('$'),
    '"': quote('"'),
}
def compact(text):
    """Collapse newlines into spaces and strip surrounding whitespace."""
    flattened = text.replace('\n', ' ')
    return flattened.strip()
def quote_path(path):
    """
    Quote a path (see quote_url)
    """
    return ''.join(UNSAFE_CHARS.get(ch, ch) for ch in path)
def quote_url(url):
    """
    Quote the path part of an URL object and return the full URL as a string.
    Special characters in the URL are not considered as the query string or
    any other parameters, they should be in their dedicated variables
    of the URL class.
    """
    purl = urlsplit(url.url)
    # do not escape the scheme and netloc
    if purl.scheme and purl.netloc:
        # quote only path+query+fragment; scheme://netloc is kept verbatim
        path = urlunsplit((None, None, purl.path, purl.query, purl.fragment))
        basepath = urlunsplit((purl.scheme, purl.netloc, '', None, None))
    else:
        # relative URL: quote the whole thing
        path = url.url
        basepath = ''
    return URL(basepath + quote_path(path), vars=url.vars).href
def quote_and_decode_url(url):
    """
    Like quote_url but for usage in Mako templates
    """
    quoted = quote_url(url)
    return quoted.decode('utf-8')
|
import radon.complexity
import radon.visitors
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.SourceRange import SourceRange
from coalib.settings.Setting import typed_list
class RadonBear(LocalBear):
    def run(self, filename, file,
            radon_ranks_info: typed_list(str)=(),
            radon_ranks_normal: typed_list(str)=('C', 'D'),
            radon_ranks_major: typed_list(str)=('E', 'F')):
        """
        Uses radon to compute complexity of a given file.

        :param radon_ranks_info:   The ranks (given by radon) to
                                   treat as severity INFO.
        :param radon_ranks_normal: The ranks (given by radon) to
                                   treat as severity NORMAL.
        :param radon_ranks_major:  The ranks (given by radon) to
                                   treat as severity MAJOR.
        """
        # Invert the severity->ranks mapping into a rank->severity lookup.
        # Later entries overwrite earlier ones, preserving the original
        # last-match-wins behavior when a rank appears in several lists.
        rank_severities = {}
        for severity, ranks in {
                RESULT_SEVERITY.INFO: radon_ranks_info,
                RESULT_SEVERITY.NORMAL: radon_ranks_normal,
                RESULT_SEVERITY.MAJOR: radon_ranks_major}.items():
            for rank in ranks:
                rank_severities[rank] = severity

        for visitor in radon.complexity.cc_visit("".join(file)):
            rank = radon.complexity.cc_rank(visitor.complexity)
            severity = rank_severities.get(rank)
            if severity is None:
                # Rank not configured for any severity: not reported.
                continue
            affected = SourceRange.from_values(
                filename, visitor.lineno, visitor.col_offset, visitor.endline)
            message = "{} has a cyclomatic complexity of {}".format(
                visitor.name, rank)
            yield Result(self, message, severity=severity,
                         affected_code=(affected,))
|
"" | "Place all plugins in this directory."""
| |
import os
import datetime
from utils.util import run_command
__author__ = 'maa'
class MsBuilder:
    """Thin wrapper around MSBuild.exe for building .csproj projects."""

    # Default MSBuild location for .NET Framework 4 on 64-bit Windows.
    DEFAULT_MSBUILD = r"C:\Windows\Microsoft.NET\Framework64\v4.0.30319\MSBuild.exe"

    def __init__(self, msbuild):
        """
        :param msbuild: Path to MSBuild.exe, or None to use the default.
        """
        # Identity check ('is None') instead of the original '== None'.
        if msbuild is None:
            self.msbuild = self.DEFAULT_MSBUILD
        else:
            self.msbuild = msbuild

    def build_with_params(self, csprojPath, targets, properties):
        """Build csprojPath with explicit targets and properties.

        :param csprojPath: Path to the .csproj file to build.
        :param targets: Iterable of MSBuild target names (joined with ';').
        :param properties: Iterable of 'Name=Value' property strings.
        :return: Result of run_command on the assembled command line.
        :raises Exception: If MSBuild.exe is not found at self.msbuild.
        """
        if not os.path.isfile(self.msbuild):
            raise Exception('MsBuild.exe not found. path = ' + self.msbuild)
        start = datetime.datetime.now()
        print('STARTED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S'))
        params = [self.msbuild, csprojPath]
        params.append('/t:' + ';'.join(targets))
        params.append('/p:' + ';'.join(properties))
        return run_command(params)

    def build(self, csprojPath, args):
        """Build csprojPath, passing raw MSBuild arguments straight through.

        :param args: Iterable of extra command-line arguments for MSBuild.
        :raises Exception: If MSBuild.exe is not found at self.msbuild.
        """
        if not os.path.isfile(self.msbuild):
            raise Exception('MsBuild.exe not found. path = ' + self.msbuild)
        start = datetime.datetime.now()
        print('STARTED BUILD - ' + start.strftime('%Y-%m-%d %H:%M:%S'))
        return run_command([self.msbuild, csprojPath] + list(args))

    def get_files_from_project_bin_folder(self, csproj, configuration, do_return_full_paths=False):
        """List files in the project's bin/<configuration> folder.

        :param csproj: Path to the .csproj file; its directory is the project root.
        :param configuration: Build configuration name, e.g. 'Debug' or 'Release'.
        :param do_return_full_paths: If True, return absolute paths instead of
            bare file names.
        """
        project_dir = os.path.dirname(os.path.realpath(csproj))
        bin_config_path = os.path.join(project_dir, 'bin', configuration)
        files = os.listdir(bin_config_path)
        if not do_return_full_paths:
            return files
        return [os.path.join(bin_config_path, f) for f in files]
|
# -*- coding: utf-8 -*-
# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>
# This file is part of Mathmaker.
# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.
# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import pytest
from mathmaker.lib.core.root_calculus import Value
from mathmaker.lib.core.base_geometry import Point
from mathmaker.lib.core.geometry import Polygon
@pytest.fixture
def p1():
    """Quadrilateral ABCD with every side and angle labeled, and every
    angle marked 'simple'."""
    polygon = Polygon([Point('A', 0.5, 0.5),
                       Point('B', 3, 1),
                       Point('C', 3.2, 4),
                       Point('D', 0.8, 3)
                       ])
    # Side lengths in cm, in side order 0..3.
    for i, length in enumerate((4, 3, 2, 6.5)):
        polygon.side[i].label = Value(length, unit='cm')
    # Angle measures in degrees, in vertex order 0..3; all marked 'simple'.
    for i, measure in enumerate((64, 128, 32, 256)):
        polygon.angle[i].label = Value(measure, unit="\\textdegree")
        polygon.angle[i].mark = 'simple'
    return polygon
def test_p1_into_euk(p1):
    """Check Polygon's generated euk file."""
    # The expected euk source is compared verbatim: bounding box, vertex
    # definitions, side labels (with rotation/offset placement data),
    # angle labels, vertex name labels and the angle marks.
    assert p1.into_euk() == \
        'box -0.1, -0.1, 3.8, 4.6\n\n'\
        'A = point(0.5, 0.5)\n'\
        'B = point(3, 1)\n'\
        'C = point(3.2, 4)\n'\
        'D = point(0.8, 3)\n'\
        '\n'\
        'draw\n'\
        ' (A.B.C.D)\n'\
        ' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
        ' $\\rotatebox{86}{\sffamily 3~cm}$ B 86 - 8.9 deg 4.9\n'\
        ' $\\rotatebox{23}{\sffamily 2~cm}$ C 203 - 12.2 deg 4.2\n'\
        ' $\\rotatebox{83}{\sffamily 6.5~cm}$ D 263 - 12.9 deg 4.1\n'\
        ' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
        ' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ B 138.7 deg 2.7\n'\
        ' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ C 234.3 deg 2.7\n'\
        ' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ D 322.7 deg 2.7\n'\
        ' "A" A 227.3 deg, font("sffamily")\n'\
        ' "B" B 318.7 deg, font("sffamily")\n'\
        ' "C" C 54.3 deg, font("sffamily")\n'\
        ' "D" D 142.7 deg, font("sffamily")\n'\
        'end\n\n'\
        'label\n'\
        ' B, A, D simple\n'\
        ' C, B, A simple\n'\
        ' D, C, B simple\n'\
        ' A, D, C simple\n'\
        'end\n'
def test_p1_rename_errors(p1):
    """Check wrong arguments trigger exceptions when renaming."""
    # Non-string names raise TypeError; wrong-length names raise ValueError.
    for expected_error, bad_name in ((TypeError, 5678),
                                     (ValueError, 'KJLIZ')):
        with pytest.raises(expected_error):
            p1.rename(bad_name)
def test_p1_renamed(p1):
    """Check renaming Polygon is OK."""
    # After rename('YOGA') the vertices are relabeled in reverse order of the
    # given string (A, G, O, Y); the generated euk output must use the new
    # names everywhere while keeping all geometry and label placement data.
    p1.rename('YOGA')
    assert p1.into_euk() == \
        'box -0.1, -0.1, 3.8, 4.6\n\n'\
        'A = point(0.5, 0.5)\n'\
        'G = point(3, 1)\n'\
        'O = point(3.2, 4)\n'\
        'Y = point(0.8, 3)\n'\
        '\n'\
        'draw\n'\
        ' (A.G.O.Y)\n'\
        ' $\\rotatebox{11}{\sffamily 4~cm}$ A 11 - 12.7 deg 4.1\n'\
        ' $\\rotatebox{86}{\sffamily 3~cm}$ G 86 - 8.9 deg 4.9\n'\
        ' $\\rotatebox{23}{\sffamily 2~cm}$ O 203 - 12.2 deg 4.2\n'\
        ' $\\rotatebox{83}{\sffamily 6.5~cm}$ Y 263 - 12.9 deg 4.1\n'\
        ' $\\rotatebox{47.3}{\sffamily 64\\textdegree}$ A 47.3 deg 2.7\n'\
        ' $\\rotatebox{-41.3}{\sffamily 128\\textdegree}$ G 138.7 deg 2.7\n'\
        ' $\\rotatebox{54.3}{\sffamily 32\\textdegree}$ O 234.3 deg 2.7\n'\
        ' $\\rotatebox{322.7}{\sffamily 256\\textdegree}$ Y 322.7 deg 2.7\n'\
        ' "A" A 227.3 deg, font("sffamily")\n'\
        ' "G" G 318.7 deg, font("sffamily")\n'\
        ' "O" O 54.3 deg, font("sffamily")\n'\
        ' "Y" Y 142.7 deg, font("sffamily")\n'\
        'end\n\n'\
        'label\n'\
        ' G, A, Y simple\n'\
        ' O, G, A simple\n'\
        ' Y, O, G simple\n'\
        ' A, Y, O simple\n'\
        'end\n'
|
from idl.Annotatable import Annotatable
from idl.IDLSyntaxError import IDLSyntaxError
from idl.Type import Type
class EnumField(Annotatable):
    '''
    A single named constant belonging to an enumeration type.
    '''

    def __init__(self, enum, name, value):
        Annotatable.__init__(self)

        # Owning Enum, field name and resolved integer value.
        self._enum = enum
        self._name = name
        self._value = value

    @property
    def enum(self):
        '''
        The enumeration type that owns this field.
        '''
        return self._enum

    @property
    def name(self):
        '''
        Name of this field.
        '''
        return self._name

    @property
    def value(self):
        '''
        Integer value assigned to this field.
        '''
        return self._value
class Enum(Type):
    """IDL enumeration type: builds EnumField objects from a parsed
    descriptor, rejecting duplicate names and duplicate explicit values."""

    def __init__(self, module, desc):
        # desc holds the parsed field definitions (name, optional value
        # expression, annotations, source line).
        Type.__init__(self, module, Type.ENUM, desc.name)

        self._desc = desc

        self._fields = []

        for field in self._desc.fields:
            if field.value:
                # Evaluate value
                # NOTE(review): eval() on descriptor text executes arbitrary
                # code — safe only if the IDL sources are trusted. Consider
                # int(field.value, 0) if only integer literals are expected.
                value = eval(field.value)

                # Duplicate value check
                for i in self._fields:
                    if i.value == value:
                        raise IDLSyntaxError(self.module,
                                             field.line,
                                             'Duplicate explicit field value %d given for field %r in enumeration %r' % (value, field.name, self.pathStr)
                                             )
            else:
                # No explicit value: take the lowest non-taken integer.
                value = self._generateFieldValue()

            newField = EnumField(self, field.name, value)

            # Duplicate name check
            if self.getField(newField.name):
                raise IDLSyntaxError(self.module,
                                     field.line,
                                     'Duplicate field name %r in enumeration %r' % (newField.name, self.pathStr)
                                     )

            # Annotations
            newField._assignAnnotations(field.annotations)

            self._fields.append(newField)

    @property
    def fields(self):
        '''
        List of enumeration fields.
        '''
        return self._fields

    def getField(self, name):
        '''
        Gets a field with a specific name.

        @param name: Field name.
        @return: EnumField object or None.
        '''
        for field in self._fields:
            if field.name == name:
                return field
        return None

    def _generateFieldValue(self):
        # Assign value
        # Linear scan for the smallest integer >= 0 not already used.
        value = 0
        while True:
            taken = False
            for field in self._fields:
                if field.value == value:
                    taken = True
                    value += 1
                    break
            if not taken:
                break
        return value
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2006, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import signal
# Maps a signal number to the exception raised by SignalHandler.enable_signal
# when that signal fired while delivery was disabled.
exception = {
    signal.SIGINT:KeyboardInterrupt
}
class Signal:
    """Bookkeeping for one deferred signal: remembers the handler that was
    installed before we took over, and whether the signal fired meanwhile."""

    def __init__(self, sig):
        # Signal number being tracked.
        self.signal = sig
        # True once the signal is delivered while disabled.
        self.pending = False
        # Previous handler, restored by SignalHandler.enable_signal.
        self.oldhandler = signal.getsignal(sig)
class SignalHandler:
    """Temporarily defers delivery of selected signals.

    While a signal is disabled, its delivery is recorded instead of handled;
    re-enabling restores the previous handler and raises the mapped
    exception if the signal fired in the meantime.
    """

    def __init__(self):
        # Maps signal number -> Signal bookkeeping entry, one per
        # currently-disabled signal.
        self.signals = {}

    def signal_handler(self, sig, frame):
        """Installed while a signal is disabled: ignore further deliveries
        and remember that the signal fired."""
        signal.signal(sig, signal.SIG_IGN)
        self.signals[sig].pending = True

    def disable_signal(self, sig):
        """Start deferring `sig` (no-op if already disabled)."""
        # Membership test on the dict directly instead of `.keys()`.
        if sig not in self.signals:
            self.signals[sig] = Signal(sig)
            signal.signal(sig, self.signal_handler)

    def enable_signal(self, sig):
        """Restore the original handler for `sig`; raise the mapped
        exception if the signal fired while it was disabled."""
        if sig in self.signals:
            if self.signals[sig].oldhandler:
                oldhandler = self.signals[sig].oldhandler
            else:
                oldhandler = signal.SIG_DFL
            pending = self.signals[sig].pending
            del self.signals[sig]
            signal.signal(sig, oldhandler)
            if pending:
                raise exception[sig]

    def signal_disabled(self, sig):
        """Return True if `sig` is currently being deferred."""
        return sig in self.signals

    def signal_pending(self, sig):
        """Return True if `sig` is disabled and fired while disabled."""
        return self.signal_disabled(sig) and self.signals[sig].pending
|
-----
If the input arrays are of dimension deficient by one, for example
if the coordinates array is two dimensional, the time is a single
scalar or cell_lengths and cell_angles are a 1d array of length three,
that is okay. You'll simply be saving a single frame.
"""
self._validate_open()
if self._mode not in ['w', 'ws', 'a', 'as']:
raise IOError('The file was opened in mode=%s. Writing is not allowed.' % self._mode)
coordinates = in_units_of(coordinates, None, 'angstroms')
time = in_units_of(time, None, 'picoseconds')
cell_lengths = in_units_of(cell_lengths, None, 'angstroms')
cell_angles = in_units_of(cell_angles, None, 'degrees')
# typecheck all of the input arguments rigorously
coordinates = ensure_type(coordinates, np.float32, 3, 'coordinates', length=None,
can_be_none=False, shape=(None, None, 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
n_frames, n_atoms = coordinates.shape[0], coordinates.shape[1]
time = ensure_type(time, np.float32, 1, 'time', length=n_frames,
can_be_none=True, warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
cell_lengths = ensure_type(cell_lengths, np.float64, 2, 'cell_lengths', length=n_frames,
can_be_none=True, shape=(n_frames, 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
cell_angles = ensure_type(cell_angles, np.float64, 2, 'cell_angles', length=n_frames,
can_be_none=True, shape=(n_frames, 3), warn_on_cast=False, add_newaxis_on_deficient_ndim=True)
# are we dealing with a periodic system?
if (cell_lengths is None and cell_angles is not None) or (cell_lengths is not None and cell_angles is None):
provided, neglected = 'cell_lengths', 'cell_angles'
if cell_lengths is None:
provided, neglected = neglected, provided
raise ValueError('You provided the variable "%s", but neglected to '
'provide "%s". They either BOTH must be provided, or '
'neither. Having one without the other is meaningless' % (
provided, neglected))
if self._needs_initialization:
self._initialize_headers(
n_atoms=n_atoms,
set_coordinates=True,
set_time=(time is not None),
set_cell=(cell_lengths is not None and cell_angles is not None))
self._needs_initialization = False
# this slice object says where we're going to put the data in the
# arrays
frame_slice = slice(self._frame_index, self._frame_index + n_frames)
# deposit the data
try:
self._handle.variables['coordinates'][frame_slice, :, :] = coordinates
if time is not None:
self._handle.variables['time'][frame_slice] = time
if cell_lengths is not None:
self._handle.variables['cell_lengths'][frame_slice, :] = cell_lengths
if cell_angles is not None:
self._handle.variables['cell_angles'][frame_slice, :] = cell_angles
except KeyError as e:
raise ValueError("The file that you're trying to save to doesn't "
"contain the field %s." % str(e))
# check for missing attributes
missing = None
if (time is None and 'time' in self._handle.variables):
missing = 'time'
elif (cell_angles is None and 'cell_angles' in self._handle.variables):
missing = 'cell_angles'
elif (cell_lengths is None and 'cell_lengths' in self._handle.variables):
missing = 'cell_lengths'
if missing is not None:
raise ValueError("The file that you're saving to expects each frame "
"to contain %s information, but you did not supply it."
"I don't allow 'ragged' arrays." % missing)
# update the frame index pointers. this should be done at the
# end so that if anything errors out, we don't actually get here
self._frame_index += n_frames
def flush(self):
    """Write all buffered data to the disk file."""
    self._validate_open()
    # Delegate to the underlying NetCDF handle's sync.
    self._handle.sync()
def _initialize_headers(self, set_coordinates, n_atoms, set_time, set_cell):
    """Initialize the NetCDF file according to the AMBER NetCDF Convention,
    Version 1.0, revision B.

    The convention is defined here: http://ambermd.org/netcdf/nctraj.xhtml

    Parameters
    ----------
    set_coordinates : bool
        Whether to create the per-frame 'coordinates' variable.
    n_atoms : int
        Number of atoms; fixes the size of the 'atom' dimension.
    set_time : bool
        Whether to create the per-frame 'time' variable.
    set_cell : bool
        Whether to create the unit-cell dimensions and variables.
    """
    # Set attributes.
    setattr(self._handle, 'title', 'CREATED at %s on %s' %
            (datetime.now(), socket.gethostname()))
    setattr(self._handle, 'application', 'Omnia')
    setattr(self._handle, 'program', 'MDTraj')
    setattr(self._handle, 'programVersion', version.short_version)
    setattr(self._handle, 'Conventions', 'AMBER')
    setattr(self._handle, 'ConventionVersion', '1.0')

    # set the dimensions
    # unlimited number of frames in trajectory
    self._handle.createDimension('frame', 0)
    # number of spatial coordinates
    self._handle.createDimension('spatial', 3)
    # number of atoms
    self._handle.createDimension('atom', n_atoms)

    if set_cell:
        # three spatial coordinates for the length of the unit cell
        self._handle.createDimension('cell_spatial', 3)
        # three spatial coordinates for the angles that define the shape
        # of the unit cell
        self._handle.createDimension('cell_angular', 3)
        # length of the longest string used for a label
        self._handle.createDimension('label', 5)

        # Define variables to store unit cell data
        self._handle.createVariable('cell_spatial', 'c', ('cell_spatial',))
        cell_angles = self._handle.createVariable('cell_angular', 'c', ('cell_spatial', 'label'))
        cell_lengths = self._handle.createVariable('cell_lengths', 'd', ('frame', 'cell_spatial'))
        setattr(cell_lengths, 'units', 'angstrom')
        cell_angles = self._handle.createVariable('cell_angles', 'd', ('frame', 'cell_angular'))
        setattr(cell_angles, 'units', 'degree')

        # Label the axes per the AMBER convention.
        self._handle.variables['cell_spatial'][0] = 'x'
        self._handle.variables['cell_spatial'][1] = 'y'
        self._handle.variables['cell_spatial'][2] = 'z'

        self._handle.variables['cell_angular'][0] = 'alpha'
        self._handle.variables['cell_angular'][1] = 'beta '
        self._handle.variables['cell_angular'][2] = 'gamma'

    if set_time:
        # Define coordinates and snapshot times.
        frame_times = self._handle.createVariable('time', 'f', ('frame',))
        setattr(frame_times, 'units', 'picosecond')

    if set_coordinates:
        frame_coordinates = self._handle.createVariable('coordinates', 'f', ('frame', 'atom', 'spatial'))
        setattr(frame_coordinates, 'units', 'angstrom')
def seek(self, offset, whence=0):
    """Move to a new file position

    Parameters
    ----------
    offset : int
        A number of frames.
    whence : {0, 1, 2}
        0: offset from start of file, offset should be >=0.
        1: move relative to the current position, positive or negative
        2: move relative to the end of file, offset should be <= 0.

    Seeking beyond the end of a file is not supported
    """
    if whence == 0:
        if offset < 0:
            raise IOError('Invalid argument')
        self._frame_index = offset
    elif whence == 1:
        # Relative seek: any sign of offset is allowed.
        self._frame_index += offset
    elif whence == 2:
        if offset > 0:
            raise IOError('Invalid argument')
        self._frame_index = self.n_frames + offset
    else:
        raise IOError('Invalid argument')
def tell(self):
    """Current file position

    Returns
    -------
    offset : int
        The current frame in the file.
    """
    position = self._frame_index
    return int(position)
| def close(self):
"""Close |
nd std of the images on the training set
PARAMETER_MAX = 10 # What is the max 'level' a transform could be predicted
def get_mean_and_std():
    """Return per-channel pixel means and stds of the training set.

    The statistics are precomputed for the task selected by
    ``FLAGS.task_name`` ("cifar10" or "svhn").

    Returns:
        A (means, stds) pair of length-3 lists (one entry per RGB channel).

    Raises:
        ValueError: If ``FLAGS.task_name`` is not a supported task.
    """
    if FLAGS.task_name == "cifar10":
        means = [0.49139968, 0.48215841, 0.44653091]
        stds = [0.24703223, 0.24348513, 0.26158784]
    elif FLAGS.task_name == "svhn":
        means = [0.4376821, 0.4437697, 0.47280442]
        stds = [0.19803012, 0.20101562, 0.19703614]
    else:
        # Was `assert False`: asserts are stripped under -O, so raise an
        # explicit error for unknown tasks instead.
        raise ValueError("Unknown task name: %s" % FLAGS.task_name)
    return means, stds
def _width_height_from_img_shape(img_shape):
"""`img_shape` in autoaugment is (height, width)."""
return (img_shape[1], img_shape[0])
def random_flip(x):
    """Flip the input x horizontally with 50% probability."""
    coin = np.random.rand(1)[0]
    if coin > 0.5:
        x = np.fliplr(x)
    return x
def zero_pad_and_crop(img, amount=4):
    """Zero pad by `amount` zero pixels on each side then take a random crop.

    Args:
        img: numpy image that will be zero padded and cropped.
        amount: amount of zeros to pad `img` with horizontally and verically.

    Returns:
        The cropped zero padded img. The returned numpy array will be of the
        same shape as `img`.
    """
    height, width = img.shape[0], img.shape[1]
    padded_img = np.zeros((height + amount * 2,
                           width + amount * 2,
                           img.shape[2]))
    # Center the original image inside the zero border.
    padded_img[amount:height + amount, amount:width + amount, :] = img
    # Random crop offset within the padded border.
    top = np.random.randint(low=0, high=2 * amount)
    left = np.random.randint(low=0, high=2 * amount)
    return padded_img[top:top + height, left:left + width, :]
def create_cutout_mask(img_height, img_width, num_channels, size):
    """Creates a zero mask used for cutout of shape `img_height` x `img_width`.

    Args:
        img_height: Height of image cutout mask will be applied to.
        img_width: Width of image cutout mask will be applied to.
        num_channels: Number of channels in the image.
        size: Size of the zeros mask.

    Returns:
        A mask of shape `img_height` x `img_width` with all ones except for a
        square of zeros of shape `size` x `size`. This mask is meant to be
        elementwise multiplied with the original image. Additionally returns
        the `upper_coord` and `lower_coord` which specify where the cutout
        mask will be applied.
    """
    assert img_height == img_width

    # Sample center where cutout mask will be applied
    center_h = np.random.randint(low=0, high=img_height)
    center_w = np.random.randint(low=0, high=img_width)

    # Determine upper right and lower left corners of patch, clamped
    # to the image boundaries.
    upper_coord = (max(0, center_h - size // 2), max(0, center_w - size // 2))
    lower_coord = (min(img_height, center_h + size // 2),
                   min(img_width, center_w + size // 2))
    mask_height = lower_coord[0] - upper_coord[0]
    mask_width = lower_coord[1] - upper_coord[1]
    assert mask_height > 0
    assert mask_width > 0

    mask = np.ones((img_height, img_width, num_channels))
    # Zero out the patch region directly.
    mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = 0
    return mask, upper_coord, lower_coord
def cutout_numpy(img, size=16):
    """Apply cutout with mask of shape `size` x `size` to `img`.

    The cutout operation is from the paper https://arxiv.org/abs/1708.04552.
    This operation applies a `size`x`size` mask of zeros to a random location
    within `img`.

    Args:
        img: Numpy image that cutout will be applied to.
        size: Height/width of the cutout mask that will be

    Returns:
        A numpy tensor that is the result of applying the cutout mask to
        `img`.
    """
    height, width, channels = (img.shape[0], img.shape[1], img.shape[2])
    assert len(img.shape) == 3
    mask, _, _ = create_cutout_mask(height, width, channels, size)
    return img * mask
def float_parameter(level, maxval):
    """Helper function to scale `val` between 0 and maxval .

    Args:
        level: Level of the operation that will be between [0, `PARAMETER_MAX`].
        maxval: Maximum value that the operation can have. This will be scaled
            to level/PARAMETER_MAX.

    Returns:
        A float that results from scaling `maxval` according to `level`.
    """
    # Same operation order as before to keep float results bit-identical.
    scaled = float(level) * maxval
    return scaled / PARAMETER_MAX
def int_parameter(level, maxval):
    """Helper function to scale `val` between 0 and maxval .

    Args:
        level: Level of the operation that will be between [0, `PARAMETER_MAX`].
        maxval: Maximum value that the operation can have. This will be scaled
            to level/PARAMETER_MAX.

    Returns:
        An int that results from scaling `maxval` according to `level`.
    """
    # Truncating (not rounding) division, exactly as before.
    scaled = level * maxval / PARAMETER_MAX
    return int(scaled)
def pil_wrap(img, use_mean_std):
    """Convert the `img` numpy tensor to a PIL Image.

    Args:
        img: Normalized image tensor (values roughly in [0, 1] after
            de-normalization with the dataset mean/std).
        use_mean_std: If True, de-normalize with the task's training-set
            statistics; otherwise assume the image is already in [0, 1].

    Returns:
        An RGBA PIL Image scaled to [0, 255].
    """
    if use_mean_std:
        MEANS, STDS = get_mean_and_std()
    else:
        MEANS = [0, 0, 0]
        STDS = [1, 1, 1]
    # Fix: the original also computed `img_ori = (img * STDS + MEANS) * 255`
    # and never used it; that dead computation has been removed.
    return Image.fromarray(
        np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA')
def pil_unwrap(pil_img, use_mean_std, img_shape):
    """Converts the PIL img to a numpy array."""
    if use_mean_std:
        MEANS, STDS = get_mean_and_std()
    else:
        MEANS = [0, 0, 0]
        STDS = [1, 1, 1]
    height, width = img_shape[0], img_shape[1]
    rgba = np.array(pil_img.getdata()).reshape((height, width, 4)) / 255.0
    # Pixels whose alpha channel is 0 are forced to black after
    # re-normalization.
    rows, cols = np.where(rgba[:, :, 3] == 0)
    pixels = (rgba[:, :, :3] - MEANS) / STDS
    pixels[rows, cols] = [0, 0, 0]
    return pixels
def apply_policy(policy, img, use_mean_std=True):
    """Apply the `policy` to the numpy `img`.

    Args:
        policy: A list of tuples with the form (name, probability, level)
            where `name` is the name of the augmentation operation to apply,
            `probability` is the probability of applying the operation and
            `level` is what strength the operation to apply.
        img: Numpy image that will have `policy` applied to it.

    Returns:
        The result of applying `policy` to `img`.
    """
    img_shape = img.shape
    transformed = pil_wrap(img, use_mean_std)
    for xform in policy:
        assert len(xform) == 3
        name, probability, level = xform
        # Look up the named transform and apply it stochastically.
        transform = NAME_TO_TRANSFORM[name].pil_transformer(
            probability, level, img_shape)
        transformed = transform(transformed)
    return pil_unwrap(transformed, use_mean_std, img_shape)
class TransformFunction(object):
    """Wraps the Transform function for pretty printing options."""

    def __init__(self, func, name):
        self.f = func
        self.name = name

    def __repr__(self):
        return '<{}>'.format(self.name)

    def __call__(self, pil_img):
        return self.f(pil_img)
class TransformT(object):
    """Each instance of this class represents a specific transform."""

    def __init__(self, name, xform_fn):
        self.name = name
        self.xform = xform_fn

    def pil_transformer(self, probability, level, img_shape):
        """Return a TransformFunction applying this transform with
        `probability` at strength `level`."""
        def return_function(im):
            # Coin-flip per call: only apply the transform with the
            # requested probability.
            if random.random() < probability:
                im = self.xform(im, level, img_shape)
            return im

        label = '{}({:.1f},{})'.format(self.name, probability, level)
        return TransformFunction(return_function, label)
################## Transform Functions ##################
# Each TransformT pairs a display name with a callable taking
# (pil_img, level, img_shape); `level` is unused by the level-free ops.
identity = TransformT('identity', lambda pil_img, level, _: pil_img)
flip_lr = TransformT(
    'FlipLR',
    lambda pil_img, level, _: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
    'FlipUD',
    lambda pil_img, level, _: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
# The ImageOps operations below require RGB input, so each round-trips
# through an RGB conversion and back to RGBA.
auto_contrast = TransformT(
    'AutoContrast',
    lambda pil_img, level, _: ImageOps.autocontrast(
        pil_img.convert('RGB')).convert('RGBA'))
equalize = TransformT(
    'Equalize',
    lambda pil_img, level, _: ImageOps.equalize(
        pil_img.convert('RGB')).convert('RGBA'))
invert = TransformT(
    'Invert',
    lambda pil_img, level, _: ImageOps.invert(
        pil_img.convert('RGB')).convert('RGBA'))
# pylint:enable=g-long-lambda
blur = TransformT(
    'Blur', lambda pil_img, level, _: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
    'Smooth',
    lambda pil_img, level, _: pil_img.filter(ImageFilter.SMOOTH))
def _rotate_impl(pil_img, level, _):
"""Rotates `pil_img` from -30 to 30 degrees depending on `level`."""
degrees = int_parameter(level, 30)
if ran |
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Righ | ts Reserved.
#
# This program is free software: you can redistribute it and/or modify
# | it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo addon manifest for the Project Description module.
{
    'name': 'Project Description',
    'version': '8.0.1.0.0',
    'category': 'Projects & Services',
    'sequence': 14,
    'summary': '',
    'description': """
Project Description
===================
Adds account_analytic_account description field on project form view
""",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    'images': [
    ],
    # Modules that must be installed before this one.
    'depends': [
        'project',
    ],
    # XML data files loaded on install/update.
    'data': [
        'view/project_view.xml',
    ],
    'demo': [
    ],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
    'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
#!/usr/bin/python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations unde | r the License.
import zookeeper, zktestbase, unittest, threading
class DeletionTest(zktestbase.TestBase):
    """Test whether we can delete znodes"""

    def test_sync_delete(self):
        """Create an ephemeral znode, delete it synchronously, and verify
        it is gone and that deleting it again raises NoNodeException."""
        # World-readable/writable ACL for the throwaway test node.
        ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
        self.assertEqual(self.connected, True)
        ret = zookeeper.create(self.handle, "/zk-python-deletetest", "nodecontents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL)
        self.assertEqual(ret, "/zk-python-deletetest")
        ret = zookeeper.delete(self.handle,"/zk-python-deletetest")
        self.assertEqual(ret, zookeeper.OK)
        children = zookeeper.get_children(self.handle, "/")
        self.assertEqual(False, "zk-python-deletetest" in children)

        # test exception
        self.assertRaises(zookeeper.NoNodeException,
                          zookeeper.delete,
                          self.handle,
                          "/zk-python-deletetest")

    def test_async_delete(self):
        """Delete a znode via the asynchronous API and wait on a condition
        variable until the completion callback fires."""
        ZOO_OPEN_ACL_UNSAFE = {"perms":0x1f, "scheme":"world", "id" :"anyone"}
        self.assertEqual(self.connected, True)
        ret = zookeeper.create(self.handle, "/zk-python-adeletetest", "nodecontents", [ZOO_OPEN_ACL_UNSAFE], zookeeper.EPHEMERAL)
        self.assertEqual(ret, "/zk-python-adeletetest")

        self.cv = threading.Condition()
        self.callback_flag = False
        self.rc = -1
        def callback(handle, rc):
            self.cv.acquire()
            self.callback_flag = True
            self.cv.notify()
            self.rc = rc # don't assert this here, as if the assertion fails, the test will block
            self.cv.release()

        self.cv.acquire()
        ret = zookeeper.adelete(self.handle,"/zk-python-adeletetest",-1,callback)
        self.assertEqual(ret, zookeeper.OK, "adelete failed")
        # Wait (with a 15s timeout per wakeup) for the callback to signal.
        while not self.callback_flag:
            self.cv.wait(15)
        self.cv.release()
        self.assertEqual(self.callback_flag, True, "adelete timed out")
        self.assertEqual(self.rc, zookeeper.OK)
# Run the deletion tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
from selenium import webd | river
import logging
logger = logging.getLogger()
driver = | webdriver.Firefox()
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0201
from recipe_engine import recipe_api
from recipe_engine import config_types
class CheckoutApi(recipe_api.RecipeApi):
  """Recipe module for obtaining a source checkout, either via plain git or
  via gclient/bot_update (optionally including Flutter and/or Chromium)."""

  @property
  def default_checkout_root(self):
    """The default location for cached persistent checkouts."""
    return self.m.vars.cache_dir.join('work')

  def git(self, checkout_root):
    """Run the steps to perform a pure-git checkout without DEPS."""
    skia_dir = checkout_root.join('skia')
    self.m.git.checkout(
        self.m.properties['repository'], dir_path=skia_dir,
        ref=self.m.properties['revision'], submodules=False)
    if self.m.vars.is_trybot:
      # For try jobs, apply the patch ref on top of the requested revision.
      self.m.git('fetch', 'origin', self.m.properties['patch_ref'])
      self.m.git('checkout', 'FETCH_HEAD')
      self.m.git('rebase', self.m.properties['revision'])
    return self.m.properties['revision']

  def bot_update(self, checkout_root, gclient_cache=None,
                 checkout_chromium=False, checkout_flutter=False,
                 extra_gclient_env=None, parent_rev=False,
                 flutter_android=False):
    """Run the steps to obtain a checkout using bot_update.

    Args:
      checkout_root: Root directory where the code will be synced.
      gclient_cache: Optional, directory of the gclient cache.
      checkout_chromium: If True, will check out chromium/src.git in addition
          to the primary repo.
      checkout_flutter: If True, will checkout flutter in addition to the
          primary repo.
      extra_gclient_env: Map of extra environment variable names to their values
          to supply while running gclient.
      parent_rev: If True, checks out the parent of the specified revision,
          rather than the revision itself, ie. HEAD^ for normal jobs and HEAD
          (no patch) for try jobs.
      flutter_android: Indicates that we're checking out flutter for Android.

    Returns:
      The 'got_revision' property reported by bot_update.
    """
    if not gclient_cache:
      gclient_cache = self.m.vars.cache_dir.join('git')
    if not extra_gclient_env:
      extra_gclient_env = {}

    cfg_kwargs = {}

    # Use a persistent gclient cache for Swarming.
    cfg_kwargs['CACHE_DIR'] = gclient_cache

    # Create the checkout path if necessary.
    # TODO(borenet): 'makedirs checkout_root'
    self.m.file.ensure_directory('makedirs checkout_path', checkout_root)

    # Initial cleanup.
    gclient_cfg = self.m.gclient.make_config(**cfg_kwargs)

    main_repo = self.m.properties['repository']
    if checkout_flutter:
      main_repo = 'https://github.com/flutter/engine.git'
    main_name = self.m.path.basename(main_repo)
    if main_name.endswith('.git'):
      main_name = main_name[:-len('.git')]
      # Special case for flutter because it seems to need a very specific
      # directory structure to successfully build.
      if checkout_flutter and main_name == 'engine':
        main_name = 'src/flutter'
    main = gclient_cfg.solutions.add()
    main.name = main_name
    main.managed = False
    main.url = main_repo
    main.revision = self.m.properties.get('revision') or 'origin/master'
    m = gclient_cfg.got_revision_mapping
    m[main_name] = 'got_revision'
    patch_root = main_name
    patch_repo = main.url
    if self.m.properties.get('patch_repo'):
      patch_repo = self.m.properties['patch_repo']
      patch_root = patch_repo.split('/')[-1]
      if patch_root.endswith('.git'):
        patch_root = patch_root[:-4]

    if checkout_flutter:
      # Skia is a DEP of Flutter; the 'revision' property is a Skia revision,
      # and any patch should be applied to Skia, not Flutter.
      main.revision = 'origin/master'
      main.managed = True
      m[main_name] = 'got_flutter_revision'
      if flutter_android:
        gclient_cfg.target_os.add('android')

      skia_dep_path = 'src/third_party/skia'
      gclient_cfg.repo_path_map['https://skia.googlesource.com/skia'] = (
          skia_dep_path, 'HEAD')
      gclient_cfg.revisions[skia_dep_path] = self.m.properties['revision']
      m[skia_dep_path] = 'got_revision'
      patch_root = skia_dep_path

    if checkout_chromium:
      main.custom_vars['checkout_chromium'] = True
      extra_gclient_env['GYP_CHROMIUM_NO_ACTION'] = '0'

    # TODO(rmistry): Remove the below block after there is a solution for
    # crbug.com/616443
    entries_file = checkout_root.join('.gclient_entries')
    if self.m.path.exists(entries_file) or self._test_data.enabled:
      self.m.file.remove('remove %s' % entries_file,
                         entries_file)

    # Run bot_update.
    if not self.m.vars.is_trybot and parent_rev:
      # Sync the parent of the requested revision.
      main.revision = main.revision + '^'

    patch_refs = None
    patch_ref = self.m.properties.get('patch_ref')
    if patch_ref:
      patch_refs = ['%s@%s:%s' % (self.m.properties['patch_repo'],
                                  self.m.properties['revision'],
                                  patch_ref)]

    self.m.gclient.c = gclient_cfg
    with self.m.context(cwd=checkout_root):
      update_step = self.m.bot_update.ensure_checkout(
          patch_root=patch_root,
          # The logic in ensure_checkout for this arg is fairly naive, so if
          # patch=False, we'll see "... (without patch)" in the step names, even
          # for non-trybot runs, which is misleading and confusing. Therefore,
          # always specify patch=True for non-trybot runs.
          patch=not (self.m.vars.is_trybot and parent_rev),
          patch_refs=patch_refs,
      )

    if checkout_chromium or checkout_flutter:
      gclient_env = {'DEPOT_TOOLS_UPDATE': '0'}
      if extra_gclient_env:
        gclient_env.update(extra_gclient_env)
      with self.m.context(cwd=checkout_root, env=gclient_env):
        self.m.gclient.runhooks()

    return update_step.presentation.properties['got_revision']
|
""" Tablib - DataFrame Support.
"""
import sys
if sys.version_info[0] > 2:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
try:
from pandas import DataFrame
except ImportError:
DataFrame = None
import tablib
from tablib | .compat import unicode
title = 'df'
extensions = ('df', )
def detect(stream):
    """Return True when *stream* can be interpreted as a DataFrame."""
    if DataFrame is None:
        # pandas is not installed; nothing can be a DataFrame.
        return False
    try:
        DataFrame(stream)
    except ValueError:
        return False
    return True
def export_set(dset, index=None):
    """Return a pandas DataFrame representation of the given Dataset.

    (Docstring fixed: this exports a data*set*, not a DataBook.)

    ``index`` is accepted for interface compatibility but is currently
    unused -- the DataFrame keeps its default integer index.

    Raises NotImplementedError when pandas is not installed.
    """
    if DataFrame is None:
        raise NotImplementedError(
            'DataFrame Format requires `pandas` to be installed.'
            ' Try `pip install tablib[pandas]`.')
    dataframe = DataFrame(dset.dict, columns=dset.headers)
    return dataframe
def import_set(dset, in_stream):
    """Populate *dset* in place from a pandas DataFrame."""
    dset.wipe()
    records = in_stream.to_dict(orient='records')
    dset.dict = records
|
import os
from lxml import etree
class device:
    """A network device identified by IP address; status is 'on'/'off'."""

    def __init__(self, ipadress, name):
        # Normalise both fields to strings so callers may pass any type.
        self.ipadress = str(ipadress)
        self.name = str(name)
        self.status = "off"

    def turn_on(self):
        """Mark the device as reachable."""
        self.status = "on"

    def turn_off(self):
        """Mark the device as unreachable."""
        self.status = "off"
def getstatus(devices):
    """Ping-scan the local /24 with nmap and set each device's status.

    Every device is first turned off, then turned back on when its IP
    address appears in the scan output.  Returns the same list.

    Fixes: py2-only ``xrange`` replaced with direct iteration; the O(n*m)
    nested index loop replaced with an O(1) set lookup; guards added so a
    scan line without the expected fields or '(' no longer raises
    IndexError.
    """
    for dev in devices:
        dev.turn_off()
    scan = os.popen("nmap -sP --unprivileged 192.168.2.0/24")
    live_ips = set()
    for line in scan.readlines():
        words = line.split(' ')
        # Lines of interest start with "Nmap scan report for ..."
        if len(words) >= 2 and words[0] == 'Nmap' and words[1] == 'scan':
            parts = line.split('(')
            if len(parts) > 1:
                # strip the trailing ")\n" from "(192.168.2.1)\n"
                live_ips.add(parts[1][:-2])
    for dev in devices:
        if dev.ipadress in live_ips:
            dev.turn_on()
    return devices
def writexmlrow(device, container, number):
    """Append one Bootstrap placeholder column for *device* to *container*."""
    # The first column carries the grid offset classes.
    if number == 1:
        col_class = 'col-lg-2 col-lg-offset-1 col-md-2 col-md-offset-1 placeholder'
    else:
        col_class = 'col-lg-2 col-md-2 placeholder'
    col = etree.SubElement(container, 'div', {'class': col_class})
    # Green icon for a reachable device, gray otherwise.
    icon = 'green' if device.status == 'on' else 'gray'
    etree.SubElement(col, 'img', {'src': './images/' + icon + '.png',
                                  'width': '200', 'height': '200',
                                  'class': 'img-responsive', 'align': 'center'})
    label = etree.SubElement(col, 'h4', {'align': 'center'})
    label.text = device.name
    return
def writexmlpart(devices):
    """Render the device grid and splice it between the two HTML templates."""
    container = etree.Element('div', {'class': 'row placeholder'})
    for position, dev in enumerate(devices, start=1):
        writexmlrow(dev, container, position)
    rendered = etree.tostring(container, pretty_print=True)
    with open("./parts/part1_1.html", "r") as fh:
        head = fh.read()
    with open("./parts/part1_2.html", "r") as fh:
        tail = fh.read()
    with open("./parts/part1.html", "w") as fh:
        fh.write(head + rendered + tail)
    return
def writescanlog():
    """Append a timestamped scan entry to ./log/scanlog.txt.

    Fixes two defects in the original:
    * ``localtime==time.localtime(...)`` *compared* (and discarded) the
      value instead of assigning it, leaving ``localtime`` undefined and
      raising NameError on the next line.
    * ``time`` was never imported in this module.
    Also adds the missing spaces around "on the" in the log line.
    """
    import time  # local import: the module header does not import time
    localtime = time.localtime(time.time())
    with open("./log/scanlog.txt", "a") as log:
        # e.g. "13:37 on the 7.11.17"
        log.write(str(localtime[3]) + ':' + str(localtime[4]) + ' on the '
                  + str(localtime[2]) + '.' + str(localtime[1]) + '.'
                  + str(localtime[0])[-2:])
        log.write("Scanned Wifi for my Devices")
|
#!/usr/bin/env python
import os
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this file."""
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path) as f:
        return f.read()
# Package metadata for django-darkknight.
# Fix: PEP 8 -- no spaces around '=' in keyword arguments (extras_require).
setup(name='django-darkknight',
      version='0.9.0',
      license="BSD",
      description="He's a silent guardian, a watchful protector",
      long_description=read('README.rst'),
      author="Fusionbox, Inc",
      author_email="programmers@fusionbox.com",
      url='http://github.com/fusionbox/django-darkknight',
      packages=['darkknight', 'darkknight_gpg'],
      install_requires=[
          'django-dotenv',
          'Django>=1.5',
          'pyOpenSSL',
          'django-localflavor',
          'django-countries',
      ],
      extras_require={
          'gpg': ['gnupg>=2.0.2,<3', 'django-apptemplates'],
      },
      classifiers=[
          "Development Status :: 4 - Beta",
          "Framework :: Django",
          "Intended Audience :: Developers",
          "Intended Audience :: Information Technology",
          "License :: OSI Approved :: BSD License",
          "Programming Language :: Python :: 2.7",
          "Programming Language :: Python :: 3.4",
          "Programming Language :: Python :: Implementation :: CPython",
          "Programming Language :: Python :: Implementation :: PyPy",
          "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
          "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
          "Topic :: Security :: Cryptography",
      ],
      )
|
from .app import App
from .model import User, Group, ResetNonce
class ViewPermission:
    """Marker class naming the 'view' permission for permission rules."""
    pass
class EditPermission:
    """Marker class naming the 'edit' permission for permission rules."""
    pass
@App.permission_rule(model=object, permission=object)
def admin_has_global_permission(identity, model, permission):
    """Catch-all rule: members of the Admin group may do anything.

    Fix: guards against an unknown userid (User.get returning None), which
    previously raised AttributeError on ``user.groups``; an unknown user is
    now simply denied.  This mirrors the ``user is not None`` checks used
    by the sibling permission rules in this module.
    """
    user = User.get(email=identity.userid)
    return user is not None and Group.get(name="Admin") in user.groups
@App.permission_rule(model=User, permission=object)
def user_has_self_permission(identity, model, permission):
    """Grant access on a User object to admins, or to the user themselves."""
    user = User.get(email=identity.userid)
    if user is not None and Group.get(name="Admin") in user.groups:
        return True
    # Non-admins may only act on their own account.
    return model.email == identity.userid
@App.permission_rule(model=ResetNonce, permission=EditPermission)
def user_has_permission_to_reset_nonce(identity, model, permission):
    """Admins may edit any ResetNonce; otherwise only the owner may.

    Fix: an unknown userid (User.get returning None) previously crashed
    with AttributeError on ``user.id`` in the non-admin branch; it is now
    simply denied.
    """
    user = User.get(email=identity.userid)
    if user is not None and Group.get(name="Admin") in user.groups:
        return True
    # NOTE(review): this compares the user's id with the nonce's id, which
    # presumes ResetNonce.id stores the owning user's id -- confirm.
    return user is not None and user.id == int(model.id)
|
def _filename(obj):
try:
return obj.__fil | ename__()
except:
pass
return | str(obj)
|
honcom.CLSCTX_SERVER):
IDispatch, userName = _GetGoodDispatchAndUserName(IDispatch,userName,clsctx)
if createClass is None:
createClass = CDispatch
lazydata = None
try:
if typeinfo is None:
typeinfo = IDispatch.GetTypeInfo()
try:
#try for a typecomp
typecomp = typeinfo.GetTypeComp()
lazydata = typeinfo, typecomp
except pythoncom.com_error:
pass
except pythoncom.com_error:
typeinfo = None
olerepr = MakeOleRepr(IDispatch, typeinfo, lazydata)
return createClass(IDispatch, olerepr, userName,UnicodeToString, lazydata)
def MakeOleRepr(IDispatch, typeinfo, typecomp):
    """Build an OLE type description (olerepr) for a dispatch object.

    Falls back to an empty build.DispatchItem() when no usable type
    information is available.
    """
    olerepr = None
    if typeinfo is not None:
        try:
            attr = typeinfo.GetTypeAttr()
            # If the type info is a special DUAL interface, magically turn it into
            # a DISPATCH typeinfo.
            if attr[5] == pythoncom.TKIND_INTERFACE and attr[11] & pythoncom.TYPEFLAG_FDUAL:
                # Get corresponding Disp interface;
                # -1 is a special value which does this for us.
                href = typeinfo.GetRefTypeOfImplType(-1);
                typeinfo = typeinfo.GetRefTypeInfo(href)
                attr = typeinfo.GetTypeAttr()
            if typecomp is None:
                olerepr = build.DispatchItem(typeinfo, attr, None, 0)
            else:
                # A type-comp is available, so the item can be built lazily.
                olerepr = build.LazyDispatchItem(attr, None)
        except pythoncom.ole_error:
            pass
    if olerepr is None: olerepr = build.DispatchItem()  # no usable type info
    return olerepr
def DumbDispatch(IDispatch, userName = None, createClass = None,UnicodeToString=NeedUnicodeConversions, clsctx=pythoncom.CLSCTX_SERVER):
    "Dispatch with no type info"
    # Resolve the IDispatch pointer and a display name, then wrap it in
    # createClass (default CDispatch) with an *empty* type description.
    IDispatch, userName = _GetGoodDispatchAndUserName(IDispatch,userName,clsctx)
    if createClass is None:
        createClass = CDispatch
    return createClass(IDispatch, build.DispatchItem(), userName,UnicodeToString)
class CDispatch:
def __init__(self, IDispatch, olerepr, userName = None, UnicodeToString=NeedUnicodeConversions, lazydata = None):
    # All attributes are stored via self.__dict__ directly, presumably to
    # bypass a custom __setattr__ that forwards writes to the COM object
    # -- TODO confirm against the rest of this class (not visible here).
    if userName is None: userName = "<unknown>"
    self.__dict__['_oleobj_'] = IDispatch            # raw dispatch pointer
    self.__dict__['_username_'] = userName           # display name for repr()
    self.__dict__['_olerepr_'] = olerepr             # OLE type description
    self.__dict__['_mapCachedItems_'] = {}
    self.__dict__['_builtMethods_'] = {}             # cache of built methods
    self.__dict__['_enum_'] = None                   # lazily created enumerator
    self.__dict__['_unicode_to_string_'] = UnicodeToString
    self.__dict__['_lazydata_'] = lazydata           # (typeinfo, typecomp) or None
def __call__(self, *args):
    "Provide 'default dispatch' COM functionality - allow instance to be called"
    # Prefer an explicitly named default method from the type info;
    # otherwise invoke DISPID_VALUE as method-or-propertyget.
    if self._olerepr_.defaultDispatchName:
        invkind, dispid = self._find_dispatch_type_(self._olerepr_.defaultDispatchName)
    else:
        invkind, dispid = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET, pythoncom.DISPID_VALUE
    if invkind is not None:
        allArgs = (dispid,LCID,invkind,1) + args
        return self._get_good_object_(self._oleobj_.Invoke(*allArgs),self._olerepr_.defaultDispatchName,None)
    raise TypeError, "This dispatch object does not define a default method"
def __nonzero__(self):
    # Always truthy, so "if object:" never falls back to __len__ (which
    # would trigger a COM "Count" invocation).
    return 1 # ie "if object:" should always be "true" - without this, __len__ is tried.
    # _Possibly_ want to defer to __len__ if available, but Im not sure this is
    # desirable???
def __repr__(self):
    # Show the user-visible name captured at construction time.
    return "<COMObject %s>" % (self._username_)
def __str__(self):
    # __str__ is used when the user does "print object", so we gracefully
    # fall back to the __repr__ if the object has no default method.
    try:
        return str(self.__call__())
    except pythoncom.com_error, details:
        # Re-raise anything that is not a "no value in this context" error.
        if details[0] not in ERRORS_BAD_CONTEXT:
            raise
        return self.__repr__()
# Delegate comparison to the oleobjs, as they know how to do identity.
def __cmp__(self, other):
    # Unwrap a CDispatch peer to its raw _oleobj_ before comparing.
    other = getattr(other, "_oleobj_", other)
    return cmp(self._oleobj_, other)
def __int__(self):
    # Convert via the object's default method/property (see __call__).
    return int(self.__call__())
def __len__(self):
    # Map len() onto the COM object's "Count" member, if it has one.
    invkind, dispid = self._find_dispatch_type_("Count")
    if invkind:
        return self._oleobj_.Invoke(dispid, LCID, invkind, 1)
    raise TypeError, "This dispatch object does not define a Count method"
def _NewEnum(self):
    """Return a wrapped standard COM enumerator, or None if unavailable."""
    try:
        invkind = pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET
        # (13, 10) describes the expected return type for InvokeTypes.
        enum = self._oleobj_.InvokeTypes(pythoncom.DISPID_NEWENUM,LCID,invkind,(13, 10),())
    except pythoncom.com_error:
        return None # no enumerator for this object.
    import util  # Python 2 implicit-relative import of win32com.client.util
    return util.WrapEnum(enum, None)
def __getitem__(self, index): # syver modified
    # Improved __getitem__ courtesy Syver Enstad
    # Must check _NewEnum before Item, to ensure b/w compat.
    if isinstance(index, IntType):
        # Create (and cache) the COM enumerator on first integer access.
        if self.__dict__['_enum_'] is None:
            self.__dict__['_enum_'] = self._NewEnum()
        if self.__dict__['_enum_'] is not None:
            return self._get_good_object_(self._enum_.__getitem__(index))
    # See if we have an "Item" method/property we can use (goes hand in hand with Count() above!)
    invkind, dispid = self._find_dispatch_type_("Item")
    if invkind is not None:
        return self._get_good_object_(self._oleobj_.Invoke(dispid, LCID, invkind, 1, index))
    raise TypeError, "This object does not support enumeration"
def __setitem__(self, index, *args):
    # XXX - todo - We should support calling Item() here too!
    # print "__setitem__ with", index, args
    # Write through the default member as a property-put/putref.
    if self._olerepr_.defaultDispatchName:
        invkind, dispid = self._find_dispatch_type_(self._olerepr_.defaultDispatchName)
    else:
        invkind, dispid = pythoncom.DISPATCH_PROPERTYPUT | pythoncom.DISPATCH_PROPERTYPUTREF, pythoncom.DISPID_VALUE
    if invkind is not None:
        allArgs = (dispid,LCID,invkind,0,index) + args
        return self._get_good_object_(self._oleobj_.Invoke(*allArgs),self._olerepr_.defaultDispatchName,None)
    raise TypeError, "This dispatch object does not define a default method"
def _find_dispatch_type_(self, methodName):
    """Return (invkind, dispid) for methodName, or (None, None) if unknown.

    Checks the cached type description first, then falls back to asking
    the COM object itself via GetIDsOfNames.
    """
    if self._olerepr_.mapFuncs.has_key(methodName):
        item = self._olerepr_.mapFuncs[methodName]
        return item.desc[4], item.dispid
    if self._olerepr_.propMapGet.has_key(methodName):
        item = self._olerepr_.propMapGet[methodName]
        return item.desc[4], item.dispid
    try:
        dispid = self._oleobj_.GetIDsOfNames(0,methodName)
    except: ### what error?  NOTE(review): bare except -- probably should be pythoncom.com_error
        return None, None
    return pythoncom.DISPATCH_METHOD | pythoncom.DISPATCH_PROPERTYGET, dispid
def _ApplyTypes_(self, dispid, wFlags, retType, argTypes, user, resultCLSID, *args):
    # Typed invoke: forward to InvokeTypes, then wrap the raw result.
    result = self._oleobj_.InvokeTypes(*(dispid, LCID, wFlags, retType, argTypes) + args)
    return self._get_good_object_(result, user, resultCLSID)
def _wrap_dispatch_(self, ob, userName = None, returnCLSID = None, UnicodeToString = NeedUnicodeConversions):
    # Given a dispatch object, wrap it in a class
    return Dispatch(ob, userName, UnicodeToString=UnicodeToString)
def _get_good_single_object_(self,ob,userName = None, ReturnCLSID=None):
    """Wrap a single return value: IDispatch objects become CDispatch-likes."""
    if iunkType==type(ob):
        try:
            ob = ob.QueryInterface(pythoncom.IID_IDispatch)
            # If this works, we then enter the "is dispatch" test below.
        except pythoncom.com_error:
            # It is an IUnknown, but not an IDispatch, so just let it through.
            pass
    if dispatchType==type(ob):
        # make a new instance of (probably this) class.
        return self._wrap_dispatch_(ob, userName, ReturnCLSID)
    elif self._unicode_to_string_ and UnicodeType==type(ob):
        # Optional py2 convenience: coerce unicode results to byte strings.
        return str(ob)
    else:
        return ob
def _get_good_object_(self,ob,userName = None, ReturnCLSID=None):
    """Given an object (usually the retval from a method), make it a good object to return.
    Basically checks if it is a COM object, and wraps it up.
    Also handles the fact that a retval may be a tuple of retvals"""
    if ob is None: # Quick exit!
        return None
    elif type(ob)==TupleType:
        # Wrap each element of a multi-value return individually.
        return tuple(map(lambda o, s=self, oun=userName, rc=ReturnCLSID: s._get_good_single_object_(o, oun, rc), ob))
    else:
        return self._get_good_single_object_(ob)
def _make_method_(self, name):
"Make a method object - Assumes in olerepr funcmap"
methodName = build.MakePublicAttributeName(name) # translate keywords etc.
methodCodeList = self._olerepr_.MakeFuncMethod(self._olerepr_.mapFuncs[name], methodName,0)
methodCode = string.join(methodCodeList,"\n")
try:
# print "Method code for %s is:\n" % self._username_, methodCode
# self._print_details_( |
e element list (GH 15447)
res = sparse.reindex(["A"], level=0)
exp = orig.reindex(["A"], level=0).to_sparse()
tm.assert_sp_series_equal(res, exp)
with pytest.raises(TypeError):
# Incomplete keys are not accepted for reindexing:
sparse.reindex(["A", "C"])
# "copy" argument:
res = sparse.reindex(sparse.index, copy=True)
exp = orig.reindex(orig.index, copy=True).to_sparse()
tm.assert_sp_series_equal(res, exp)
assert sparse is not res
@pytest.mark.filterwarnings("ignore:Sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:DataFrame.to_sparse:FutureWarning")
@pytest.mark.filterwarnings("ignore:Series.to_sparse:FutureWarning")
class TestSparseDataFrameIndexing:
def test_getitem(self):
    # Column, column-list, boolean-mask and iloc access on a sparse frame
    # must match the equivalent dense operation converted via to_sparse().
    orig = pd.DataFrame(
        [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4], [0, np.nan, 5]],
        columns=list("xyz"),
    )
    sparse = orig.to_sparse()
    tm.assert_sp_series_equal(sparse["x"], orig["x"].to_sparse())
    tm.assert_sp_frame_equal(sparse[["x"]], orig[["x"]].to_sparse())
    tm.assert_sp_frame_equal(sparse[["z", "x"]], orig[["z", "x"]].to_sparse())
    tm.assert_sp_frame_equal(
        sparse[[True, False, True, True]],
        orig[[True, False, True, True]].to_sparse(),
    )
    tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], orig.iloc[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
    # Same access patterns as test_getitem, but with fill_value=0.
    orig = pd.DataFrame(
        [[1, np.nan, 0], [2, 3, np.nan], [0, np.nan, 4], [0, np.nan, 5]],
        columns=list("xyz"),
    )
    sparse = orig.to_sparse(fill_value=0)
    result = sparse[["z"]]
    expected = orig[["z"]].to_sparse(fill_value=0)
    tm.assert_sp_frame_equal(result, expected, check_fill_value=False)
    tm.assert_sp_series_equal(sparse["y"], orig["y"].to_sparse(fill_value=0))
    # Frame-level subselections keep np.nan as the default fill value, so
    # each expectation is patched via _default_fill_value before comparing.
    exp = orig[["x"]].to_sparse(fill_value=0)
    exp._default_fill_value = np.nan
    tm.assert_sp_frame_equal(sparse[["x"]], exp)
    exp = orig[["z", "x"]].to_sparse(fill_value=0)
    exp._default_fill_value = np.nan
    tm.assert_sp_frame_equal(sparse[["z", "x"]], exp)
    indexer = [True, False, True, True]
    exp = orig[indexer].to_sparse(fill_value=0)
    exp._default_fill_value = np.nan
    tm.assert_sp_frame_equal(sparse[indexer], exp)
    exp = orig.iloc[[1, 2]].to_sparse(fill_value=0)
    exp._default_fill_value = np.nan
    tm.assert_sp_frame_equal(sparse.iloc[[1, 2]], exp)
def test_loc(self):
    # .loc scalar/row/column/list/mask access on a sparse frame must match
    # the equivalent dense access converted via to_sparse().
    orig = pd.DataFrame(
        [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
        columns=list("xyz"),
    )
    sparse = orig.to_sparse()
    assert sparse.loc[0, "x"] == 1
    assert np.isnan(sparse.loc[1, "z"])
    assert sparse.loc[2, "z"] == 4
    # have to specify `kind='integer'`, since we construct a
    # new SparseArray here, and the default sparse type is
    # integer there, but block in SparseSeries
    tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse(kind="integer"))
    tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse(kind="integer"))
    tm.assert_sp_series_equal(
        sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
    )
    tm.assert_sp_series_equal(
        sparse.loc[2, :], orig.loc[2, :].to_sparse(kind="integer")
    )
    tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
    tm.assert_sp_series_equal(sparse.loc[:, "y"], orig.loc[:, "y"].to_sparse())
    result = sparse.loc[[1, 2]]
    exp = orig.loc[[1, 2]].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    result = sparse.loc[[1, 2], :]
    exp = orig.loc[[1, 2], :].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    result = sparse.loc[:, ["x", "z"]]
    exp = orig.loc[:, ["x", "z"]].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    result = sparse.loc[[0, 2], ["x", "z"]]
    exp = orig.loc[[0, 2], ["x", "z"]].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    # exceeds the bounds
    result = sparse.reindex([1, 3, 4, 5])
    exp = orig.reindex([1, 3, 4, 5]).to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    # dense array
    result = sparse.loc[orig.x % 2 == 1]
    exp = orig.loc[orig.x % 2 == 1].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    # sparse array (actually it coerces to a normal Series)
    result = sparse.loc[sparse.x % 2 == 1]
    exp = orig.loc[orig.x % 2 == 1].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    # sparse array
    result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
    tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
    # Same .loc checks as test_loc, but with a string (label) index.
    orig = pd.DataFrame(
        [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
        index=list("abc"),
        columns=list("xyz"),
    )
    sparse = orig.to_sparse()
    assert sparse.loc["a", "x"] == 1
    assert np.isnan(sparse.loc["b", "z"])
    assert sparse.loc["c", "z"] == 4
    tm.assert_sp_series_equal(
        sparse.loc["a"], orig.loc["a"].to_sparse(kind="integer")
    )
    tm.assert_sp_series_equal(
        sparse.loc["b"], orig.loc["b"].to_sparse(kind="integer")
    )
    tm.assert_sp_series_equal(
        sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
    )
    tm.assert_sp_series_equal(
        sparse.loc["b", :], orig.loc["b", :].to_sparse(kind="integer")
    )
    tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
    tm.assert_sp_series_equal(sparse.loc[:, "z"], orig.loc[:, "z"].to_sparse())
    result = sparse.loc[["a", "b"]]
    exp = orig.loc[["a", "b"]].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    result = sparse.loc[["a", "b"], :]
    exp = orig.loc[["a", "b"], :].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    result = sparse.loc[:, ["x", "z"]]
    exp = orig.loc[:, ["x", "z"]].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    result = sparse.loc[["c", "a"], ["x", "z"]]
    exp = orig.loc[["c", "a"], ["x", "z"]].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    # dense array
    result = sparse.loc[orig.x % 2 == 1]
    exp = orig.loc[orig.x % 2 == 1].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    # sparse array (actually it coerces to a normal Series)
    result = sparse.loc[sparse.x % 2 == 1]
    exp = orig.loc[orig.x % 2 == 1].to_sparse()
    tm.assert_sp_frame_equal(result, exp)
    # sparse array
    result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
    tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
    # Row slicing with .loc on a sparse frame matches the dense result.
    orig = pd.DataFrame(
        [[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]],
        columns=list("xyz"),
    )
    sparse = orig.to_sparse()
    tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan], [2, 3, np.nan], [np.nan, np.nan, 4]])
| sparse = orig.to_sparse()
assert sparse.iloc[1, 1] == 3
assert np.isnan(sparse.iloc[2, 0])
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse(kind="integer"))
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse(kind="integer"))
tm.assert_sp_series_equal(
sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
) |
tm.assert_sp_series_equal(
sparse.iloc[2, :], orig.iloc[2, :].to_sparse(kind="integer")
)
tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1], orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
|
from quotes.m | odels import Quote
fro | m django.contrib import admin
class QuoteAdmin(admin.ModelAdmin):
    """Admin options for Quote: show the key fields in the changelist."""
    list_display = ('message', 'name', 'program', 'class_of',
                    'submission_time')


admin.site.register(Quote, QuoteAdmin)
|
#!/usr/bin/env python
# vim: set expandtab shiftwidth=4:
# http://www.voip-info.org/wiki/view/asterisk+manager+events
import sys,time
import simplejson as json
from stompy.simple import Client
import ConfigParser
config = ConfigParser.ConfigParser()
devel_config = ConfigParser.ConfigParser()
config.read('/opt/ucall/etc/config.ini')
devel_config.read('/opt/ucall/etc/devel_config.ini')
stomp_host = config.get('STOMP', 'host')
stomp_username = config.get | ('STOMP', 'username')
stomp_password = config.get('STOMP', 'password')
stomp_queue = "/queue/messages/" + devel_config.get | ('GENERAL', 'agent')
print '='*80
print 'Stomp host:', stomp_host
print 'Stomp username:', stomp_username
print 'Stomp password:', stomp_password
print 'Stomp queue:', stomp_queue
print '='*80
stomp = Client(stomp_host)
stomp.connect(stomp_username, stomp_password)
stomp.subscribe("jms.queue.msg.ctrl")
while True:
message = stomp.get()
print message.body
stomp.disconnect()
|
def _magic_get_file_type(f, _):
    """Sniff the MIME type from the first 1 KiB of an open file object."""
    file_type = magic.from_buffer(f.read(1024), mime=True)
    # Rewind so the caller can re-read the file from the start.
    f.seek(0)
    # NOTE(review): assumes magic.from_buffer returns bytes; newer
    # python-magic versions return str, where .decode would raise --
    # confirm the pinned magic library/version.
    return file_type.decode('utf-8')
def _guess | _file_type(_, filename):
return mimetypes.guess_type(filename)[0]
# Prefer libmagic-based content sniffing; fall back to extension-based
# guessing via the stdlib when python-magic is unavailable.
try:
    import magic
except ImportError:
    import mimetypes
    get_file_type = _guess_file_type
else:
    get_file_type = _magic_get_file_type
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class FloatingIPsTestJSON(base.BaseV2ComputeTest):
    """Compute floating-IP API tests: allocate, delete, (dis)associate."""
    _interface = 'json'
    server_id = None
    floating_ip = None

    @classmethod
    def setUpClass(cls):
        super(FloatingIPsTestJSON, cls).setUpClass()
        cls.client = cls.floating_ips_client
        # Fix: removed the no-op self-assignment
        # `cls.servers_client = cls.servers_client`; servers_client is
        # already provided by the base class.
        # Server creation
        resp, server = cls.create_test_server(wait_until='ACTIVE')
        cls.server_id = server['id']
        # Floating IP creation
        resp, body = cls.client.create_floating_ip()
        cls.floating_ip_id = body['id']
        cls.floating_ip = body['ip']
        # Generating a nonexistent floatingIP id
        cls.floating_ip_ids = [fip['id'] for fip in cls.client.list_floating_ips()[1]]
        while True:
            # Fix: choose one candidate per iteration instead of generating
            # a random int id and immediately discarding it when neutron is
            # available.
            if cls.config.service_available.neutron:
                cls.non_exist_id = str(uuid.uuid4())
            else:
                cls.non_exist_id = data_utils.rand_int_id(start=999)
            if cls.non_exist_id not in cls.floating_ip_ids:
                break

    @classmethod
    def tearDownClass(cls):
        # Deleting the floating IP which was created in setUpClass
        resp, body = cls.client.delete_floating_ip(cls.floating_ip_id)
        super(FloatingIPsTestJSON, cls).tearDownClass()

    @attr(type='gate')
    def test_allocate_floating_ip(self):
        # Positive test: Allocation of a new floating IP to a project
        # should be successful
        resp, body = self.client.create_floating_ip()
        self.assertEqual(200, resp.status)
        floating_ip_id_allocated = body['id']
        try:
            resp, floating_ip_details = \
                self.client.get_floating_ip_details(floating_ip_id_allocated)
            # Checking if the details of allocated IP is in list of floating IP
            resp, body = self.client.list_floating_ips()
            self.assertIn(floating_ip_details, body)
        finally:
            # Deleting the floating IP which is created in this method
            self.client.delete_floating_ip(floating_ip_id_allocated)

    @attr(type=['negative', 'gate'])
    def test_allocate_floating_ip_from_nonexistent_pool(self):
        # Negative test: Allocation of a new floating IP from a
        # nonexistent pool to a project should fail
        self.assertRaises(exceptions.NotFound,
                          self.client.create_floating_ip,
                          "non_exist_pool")

    @attr(type='gate')
    def test_delete_floating_ip(self):
        # Positive test: Deletion of valid floating IP from project
        # should be successful
        # Creating the floating IP that is to be deleted in this method
        resp, floating_ip_body = self.client.create_floating_ip()
        # Storing the details of floating IP before deleting it
        cli_resp = self.client.get_floating_ip_details(floating_ip_body['id'])
        resp, floating_ip_details = cli_resp
        # Deleting the floating IP from the project
        resp, body = self.client.delete_floating_ip(floating_ip_body['id'])
        self.assertEqual(202, resp.status)
        # Check it was really deleted.
        self.client.wait_for_resource_deletion(floating_ip_body['id'])

    @attr(type='gate')
    def test_associate_disassociate_floating_ip(self):
        # Positive test: Associate and disassociate the provided floating IP
        # to a specific server should be successful
        # Association of floating IP to fixed IP address
        resp, body = self.client.associate_floating_ip_to_server(
            self.floating_ip,
            self.server_id)
        self.assertEqual(202, resp.status)
        # Disassociation of floating IP that was associated in this method
        resp, body = self.client.disassociate_floating_ip_from_server(
            self.floating_ip,
            self.server_id)
        self.assertEqual(202, resp.status)

    @attr(type=['negative', 'gate'])
    def test_delete_nonexistant_floating_ip(self):
        # Negative test: Deletion of a nonexistent floating IP
        # from project should fail
        # Deleting the non existent floating IP
        self.assertRaises(exceptions.NotFound, self.client.delete_floating_ip,
                          self.non_exist_id)

    @attr(type=['negative', 'gate'])
    def test_associate_nonexistant_floating_ip(self):
        # Negative test: Association of a non existent floating IP
        # to specific server should fail
        # Associating non existent floating IP
        self.assertRaises(exceptions.NotFound,
                          self.client.associate_floating_ip_to_server,
                          "0.0.0.0", self.server_id)

    @attr(type=['negative', 'gate'])
    def test_dissociate_nonexistant_floating_ip(self):
        # Negative test: Dissociation of a non existent floating IP should
        # fail
        # Dissociating non existent floating IP
        self.assertRaises(exceptions.NotFound,
                          self.client.disassociate_floating_ip_from_server,
                          "0.0.0.0", self.server_id)

    @attr(type='gate')
    def test_associate_already_associated_floating_ip(self):
        # Positive test: Association of an already associated floating IP
        # to specific server should change the association of the Floating IP
        # Create server so as to use for Multiple association
        resp, body = self.servers_client.create_server('floating-server2',
                                                       self.image_ref,
                                                       self.flavor_ref)
        self.servers_client.wait_for_server_status(body['id'], 'ACTIVE')
        self.new_server_id = body['id']
        # Associating floating IP for the first time
        resp, _ = self.client.associate_floating_ip_to_server(
            self.floating_ip,
            self.server_id)
        # Associating floating IP for the second time
        resp, body = self.client.associate_floating_ip_to_server(
            self.floating_ip,
            self.new_server_id)
        self.addCleanup(self.servers_client.delete_server, self.new_server_id)
        if (resp['status'] is not None):
            self.addCleanup(self.client.disassociate_floating_ip_from_server,
                            self.floating_ip,
                            self.new_server_id)
        # Make sure no longer associated with old server
        self.assertRaises((exceptions.NotFound,
                           exceptions.UnprocessableEntity),
                          self.client.disassociate_floating_ip_from_server,
                          self.floating_ip, self.server_id)

    @attr(type=['negative', 'gate'])
    def test_associate_ip_to_server_without_passing_floating_ip(self):
        # Negative test: Association of empty floating IP to specific server
        # should raise NotFound exception
        self.assertRaises(exceptions.NotFound,
                          self.client.associate_floating_ip_to_server,
                          '', self.server_id)
class FloatingIPsTestXML(FloatingIPsTestJSON):
    # Same test suite as the JSON class, exercised via the XML interface.
    _interface = 'xml'
|
# coding:utf-8
'''
Created on 2017/11/7.
@author: chk01
'''
import math
import numpy as np
import h5py
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.python.framework import ops
from class_two.week_three.tf_utils import load_dataset, random_mini_batches, convert_to_one_hot, predict
np.random.seed(1)
def exam1():
    """TF1 demo: build and evaluate a squared-error 'loss' variable."""
    predicted = tf.constant(36, name='Y-hat')
    target = tf.constant(39, name='y')
    loss_var = tf.Variable((target - predicted) ** 2, name='loss')
    initializer = tf.global_variables_initializer()
    with tf.Session() as session:
        session.run(initializer)
        print(session.run(loss_var))
def exam2():
    """Return the (unevaluated) product node of two constant tensors."""
    first = tf.constant(2)
    second = tf.constant(3)
    product = tf.multiply(first, second)
    return product
def exam3(x_input):
    """Feed *x_input* into an int64 placeholder and print 2 * x."""
    with tf.Session() as session:
        x = tf.placeholder(tf.int64, name='x')
        doubled = 2 * x
        print(session.run(doubled, feed_dict={x: x_input}))
# GRADED FUNCTION: linear_function
def linear_function():
    """Evaluate Y = WX + b for fixed random tensors.

    W is (4,3), X is (3,1) and b is (4,1), all drawn from a seeded
    np.random.randn in the same order as the original exercise.

    Returns:
        result -- the evaluated numpy array for Y = WX + b
    """
    np.random.seed(1)
    # Creation order matters for reproducibility under the fixed seed.
    inputs = tf.constant(np.random.randn(3, 1), tf.float32, name='X')
    weights = tf.constant(np.random.randn(4, 3), tf.float32, name='W')
    bias = tf.constant(np.random.randn(4, 1), tf.float32, name='b')
    output = tf.matmul(weights, inputs) + bias
    with tf.Session() as session:
        result = session.run(output)
    return result
# GRADED FUNCTION: sigmoid
def sigmoid(z):
    """Evaluate the sigmoid of *z* (scalar or vector) via a placeholder.

    Returns:
        result -- the evaluated sigmoid of z
    """
    x = tf.placeholder(tf.float32, name='x')
    sigmoid_op = tf.nn.sigmoid(x)
    with tf.Session() as session:
        result = session.run(sigmoid_op, feed_dict={x: z})
    return result
def cost(logits, labels):
    """Evaluate the sigmoid cross-entropy cost.

    Arguments:
        logits -- vector of z values (pre-sigmoid outputs)
        labels -- vector of labels y (1 or 0)

    TensorFlow calls "z" and "y" "logits" and "labels" respectively, hence
    the feed mapping below.

    Returns:
        the evaluated per-element cost vector
    """
    z = tf.placeholder(tf.float32, name='z-input')
    y = tf.placeholder(tf.float32, name='y-input')
    cost_op = tf.nn.sigmoid_cross_entropy_with_logits(logits=z, labels=y)
    with tf.Session() as session:
        result = session.run(cost_op, feed_dict={z: logits, y: labels})
    return result
# GRADED FUNCTION: one_hot_matrix
# GRADED FUNCTION: one_hot_matrix
def one_hot_matrix(labels, C):
    """
    Creates a matrix where the i-th row corresponds to the ith class number
    and the jth column corresponds to the jth training example. So if
    example j had a label i, then entry (i,j) will be 1.

    Arguments:
    labels -- vector containing the labels
    C -- number of classes, the depth of the one hot dimension

    Returns:
    one_hot -- one hot matrix
    """
    C = tf.constant(C, name='C')
    one_hot_op = tf.one_hot(labels, C, axis=0)
    # Fix: removed a stray zero-argument call to
    # tf.nn.sigmoid_cross_entropy_with_logits(), which raised at runtime
    # and whose result was discarded anyway.
    with tf.Session() as sess:
        one_hot = sess.run(one_hot_op)
    return one_hot
if __name__ == '__main__':
    # exam1()
    logits = np.array([0.2, 0.4, 0.7, 0.9])
    # Note: rebinding `cost` here shadows the function after this call.
    cost = cost(logits, np.array([0, 0, 1, 1]))
    print("cost = " + str(cost))
    # Fix: removed a stray trailing `tf.one_hot(labels, C, axis=0)` --
    # both `labels` and `C` are undefined at module scope, so the line
    # raised NameError after the prints.
"""
raven.core.processors
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import re
from raven.utils import varmap
from raven.utils import six
class Processor(object):
    """Base event processor: runs filter hooks over a Sentry event dict."""

    def __init__(self, client):
        self.client = client

    def get_data(self, data, **kwargs):
        """Override to return replacement data; returning None keeps *data*."""
        return

    def process(self, data, **kwargs):
        """Apply every filter hook to *data* and return the (mutated) dict."""
        replacement = self.get_data(data, **kwargs)
        if replacement:
            data = replacement
        if 'exception' in data:
            exc = data['exception']
            if 'values' in exc:
                for value in exc.get('values', []):
                    if 'stacktrace' in value:
                        self.filter_stacktrace(value['stacktrace'])
        if 'request' in data:
            self.filter_http(data['request'])
        if 'extra' in data:
            data['extra'] = self.filter_extra(data['extra'])
        return data

    def filter_stacktrace(self, data):
        """Hook: mutate a stacktrace dict in place."""
        pass

    def filter_http(self, data):
        """Hook: mutate the request dict in place."""
        pass

    def filter_extra(self, data):
        """Hook: return a filtered version of the extra dict."""
        return data
class RemovePostDataProcessor(Processor):
    """Strips the HTTP POST body from the request payload."""

    def filter_http(self, data, **kwargs):
        if 'data' in data:
            del data['data']
class RemoveStackLocalsProcessor(Processor):
    """Drops local-variable context from every stacktrace frame."""

    def filter_stacktrace(self, data, **kwargs):
        for frame in data.get('frames', []):
            if 'vars' in frame:
                del frame['vars']
class SanitizePasswordsProcessor(Processor):
    """
    Asterisk out things that look like passwords, credit card numbers,
    and API keys in frames, http, and basic extra data.
    """
    MASK = '*' * 8
    FIELDS = frozenset([
        'password',
        'secret',
        'passwd',
        'authorization',
        'api_key',
        'apikey',
        'sentry_dsn',
    ])
    # 13-16 digits, optionally separated by spaces/dashes (card-number-like).
    VALUES_RE = re.compile(r'^(?:\d[ -]*?){13,16}$')

    def sanitize(self, key, value):
        """Return ``value``, masked when the key or value looks sensitive."""
        if value is None:
            return
        # Card-number-like strings are masked regardless of their key.
        if isinstance(value, six.string_types) and self.VALUES_RE.match(value):
            return self.MASK
        if not key:  # key can be a NoneType
            return value
        lowered = key.lower()
        if any(field in lowered for field in self.FIELDS):
            # store mask as a fixed length for security
            return self.MASK
        return value

    def filter_stacktrace(self, data):
        """Mask sensitive values inside each frame's local variables."""
        for frame in data.get('frames', []):
            if 'vars' in frame:
                frame['vars'] = varmap(self.sanitize, frame['vars'])

    def filter_http(self, data):
        """Mask sensitive values in the captured HTTP request sections."""
        for section in ('data', 'cookies', 'headers', 'env', 'query_string'):
            if section not in data:
                continue
            value = data[section]
            if isinstance(value, six.string_types) and '=' in value:
                # at this point we've assumed it's a standard HTTP query
                # or cookie
                delimiter = ';' if section == 'cookies' else '&'
                data[section] = self._sanitize_keyvals(value, delimiter)
            else:
                data[section] = varmap(self.sanitize, value)
                if section == 'headers' and 'Cookie' in data[section]:
                    data[section]['Cookie'] = self._sanitize_keyvals(
                        data[section]['Cookie'], ';'
                    )

    def filter_extra(self, data):
        """Mask sensitive values in the event's extra data."""
        return varmap(self.sanitize, data)

    def _sanitize_keyvals(self, keyvals, delimiter):
        """Sanitize a delimiter-joined ``key=value`` string pairwise."""
        sanitized = []
        for chunk in keyvals.split(delimiter):
            pieces = chunk.split('=')
            if len(pieces) == 2:
                sanitized.append((pieces[0], self.sanitize(*pieces)))
            else:
                # Not a simple pair; keep the fragment untouched.
                sanitized.append(pieces)
        return delimiter.join('='.join(pair) for pair in sanitized)
|
# -*- coding: utf-8 -*-
#
# This file is part of HEPData.
# Copyright (C) 2016 CERN.
#
# HEPData is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# HEPData is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HEPData; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""HEPData application factories."""
import os
import sys
from invenio_base.app import create_app_factory
from invenio_base.wsgi import create_wsgi_factory
from invenio_config import create_config_loader
from . import config
# Environment variables prefixed with APP_ override file-based configuration.
env_prefix = 'APP'
conf_loader = create_config_loader(config=config, env_prefix=env_prefix)
# Instance path holds runtime state; defaults under sys.prefix unless overridden.
instance_path = os.getenv(env_prefix + '_INSTANCE_PATH') or \
    os.path.join(sys.prefix, 'var', 'hepdata-instance')
# Static assets live inside the instance path unless overridden.
static_folder = os.getenv(env_prefix + '_STATIC_FOLDER') or \
    os.path.join(instance_path, 'static')
# REST API application factory (mounted under /api by create_app below).
create_api = create_app_factory(
    'hepdata',
    config_loader=conf_loader,
    extension_entry_points=['invenio_base.api_apps'],
    blueprint_entry_points=['invenio_base.api_blueprints'],
    instance_path=instance_path,
)
# Full UI application factory; serves the API app under /api via the WSGI factory.
create_app = create_app_factory(
    'hepdata',
    config_loader=conf_loader,
    extension_entry_points=['invenio_base.apps'],
    blueprint_entry_points=['invenio_base.blueprints'],
    wsgi_factory=create_wsgi_factory({'/api': create_api}),
    instance_path=instance_path,
    static_folder=static_folder,
)
|
from datetime import datetime
from django.db import models
from uuidfield.fields import UUIDField
from access import acl
import amo.models
from translations.fields import save_signal
from mkt.constants import comm as const
class CommunicationPermissionModel(amo.models.ModelBase):
    """Abstract base adding per-audience read-permission flags.

    Subclasses (threads/notes) gain one boolean per audience group.
    """
    # Read permissions imply write permissions as well.
    read_permission_public = models.BooleanField()
    read_permission_developer = models.BooleanField()
    read_permission_reviewer = models.BooleanField()
    read_permission_senior_reviewer = models.BooleanField()
    read_permission_mozilla_contact = models.BooleanField()
    read_permission_staff = models.BooleanField()
    class Meta:
        abstract = True
def check_acls(user, obj, acl_type):
    """Check ACLs.

    Arguments:
    user -- a UserProfile-like object with an ``email`` attribute
    obj -- a thread or note (notes are resolved via ``obj.thread``)
    acl_type -- one of 'moz_contact', 'admin', 'reviewer', 'senior_reviewer'

    Returns True/False; raises on an unknown ``acl_type``.
    """
    if acl_type == 'moz_contact':
        try:
            return user.email in obj.addon.get_mozilla_contacts()
        except AttributeError:
            # obj is a note rather than a thread; resolve via its thread.
            return user.email in obj.thread.addon.get_mozilla_contacts()
    if acl_type == 'admin':
        return acl.action_allowed_user(user, 'Admin', '%')
    elif acl_type == 'reviewer':
        return acl.action_allowed_user(user, 'Apps', 'Review')
    elif acl_type == 'senior_reviewer':
        return acl.action_allowed_user(user, 'Apps', 'ReviewEscalated')
    else:
        raise Exception('Invalid ACL lookup.')
    # BUG FIX: dropped an unreachable trailing `return False`; every branch
    # above either returns or raises.
def check_acls_comm_obj(obj, profile):
    """Cross-reference ACLs and Note/Thread permissions."""
    if obj.read_permission_public:
        return True
    # (permission flag, ACL type) pairs, checked in the original order so
    # short-circuiting behaviour is preserved.
    checks = (
        (obj.read_permission_reviewer, 'reviewer'),
        (obj.read_permission_senior_reviewer, 'senior_reviewer'),
        (obj.read_permission_mozilla_contact, 'moz_contact'),
        (obj.read_permission_staff, 'admin'),
    )
    for allowed, acl_type in checks:
        if allowed and check_acls(profile, obj, acl_type):
            return True
    return False
def user_has_perm_thread(thread, profile):
    """
    Check if the user has read/write permissions on the given thread.
    Developers of the add-on used in the thread, users in the CC list,
    and users who post to the thread are allowed to access the object.
    Moreover, other object permissions are also checked against the ACLs
    of the user.
    """
    # Participants: anyone who posted to, or is CC'd on, the thread.
    user_post = CommunicationNote.objects.filter(
        author=profile, thread=thread)
    user_cc = CommunicationThreadCC.objects.filter(
        user=profile, thread=thread)
    if user_post.exists() or user_cc.exists():
        return True
    # User is a developer of the add-on and has the permission to read.
    user_is_author = profile.addons.filter(pk=thread.addon_id)
    if thread.read_permission_developer and user_is_author.exists():
        return True
    # Fall back to the audience-flag / ACL cross-check.
    return check_acls_comm_obj(thread, profile)
def user_has_perm_note(note, profile):
    """
    Check if the user has read/write permissions on the given note.
    Developers of the add-on used in the note, users in the CC list,
    and users who post to the thread are allowed to access the object.
    Moreover, other object permissions are also checked against the ACLs
    of the user.
    """
    if note.author.id == profile.id:
        # Let the dude access his own note.
        return True
    # User is a developer of the add-on and has the permission to read.
    user_is_author = profile.addons.filter(pk=note.thread.addon_id)
    if note.read_permission_developer and user_is_author.exists():
        return True
    # Fall back to the audience-flag / ACL cross-check.
    return check_acls_comm_obj(note, profile)
class CommunicationThread(CommunicationPermissionModel):
    """A conversation thread attached to an add-on (and optionally a version)."""
    addon = models.ForeignKey('addons.Addon', related_name='threads')
    version = models.ForeignKey('versions.Version', related_name='threads',
                                null=True)
    class Meta:
        db_table = 'comm_threads'
class CommunicationThreadCC(amo.models.ModelBase):
    """CC entry granting a user visibility on a thread (one row per user/thread)."""
    thread = models.ForeignKey(CommunicationThread,
                               related_name='thread_cc')
    user = models.ForeignKey('users.UserProfile',
                             related_name='comm_thread_cc')
    class Meta:
        db_table = 'comm_thread_cc'
        unique_together = ('user', 'thread',)
class CommunicationNoteManager(models.Manager):
    """Manager adding permission-aware note queries."""

    def with_perms(self, profile, thread):
        """Return the thread's notes that ``profile`` is allowed to read."""
        visible_ids = [
            note.id
            for note in self.filter(thread=thread)
            if user_has_perm_note(note, profile)
        ]
        return self.filter(id__in=visible_ids)
class CommunicationNote(CommunicationPermissionModel):
    """A single message posted to a CommunicationThread."""
    thread = models.ForeignKey(CommunicationThread, related_name='notes')
    author = models.ForeignKey('users.UserProfile', related_name='comm_notes')
    # Integer code; presumably one of the mkt.constants.comm note types —
    # TODO confirm against const.
    note_type = models.IntegerField()
    body = models.TextField(null=True)
    reply_to = models.ForeignKey('self', related_name='replies', null=True,
                                 blank=True)
    read_by_users = models.ManyToManyField('users.UserProfile',
                                           through='CommunicationNoteRead')
    objects = CommunicationNoteManager()
    class Meta:
        db_table = 'comm_thread_notes'
    def save(self, *args, **kwargs):
        """Persist the note and bump the parent thread's modified time."""
        super(CommunicationNote, self).save(*args, **kwargs)
        self.thread.modified = self.created
        self.thread.save()
class CommunicationNoteRead(models.Model):
    """Through table recording which users have read which notes."""
    user = models.ForeignKey('users.UserProfile')
    note = models.ForeignKey(CommunicationNote)
    class Meta:
        db_table = 'comm_notes_read'
class CommunicationThreadToken(amo.models.ModelBase):
    """Per-user token granting limited access to a thread (one per user/thread)."""
    thread = models.ForeignKey(CommunicationThread, related_name='token')
    user = models.ForeignKey('users.UserProfile',
                             related_name='comm_thread_tokens')
    uuid = UUIDField(unique=True, auto=True)
    use_count = models.IntegerField(default=0,
        help_text='Stores the number of times the token has been used')
    class Meta:
        db_table = 'comm_thread_tokens'
        unique_together = ('thread', 'user')
    def is_valid(self):
        # TODO: Confirm the expiration and max use count values.
        # Valid while younger than THREAD_TOKEN_EXPIRY days (based on last
        # modification) and under the use-count cap.
        timedelta = datetime.now() - self.modified
        return (timedelta.days <= const.THREAD_TOKEN_EXPIRY and
                self.use_count < const.MAX_TOKEN_USE_COUNT)
    def reset_uuid(self):
        # Generate a new UUID.
        self.uuid = UUIDField()._create_uuid().hex
# Keep translated fields in sync when notes are saved.
models.signals.pre_save.connect(save_signal, sender=CommunicationNote,
                                dispatch_uid='comm_thread_notes_translations')
|
""" This class represents a Queue Node to store values and also
links others Nodes with values."""
class Node:
""" It starts with a value at all times. A note can not be
created without a value associated. """
| def __init__(self, value):
self.value = val | ue
self.next = None
""" This class represents a Queue to store values. The Queue starts
with a node called head. Every single element is going to be added
after the last node entered."""
class Queue:
""" The Queue is created with it's size zero and the head element
head is None (undefined)."""
def __init__(self):
self.head = None
self.size = 0
""" It adds a new value. The value is going to be added always after the
last value added. If the Queue has no elements, the value added is
going to be the head/head and also the last/tail value."""
def enqueue(self, value):
if (self.head is None):
self.head = Node(value)
self.size += 1
else:
pointer = self.head
while(pointer.next is not None):
pointer = pointer.next
pointer.next = Node(value)
""" This routine removes and also returns the first element. After the
remotion of the element, the head is updated and it turns to be the next
element of the queue (it's next element). If there are no more elements
other than the head, the Queue turns to be empty. If there are no elements
at all, there will be no remotion or return."""
def dequeue(self):
if (self.head is not None):
removed = self.head.value
self.head = self.head.next
self.size -= 1
return removed
""" It shows all the Queue elements one by one in a correct
order. """
def display(self):
pointer = self.head
while (pointer is not None):
print pointer.value
pointer = pointer.next
""" It returns the head node value, but it doesn't remove the
node. """
def head(self):
return self.head.value
""" It verifies whether or not the Queue has elements. If the Queue
doesn't have any elements, the head or head element is going to
be None. """
def is_empty(self):
return self.head is None
|
NAME = 'django-adminactions'
# (major, minor, micro, release level, serial) — consumed by get_version().
VERSION = __version__ = (0, 4, 0, 'final', 0)
__author__ = 'sax'
import subprocess
import datetime
import os
def get_version(version=None):
    """Derives a PEP386-compliant version number from VERSION.

    ``version`` is a 5-tuple (major, minor, micro, level, serial); when
    omitted the module-level VERSION is used.
    """
    if version is None:
        version = VERSION
    assert len(version) == 5
    assert version[3] in ('alpha', 'beta', 'rc', 'final')
    # X.Y when micro is zero, otherwise X.Y.Z
    significant = 2 if version[2] == 0 else 3
    main = '.'.join(str(part) for part in version[:significant])
    suffix = ''
    if version[3] == 'alpha' and version[4] == 0:
        # Development pre-alpha: tag with the latest git changeset time.
        git_changeset = get_git_changeset()
        if git_changeset:
            suffix = '.a%s' % git_changeset
    elif version[3] != 'final':
        abbrev = {'alpha': 'a', 'beta': 'b', 'rc': 'c'}
        suffix = abbrev[version[3]] + str(version[4])
    return main + suffix
def get_git_changeset():
    """Returns a numeric identifier of the latest git changeset.
    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very unlikely,
    so it's sufficient for generating the development version numbers.
    """
    # The repository root is two levels above this file.
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    proc = subprocess.Popen(
        'git log --pretty=format:%ct --quiet -1 HEAD',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        shell=True, cwd=repo_dir, universal_newlines=True)
    raw_timestamp = proc.communicate()[0]
    try:
        changeset = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:
        # Not a git checkout (or git missing): no changeset id available.
        return None
    return changeset.strftime('%Y%m%d%H%M%S')
|
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
################################################################# | #############
"""`APIdoc` skin.
$Id$
"""
__docformat__ = "reStructuredText"
from zope.publisher.interfaces.browser import IB | rowserRequest
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
class apidoc(IBrowserRequest):
"""The `apidoc` layer."""
class APIDOC(apidoc, IDefaultBrowserLayer):
"""The `APIDOC` skin."""
# BBB 2006/02/18, to be removed after 12 months
import zope.app.skins
zope.app.skins.set('APIDOC', APIDOC)
|
# Copyright (c) 2013, Kevin Greenan (kmgreen2@gmail.com)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution. THIS SOFTWARE IS
# PROVIDED BY THE COPYRIGHT HO | LDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
# NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL | , EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
"""
tflite backend (https://github.com/tensorflow/tensorflow/lite)
"""
# pylint: disable=unused-argument,missing-docstring,useless-super-delegation
from threading import Lock
try:
    # try dedicated tflite package first
    import tflite_runtime
    import tflite_runtime.interpreter as tflite
    _version = tflite_runtime.__version__
    _git_version = tflite_runtime.__git_version__
# BUG FIX: narrowed from a bare `except:` which also swallowed
# KeyboardInterrupt/SystemExit and unrelated errors raised while
# importing tflite_runtime.
except ImportError:
    # fall back to tflite bundled in tensorflow
    import tensorflow as tf
    from tensorflow.lite.python import interpreter as tflite
    _version = tf.__version__
    _git_version = tf.__git_version__
import backend
class BackendTflite(backend.Backend):
    """Backend that runs models through the TFLite interpreter."""

    def __init__(self):
        super(BackendTflite, self).__init__()
        self.sess = None
        # Serializes predict() calls; assumes the TFLite interpreter must
        # not be invoked concurrently — TODO confirm for this tflite build.
        self.lock = Lock()

    def version(self):
        """Return the runtime version string ("version/git_version")."""
        return _version + "/" + _git_version

    def name(self):
        """Backend identifier."""
        return "tflite"

    def image_format(self):
        # tflite is always NHWC
        return "NHWC"

    def load(self, model_path, inputs=None, outputs=None):
        """Load the model and cache tensor name -> index mappings."""
        self.sess = tflite.Interpreter(model_path=model_path)
        self.sess.allocate_tensors()
        # keep input/output name to index mapping
        self.input2index = {i["name"]: i["index"] for i in self.sess.get_input_details()}
        self.output2index = {i["name"]: i["index"] for i in self.sess.get_output_details()}
        # keep input/output names
        self.inputs = list(self.input2index.keys())
        self.outputs = list(self.output2index.keys())
        return self

    def predict(self, feed):
        """Run one inference; returns outputs in output2index order.

        feed -- dict mapping input tensor names to values.
        """
        # BUG FIX: use the lock as a context manager so it is released even
        # when set_tensor/invoke raises (the original acquire/release pair
        # leaked the lock on error, deadlocking later calls).
        with self.lock:
            for name, index in self.input2index.items():
                self.sess.set_tensor(index, feed[name])
            self.sess.invoke()
            # get results (values() preserves the same order as items())
            return [self.sess.get_tensor(index)
                    for index in self.output2index.values()]
|
assert sympify('2.6e+2/17', rational=True) == Rational(260, 17)
assert sympify('2.6e-2/17', rational=True) == Rational(26, 17000)
assert sympify('2.1+3/4', rational=True) == \
Rational(21, 10) + Rational(3, 4)
assert sympify('2.234456', rational=True) == Rational(279307, 125000)
assert sympify('2.234456e23', rational=True) == 223445600000000000000000
assert sympify('2.234456e-23', rational=True) == \
Rational(279307, 12500000000000000000000000000)
assert sympify('-2.234456e-23', rational=True) == \
Rational(-279307, 12500000000000000000000000000)
assert sympify('12345678901/17', rational=True) == \
Rational(12345678901, 17)
assert sympify('1/.3 + x', rational=True) == Rational(10, 3) + x
# make sure longs in fractions work
assert sympify('222222222222/11111111111') == \
Rational(222222222222, 11111111111)
# ... even if they come from repetend notation
assert sympify('1/.2[123456789012]') == Rational(333333333333, 70781892967)
# ... or from high precision reals
assert sympify('.1234567890123456', rational=True) == \
Rational(19290123283179, 156250000000000)
def test_sympify_Fraction():
    # stdlib fractions.Fraction converts to an exact Rational.
    try:
        import fractions
    except ImportError:
        pass
    else:
        value = sympify(fractions.Fraction(101, 127))
        assert value == Rational(101, 127) and type(value) is Rational
def test_sympify_gmpy():
    # gmpy/gmpy2 integers and rationals map onto Integer/Rational.
    if HAS_GMPY:
        if HAS_GMPY == 2:
            import gmpy2 as gmpy
        elif HAS_GMPY == 1:
            import gmpy
        value = sympify(gmpy.mpz(1000001))
        assert value == Integer(1000001) and type(value) is Integer
        value = sympify(gmpy.mpq(101, 127))
        assert value == Rational(101, 127) and type(value) is Rational
@conserve_mpmath_dps
def test_sympify_mpmath():
    # mpmath floats convert to Float at the current working precision.
    value = sympify(mpmath.mpf(1.0))
    assert value == Float(1.0) and type(value) is Float
    mpmath.mp.dps = 12
    assert sympify(
        mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-12")) is True
    assert sympify(
        mpmath.pi).epsilon_eq(Float("3.14159265359"), Float("1e-13")) is False
    mpmath.mp.dps = 6
    assert sympify(
        mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-5")) is True
    assert sympify(
        mpmath.pi).epsilon_eq(Float("3.14159"), Float("1e-6")) is False
    assert sympify(mpmath.mpc(1.0 + 2.0j)) == Float(1.0) + Float(2.0)*I
def test_sympify2():
    # Objects exposing _sympy_() are converted via that hook.
    class A:
        def _sympy_(self):
            return Symbol("x")**3
    a = A()
    assert _sympify(a) == x**3
    assert sympify(a) == x**3
    assert a == x**3
def test_sympify3():
    # Strings parse with ** and ^ as power; _sympify rejects strings.
    assert sympify("x**3") == x**3
    assert sympify("x^3") == x**3
    assert sympify("1/2") == Integer(1)/2
    raises(SympifyError, lambda: _sympify('x**3'))
    raises(SympifyError, lambda: _sympify('1/2'))
def test_sympify_keywords():
    # Python keywords are not valid expressions.
    raises(SympifyError, lambda: sympify('if'))
    raises(SympifyError, lambda: sympify('for'))
    raises(SympifyError, lambda: sympify('while'))
    raises(SympifyError, lambda: sympify('lambda'))
def test_sympify_float():
    # Tiny magnitudes must not collapse to zero.
    assert sympify("1e-64") != 0
    assert sympify("1e-20000") != 0
def test_sympify_bool():
    """Test that sympify accepts boolean values
    and that output leaves them unchanged"""
    assert sympify(True) is True
    assert sympify(False) is False
# NOTE(review): function name has a typo ("sympyify"); left unchanged so the
# public test name is preserved.
def test_sympyify_iterables():
    ans = [Rational(3, 10), Rational(1, 5)]
    assert sympify(['.3', '.2'], rational=True) == ans
    assert sympify(set(['.3', '.2']), rational=True) == set(ans)
    assert sympify(tuple(['.3', '.2']), rational=True) == Tuple(*ans)
    assert sympify(dict(x=0, y=1)) == {x: 0, y: 1}
    assert sympify(['1', '2', ['3', '4']]) == [S(1), S(2), [S(3), S(4)]]
def test_sympify4():
    # _sympy_() hook returning a Symbol.
    class A:
        def _sympy_(self):
            return Symbol("x")
    a = A()
    assert _sympify(a)**3 == x**3
    assert sympify(a)**3 == x**3
    assert a == x
def test_sympify_text():
    # Bare words become Symbols; known names resolve to sympy objects.
    assert sympify('some') == Symbol('some')
    assert sympify('core') == Symbol('core')
    assert sympify('True') is True
    assert sympify('False') is False
    assert sympify('Poly') == Poly
    assert sympify('sin') == sin
def test_sympify_function():
    # Function calls inside strings are evaluated.
    assert sympify('factor(x**2-1, x)') == -(1 - x)*(x + 1)
    assert sympify('sin(pi/2)*cos(pi)') == -Integer(1)
def test_sympify_poly():
    # Poly instances pass through unchanged (identity preserved).
    p = Poly(x**2 + x + 1, x)
    assert _sympify(p) is p
    assert sympify(p) is p
def test_sympify_factorial():
    # Postfix ! and !! parse to factorial/factorial2.
    assert sympify('x!') == factorial(x)
    assert sympify('(x+1)!') == factorial(x + 1)
    assert sympify('(1 + y*(x + 1))!') == factorial(1 + y*(x + 1))
    assert sympify('(1 + y*(x + 1)!)^2') == (1 + y*factorial(x + 1))**2
    assert sympify('y*x!') == y*factorial(x)
    assert sympify('x!!') == factorial2(x)
    assert sympify('(x+1)!!') == factorial2(x + 1)
    assert sympify('(1 + y*(x + 1))!!') == factorial2(1 + y*(x + 1))
    assert sympify('(1 + y*(x + 1)!!)^2') == (1 + y*factorial2(x + 1))**2
    assert sympify('y*x!!') == y*factorial2(x)
    assert sympify('factorial2(x)!') == factorial(factorial2(x))
    raises(SympifyError, lambda: sympify("+!!"))
    raises(SympifyError, lambda: sympify(")!!"))
    raises(SympifyError, lambda: sympify("!"))
    raises(SympifyError, lambda: sympify("(!)"))
    raises(SympifyError, lambda: sympify("x!!!"))
def test_sage():
    # how to effectivelly test for the _sage_() method without having SAGE
    # installed?
    assert hasattr(x, "_sage_")
    assert hasattr(Integer(3), "_sage_")
    assert hasattr(sin(x), "_sage_")
    assert hasattr(cos(x), "_sage_")
    assert hasattr(x**2, "_sage_")
    assert hasattr(x + y, "_sage_")
    assert hasattr(exp(x), "_sage_")
    assert hasattr(log(x), "_sage_")
def test_bug496():
    # Leading/trailing underscores are legal symbol names.
    assert sympify("a_") == Symbol("a_")
    assert sympify("_a") == Symbol("_a")
@XFAIL
def test_lambda():
    # Expected failure: lambda strings are not (yet) parsed into Lambda.
    x = Symbol('x')
    assert sympify('lambda: 1') == Lambda((), 1)
    assert sympify('lambda x: 2*x') == Lambda(x, 2*x)
    assert sympify('lambda x, y: 2*x+y') == Lambda([x, y], 2*x + y)
def test_lambda_raises():
    with raises(SympifyError):
        _sympify('lambda: 1')
def test_sympify_raises():
    # Unbalanced parentheses cannot be parsed.
    raises(SympifyError, lambda: sympify("fx)"))
def test__sympify():
    x = Symbol('x')
    f = Function('f')
    # positive _sympify
    assert _sympify(x) is x
    assert _sympify(f) is f
    assert _sympify(1) == Integer(1)
    assert _sympify(0.5) == Float("0.5")
    assert _sympify(1 + 1j) == 1.0 + I*1.0
    class A:
        def _sympy_(self):
            return Integer(5)
    a = A()
    assert _sympify(a) == Integer(5)
    # negative _sympify
    raises(SympifyError, lambda: _sympify('1'))
    raises(SympifyError, lambda: _sympify([1, 2, 3]))
def test_sympifyit():
    # @_sympifyit coerces the named argument, returning the fallback (or
    # raising) when coercion fails.
    x = Symbol('x')
    y = Symbol('y')
    @_sympifyit('b', NotImplemented)
    def add(a, b):
        return a + b
    assert add(x, 1) == x + 1
    assert add(x, 0.5) == x + Float('0.5')
    assert add(x, y) == x + y
    assert add(x, '1') == NotImplemented
    @_sympifyit('b')
    def add_raises(a, b):
        return a + b
    assert add_raises(x, 1) == x + 1
    assert add_raises(x, 0.5) == x + Float('0.5')
    assert add_raises(x, y) == x + y
    raises(SympifyError, lambda: add_raises(x, '1'))
def test_int_float():
class F1_1(object):
def __float__(self):
return 1.1
class F1_1b(object):
"""
This class is still a float, even though it also implements __int__().
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
class F1_1c(object):
"""
This class is still a float, because it implements _sympy_()
"""
def __float__(self):
return 1.1
def __int__(self):
return 1
def _sympy_(self):
return Float(1.1)
class I5(object):
def __int__(self):
return 5
class I5b(object):
"""
This class implements both __int__() and __float__(), so it will be
treated as Float in SymPy. One could change this b |
from .ScatterplotStructure import ScatterplotStructure
from .BasicHTMLFromScatterplotStructure import BasicHTMLFromScatterplotSt | ructure
from scattertext.viz.PairPlotFromScattertextStructure import PairPlotFromScatterplotStructure
from .VizDataAdapter imp | ort VizDataAdapter
from .HTMLSemioticSquareViz import HTMLSemioticSquareViz |
'''
Creat | ed by auto_sdk on 2013.11.26
'''
from top.api.base import | RestApi
class PictureIsreferencedGetRequest(RestApi):
    """TOP request wrapper for taobao.picture.isreferenced.get."""

    def __init__(self, domain='gw.api.taobao.com', port=80):
        RestApi.__init__(self, domain, port)
        # ID of the picture whose reference status is queried.
        self.picture_id = None

    def getapiname(self):
        """API method name sent to the gateway."""
        return 'taobao.picture.isreferenced.get'
"""
Meteo | rology visualisation examples
======= | ===========================
"""
|
def test_assert():
assert 'soup' == 'soup'
def test_pass():
pass
def test_fail():
ass | ert False
test_fail.w | ill_fail = True
|
#! /usr/bin/env python
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ ___ ___ ____ ____ __
# | \ | \ | | / | | | \ Automatic
# |__/ |__/ | | | | |__ |__ | | Conference
# | |\_ | | | | | | | Proceedings
# | | \ |___| \___ |___ |___ |__/ Generator
# ==========================================================
#
# http://www.lpl-aix.fr/~bigi/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2013-2014 Brigitte B | igi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# Proceed is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Proceed is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Proceed. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
__docformat__ = "epytext"
# ---------------------------------------------------------------------------
import sys
import os
import random
import tempfile
from datetime import date
# ---------------------------------------------------------------------------
class GenName():
    """
    @authors: Brigitte Bigi
    @contact: brigitte.bigi@gmail.com
    @license: GPL
    @summary: A class to generates a random file name of a non-existing file.
    """

    def __init__(self, extension=""):
        # "/" always exists, so the loop runs at least once and keeps
        # drawing candidate names until one does not exist on disk.
        self.name = "/"
        while os.path.exists(self.name):
            self.set_name(extension)

    def set_name(self, extension):
        """
        Set a new file name: tmp_<date>_<pid>_<random><extension>.
        """
        rand_part = str(int(random.random() * 10000))
        pid_part = str(os.getpid())
        date_part = str(date.today())
        base = "tmp_" + date_part + "_" + pid_part + "_" + rand_part
        self.name = base + extension

    def get_name(self):
        """
        Get the current file name.
        """
        return str(self.name)
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # BUG FIX: parenthesized call form — the original Python-2-only print
    # statement is a SyntaxError under Python 3; this works on both.
    print(GenName().get_name())
# ---------------------------------------------------------------------------
|
"""
POZ Development Application.
"""
import numpy as np
# import cv2
import pozutil as pu
import test_util as tpu
def perspective_test(_y, _z, _ele, _azi):
    # Projects a 3x3 grid of world landmarks to image coordinates before
    # and after rotating the scene by (_ele, _azi) degrees, printing both.
    # NOTE(review): Python 2 print statements; this module is Python 2 only.
    print "--------------------------------------"
    print "Perspective Transform tests"
    print
    cam = pu.CameraHelper()
    # some landmarks in a 3x3 grid pattern
    p0 = np.float32([-1., _y - 1.0, _z])
    p1 = np.float32([0., _y - 1.0, _z])
    p2 = np.float32([1., _y - 1.0, _z])
    p3 = np.float32([-1., _y + 1.0, _z])
    p4 = np.float32([0., _y + 1.0, _z])
    p5 = np.float32([1., _y + 1.0, _z])
    p6 = np.float32([-1., _y, _z])
    p7 = np.float32([0, _y, _z])
    p8 = np.float32([1., _y, _z])
    # 3x3 grid array
    ppp = np.array([p0, p1, p2, p3, p4, p5, p6, p7, p8])
    print "Here are some landmarks in world"
    print ppp
    puv_acc = []
    quv_acc = []
    for vp in ppp:
        # original view of landmarks
        u, v = cam.project_xyz_to_uv(vp)
        puv_acc.append(np.float32([u, v]))
        # rotated view of landmarks
        xyz_r = pu.calc_xyz_after_rotation_deg(vp, _ele, _azi, 0)
        u, v = cam.project_xyz_to_uv(xyz_r)
        quv_acc.append(np.float32([u, v]))
    puv = np.array(puv_acc)
    quv = np.array(quv_acc)
    # 4-pt "diamond" array
    quv4 = np.array([quv[1], quv[4], quv[6], quv[8]])
    puv4 = np.array([puv[1], puv[4], puv[6], puv[8]])
    print
    print "Landmark img coords before rotate:"
    print puv
    print "Landmark img coords after rotate:"
    print quv
    print quv4
    print
    # h, _ = cv2.findHomography(puv, quv)
    # hh = cv2.getPerspectiveTransform(puv4, quv4)
    # print h
    # print hh
    # perspectiveTransform needs an extra dimension
    puv1 = np.expand_dims(puv, axis=0)
    # print "Test perspectiveTransform with findHomography matrix:"
    # xpersp = cv2.perspectiveTransform(puv1, h)
    # print xpersp
    # print "Test perspectiveTransform with getPerspectiveTransform matrix:"
    # xpersp = cv2.perspectiveTransform(puv1, hh)
    # print xpersp
    # print
if __name__ == "__main__":
# robot always knows the Y and Elevation of its camera
# (arbitrary assignments for testing)
known_cam_y = -3.
known_cam_el = 0.0
tests = [(1., 1., tpu.lm_vis_1_1),
(7., 6., tpu.lm_vis_7_6)]
print "--------------------------------------"
print "Landmark Test"
print
test_index = 0
vis_map = tests[test_index][2]
# robot does not know its (X, Z) position
# it will have to solve for it
cam_x = tests[test_index][0]
cam_z = tests[test_index][1]
print "Known (X,Z): ", (cam_x, cam_z)
for key in sorted(vis_map.keys()):
cam_azim = vis_map[key].az + 0. # change offs | et for testing
cam_elev = vis_map[key].el + known_cam_el
| print "-----------"
# print "Known Camera Elev =", cam_elev
xyz = [cam_x, known_cam_y, cam_z]
angs = [cam_azim, cam_elev]
print "Landmark {:s}. Camera Azim = {:8.2f}".format(key, cam_azim)
lm1 = tpu.mark1[key]
f, x, z, a = tpu.landmark_test(lm1, tpu.mark2[key], xyz, angs)
print "Robot is at: {:6.3f},{:6.3f},{:20.14f}".format(x, z, a)
f, x, z, a = tpu.landmark_test(lm1, tpu.mark3[key], xyz, angs)
print "Robot is at: {:6.3f},{:6.3f},{:20.14f}".format(x, z, a)
tpu.pnp_test(key, xyz, angs)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
# Distribution metadata for the pysensu-yelp package.
setup(
    name='pysensu-yelp',
    version='0.4.4',
    provides=['pysensu_yelp'],
    description='Emits Yelp-flavored Sensu events to a Sensu Client',
    url='https://github.com/Yelp/pysensu-yelp',
    author='Yelp Operations Team',
    author_email='operations@yelp.com',
    packages=find_packages(exclude=['tests']),
    install_requires=['six'],
    license='Copyright Yelp 2014, all rights reserved',
)
|
# Peerz - P2P python library using ZeroMQ sockets and gevent
# Copyright (C) 2014-2015 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your op | tion) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PUR | POSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
from transitions import Machine
class MessageState(object):
    """Tracks the lifecycle of one outstanding request/response exchange.

    State flow: initialised -> waiting response -> complete, with a
    'timeout' transition reachable from any state. Per-state dwell times
    are accumulated (in milliseconds) for latency reporting.
    """
    states = ['initialised', 'waiting response', 'complete', 'timedout']
    transitions = [
        {'trigger': 'query', 'source': 'initialised', 'dest': 'waiting response', 'before': '_update', 'after': '_send_query'},
        {'trigger': 'response', 'source': 'waiting response', 'dest': 'complete', 'before': '_update', 'after': '_completed'},
        {'trigger': 'timeout', 'source': '*', 'dest': 'timedout', 'before': '_update', 'after': '_completed', },
    ]
    def __init__(self, engine, txid, msg, callback=None, max_duration=5000, max_concurrency=3):
        # engine -- owning engine (used by subclasses when sending)
        # txid -- transaction id correlating request and response
        # msg -- raw message parts, consumed by parse_message()
        # callback -- optional completion callback for subclasses
        # max_duration -- overall budget in milliseconds
        # max_concurrency -- parallel query limit for this transaction
        self.engine = engine
        self.callback = callback
        self.machine = Machine(model=self,
                               states=self.states,
                               transitions=self.transitions,
                               initial='initialised')
        # All timestamps below are in milliseconds.
        self.start = self.last_change = time.time() * 1000
        self.max_duration = max_duration
        self.max_concurrency = max_concurrency
        self.txid = txid
        # Accumulated dwell time per state name.
        self.times = {}
        # Order matters: parse first, then fire the 'query' transition.
        self.parse_message(msg)
        self.query()
    def query(self):
        # Placeholder; presumably superseded by the Machine-generated
        # 'query' trigger attached to the model — TODO confirm with the
        # installed transitions version.
        pass
    def parse_message(self, msg):
        # Default parsing: first message part is the payload value.
        self.val = msg.pop(0)
    def is_complete(self):
        # True once a terminal state has been reached.
        return self.state in ['complete', 'timedout']
    def pack_request(self):
        # Hook for subclasses: serialize the outgoing request.
        return None
    @staticmethod
    def unpack_response(content):
        # Hook for subclasses: deserialize a response payload.
        return None
    @staticmethod
    def pack_response(content):
        # Hook for subclasses: serialize a response payload.
        return None
    def _update(self):
        # Accumulate the time spent in the state being left.
        now = time.time() * 1000
        self.times.setdefault(self.state, 0.0)
        self.times[self.state] += (now - self.last_change)
        self.last_change = now
    def duration(self):
        # Total elapsed time since creation, in milliseconds.
        return time.time() * 1000 - self.start
    def latency(self):
        # Time spent waiting for a response, in milliseconds.
        return self.times.setdefault('waiting response', 0.0)
    def _send_query(self):
        # Hook: transmit the query after entering 'waiting response'.
        pass
    def _completed(self):
        # Hook: invoked after reaching a terminal state.
        pass
|
import hashlib

# Brute-force search: find the lowest integer suffix whose MD5 digest of
# (puzzle_input + suffix) starts with six zeros, echoing each digest tried.
puzzle_input = 'iwrupvqb'
current = 0
done = False
while not done:
    digest = hashlib.md5((puzzle_input + str(current)).encode()).hexdigest()
    print(digest)
    if digest.startswith('000000'):
        done = True
        print(current)
    current += 1
|
LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
    """A `cv` given as a CV splitter object must be used as-is."""
    X, y = data
    n_splits = 5
    splitter = KFold(n_splits=n_splits)
    calib_clf = CalibratedClassifierCV(cv=splitter, ensemble=ensemble)
    assert isinstance(calib_clf.cv, KFold)
    assert calib_clf.cv.n_splits == n_splits
    calib_clf.fit(X, y)
    # One calibrated classifier per split in ensemble mode, otherwise one.
    expected_n_clf = n_splits if ensemble else 1
    assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
    """Sample weights passed to fit must influence the calibration."""
    n_samples = 100
    X, y = data
    sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
    X_train = X[:n_samples]
    y_train = y[:n_samples]
    sw_train = sample_weight[:n_samples]
    X_test = X[n_samples:]
    base_estimator = LinearSVC(random_state=42)
    calibrated_clf = CalibratedClassifierCV(
        base_estimator, method=method, ensemble=ensemble
    )
    calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
    probs_with_sw = calibrated_clf.predict_proba(X_test)
    # Refitting without weights should yield visibly different probabilities,
    # since the weights are used for the calibration.
    calibrated_clf.fit(X_train, y_train)
    probs_without_sw = calibrated_clf.predict_proba(X_test)
    assert np.linalg.norm(probs_with_sw - probs_without_sw) > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
    """Parallel calibration (n_jobs=2) must match the sequential result."""
    X, y = data
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    base_estimator = LinearSVC(random_state=42)

    def fit_predict(n_jobs):
        # Helper: fit a calibrated classifier with the given parallelism
        # and return its test-set probabilities.
        clf = CalibratedClassifierCV(
            base_estimator, method=method, n_jobs=n_jobs, ensemble=ensemble
        )
        clf.fit(X_train, y_train)
        return clf.predict_proba(X_test)

    assert_allclose(fit_predict(n_jobs=2), fit_predict(n_jobs=1))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
    """Multiclass calibration must keep accuracy and not substantially
    worsen the multiclass Brier score, for both a decision-function-only
    classifier (LinearSVC) and a predict_proba classifier (RandomForest).
    """
    def multiclass_brier(y_true, proba_pred, n_classes):
        # One-hot encode the labels to compute a proper multiclass Brier score.
        Y_onehot = np.eye(n_classes)[y_true]
        return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
    # Test calibration for multiclass with classifier that implements
    # only decision function.
    clf = LinearSVC(random_state=7)
    X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
                      centers=10, cluster_std=15.0)
    # Use an unbalanced dataset by collapsing 8 clusters into one class
    # to make the naive calibration based on a softmax more unlikely
    # to work.
    y[y > 2] = 2
    n_classes = np.unique(y).shape[0]
    # Alternate samples between train and test splits.
    X_train, y_train = X[::2], y[::2]
    X_test, y_test = X[1::2], y[1::2]
    clf.fit(X_train, y_train)
    cal_clf = CalibratedClassifierCV(
        clf, method=method, cv=5, ensemble=ensemble
    )
    cal_clf.fit(X_train, y_train)
    probas = cal_clf.predict_proba(X_test)
    # Check probabilities sum to 1
    assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
    # Check that the dataset is not too trivial, otherwise it's hard
    # to get interesting calibration data during the internal
    # cross-validation loop.
    assert 0.65 < clf.score(X_test, y_test) < 0.95
    # Check that the accuracy of the calibrated model is never degraded
    # too much compared to the original classifier.
    assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
    # Check that Brier loss of calibrated classifier is smaller than
    # loss obtained by naively turning OvR decision function to
    # probabilities via a softmax
    uncalibrated_brier = \
        multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
                         n_classes=n_classes)
    calibrated_brier = multiclass_brier(y_test, probas,
                                        n_classes=n_classes)
    # 10% tolerance accounts for run-to-run statistical variation.
    assert calibrated_brier < 1.1 * uncalibrated_brier
    # Test that calibration of a multiclass classifier decreases log-loss
    # for RandomForestClassifier
    clf = RandomForestClassifier(n_estimators=30, random_state=42)
    clf.fit(X_train, y_train)
    clf_probs = clf.predict_proba(X_test)
    uncalibrated_brier = multiclass_brier(y_test, clf_probs,
                                          n_classes=n_classes)
    cal_clf = CalibratedClassifierCV(
        clf, method=method, cv=5, ensemble=ensemble
    )
    cal_clf.fit(X_train, y_train)
    cal_clf_probs = cal_clf.predict_proba(X_test)
    calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
                                        n_classes=n_classes)
    assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_zero_probability():
    """All-zero calibrator outputs must fall back to uniform probabilities.

    Edge case: when every calibrator outputs zero for a sample at once,
    _CalibratedClassifier's multiclass normalization must avoid numerical
    errors and emit 1 / n_classes for every class instead.
    """
    class ZeroCalibrator():
        # This function is called from _CalibratedClassifier.predict_proba.
        def predict(self, X):
            return np.zeros(X.shape[0])

    X, y = make_blobs(n_samples=50, n_features=10, random_state=7,
                      centers=10, cluster_std=15.0)
    clf = DummyClassifier().fit(X, y)
    cal_clf = _CalibratedClassifier(
        base_estimator=clf, calibrators=[ZeroCalibrator()], classes=clf.classes_)
    probas = cal_clf.predict_proba(X)
    assert_allclose(probas, 1. / clf.n_classes_)
def test_calibration_prefit():
    """Test calibration for prefitted classifiers"""
    n_samples = 50
    X, y = make_classification(n_samples=3 * n_samples, n_features=6,
                               random_state=42)
    sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
    X -= X.min()  # MultinomialNB only allows positive X
    # split train and test
    X_train, y_train, sw_train = \
        X[:n_samples], y[:n_samples], sample_weight[:n_samples]
    X_calib, y_calib, sw_calib = \
        X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
        sample_weight[n_samples:2 * n_samples]
    X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
    # Naive-Bayes
    clf = MultinomialNB()
    # Check error if clf not prefit
    unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
    with pytest.raises(NotFittedError):
        unfit_clf.fit(X_calib, y_calib)
    clf.fit(X_train, y_train, sw_train)
    prob_pos_clf = clf.predict_proba(X_test)[:, 1]
    # Naive Bayes with calibration
    # Exercise dense and sparse calibration inputs, both calibration
    # methods, and fitting with and without sample weights.
    for this_X_calib, this_X_test in [(X_calib, X_test),
                                      (sparse.csr_matrix(X_calib),
                                       sparse.csr_matrix(X_test))]:
        for method in ['isotonic', 'sigmoid']:
            cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
            for sw in [sw_calib, None]:
                cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
                y_prob = cal_clf.predict_proba(this_X_test)
                y_pred = cal_clf.predict(this_X_test)
                prob_pos_cal_clf = y_prob[:, 1]
                # predict must agree with the argmax of predict_proba.
                assert_array_equal(y_pred,
                                   np.array([0, 1])[np.argmax(y_prob, axis=1)])
                # Calibration should improve the Brier score vs the raw NB.
                assert (brier_score_loss(y_test, prob_pos_clf) >
                        brier_score_loss(y_test, prob_pos_cal_clf))
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_cali |
portError:
from md5 import md5
try:
import json
except ImportError:
import simplejson as json
from mwlib import filequeue, log, podclient, utils, wsgi, _version
# ==============================================================================
log = log.Log('mwlib.serve')
# ==============================================================================
def no_job_queue(job_type, collection_id, args):
"""Just spawn a new process for the given job"""
if os.name == 'nt':
kwargs = {}
else:
kwargs = {'close_fds': True}
try:
log.info('queueing %r' % args)
subprocess.Popen(args, **kwargs)
except OSError, exc:
raise RuntimeError('Could not execute command %r: %s' % (
args[0], exc,
))
# ==============================================================================
# A valid collection ID: exactly 16 lowercase alphanumeric characters
# (the truncated md5 hex digest produced by make_collection_id below).
collection_id_rex = re.compile(r'^[a-z0-9]{16}$')
def make_collection_id(data):
    """Derive a 16-character collection ID from the identifying POST fields."""
    # NOTE(review): the first "key" is the version *value* itself, so this
    # looks up data.get(<version string>) — presumably just salting the hash
    # with the server version; confirm that is intentional.
    keys = (
        _version.version,
        'metabook',
        'base_url',
        'script_extension',
        'template_blacklist',
        'template_exclusion_category',
        'login_credentials',
    )
    fingerprint = ''.join(repr(data.get(key)) for key in keys)
    return md5(fingerprint).hexdigest()[:16]
# ==============================================================================
def json_response(fn):
"""Decorator wrapping result of decorated function in JSON response"""
def wrapper(*args, **kwargs):
result = fn(*args, **kwargs)
if isinstance(result, wsgi.Response):
return result
return wsgi.Response(
content=json.dumps(result),
headers={'Content-Type': 'application/json'},
)
return wrapper
# ==============================================================================
class Application(wsgi.Application):
    # Well-known filenames stored inside each collection's cache directory.
    metabook_filename = 'metabook.json'  # serialized metabook for the collection
    error_filename = 'errors'            # per-writer error output
    status_filename = 'status'           # per-writer render status/progress
    output_filename = 'output'           # per-writer rendered document
    pid_filename = 'pid'                 # marker: mw-render is running for this writer
    zip_filename = 'collection.zip'      # presumably the zipped collection; confirm against do_zip
    mwpostlog_filename = 'mw-post.log'   # log of the mw-post command
    mwziplog_filename = 'mw-zip.log'     # log of the mw-zip command
    mwrenderlog_filename = 'mw-render.log'  # log of the mw-render command
    def __init__(self, cache_dir,
                 mwrender_cmd, mwrender_logfile,
                 mwzip_cmd, mwzip_logfile,
                 mwpost_cmd, mwpost_logfile,
                 queue_dir,
                 default_writer='rl',
                 report_from_mail=None,
                 report_recipients=None,
                 ):
        """Configure cache location, external commands, job queue and reporting.

        :param cache_dir: base directory holding per-collection subdirectories
        :param mwrender_cmd, mwrender_logfile: mw-render executable and its log
        :param mwzip_cmd, mwzip_logfile: mw-zip executable and its log
        :param mwpost_cmd, mwpost_logfile: mw-post executable and its log
        :param queue_dir: if set, jobs are queued as files in this directory;
            otherwise each job is spawned directly (no_job_queue)
        :param default_writer: writer used when a request names none
        :param report_from_mail, report_recipients: error-mail configuration;
            reports are sent only when both are set
        """
        self.cache_dir = utils.ensure_dir(cache_dir)
        self.mwrender_cmd = mwrender_cmd
        self.mwrender_logfile = mwrender_logfile
        self.mwzip_cmd = mwzip_cmd
        self.mwzip_logfile = mwzip_logfile
        self.mwpost_cmd = mwpost_cmd
        self.mwpost_logfile = mwpost_logfile
        if queue_dir:
            self.queue_job = filequeue.FileJobQueuer(utils.ensure_dir(queue_dir))
        else:
            self.queue_job = no_job_queue
        self.default_writer = default_writer
        self.report_from_mail = report_from_mail
        self.report_recipients = report_recipients
def dispatch(self, request):
try:
command = request.post_data['command']
except KeyError:
return self.error_response('no command given')
try:
method = getattr(self, 'do_%s' % command)
except AttributeError:
return self.error_response('invalid command %r' % command)
try:
return method(request.post_data)
except Exception, exc:
return self.error_response('error executing command %r: %s' % (
command, exc,
))
    @json_response
    def error_response(self, error):
        """Report an error by mail and return it as a JSON {'error': ...} body."""
        # Normalise to unicode first so json.dumps never chokes on raw bytes.
        if isinstance(error, str):
            error = unicode(error, 'utf-8', 'ignore')
        elif not isinstance(error, unicode):
            error = unicode(repr(error), 'ascii')
        self.send_report_mail('error response', error=error)
        return {'error': error}
def send_report_mail(self, subject, **kwargs):
if not (self.report_from_mail and self.report_recipients):
return
utils.report(
system='mwlib.serve',
subject=subject,
from_email=self.report_from_mail,
mail_recipients=self.report_recipients,
write_file=False,
**kwargs
)
    def get_collection_dir(self, collection_id):
        """Return the cache directory holding this collection's files."""
        return os.path.join(self.cache_dir, collection_id)
def check_collection_id(self, collection_id):
if not collection_id or not collection_id_rex.match(collection_id):
raise RuntimeError('invalid collection ID %r' % collection_id)
collection_dir = self.get_collection_dir(collection_id)
if not os.path.exists(collection_dir):
raise RuntimeError('no such collection: %r' % collection_id)
def new_collection(self, post_data):
collection_id = make_collection_id(post_data)
collection_dir = self.get_collection_dir(collection_id)
if not os.path.isdir(collection_dir):
log.info('Creating new collection dir %r' % collection_dir)
os.makedirs(collection_dir)
return collection_id
def get_path(self, collection_id, filename, ext=None):
p = os.path.join(self.get_collection_dir(collection_id), filename)
if ext is not None:
p += '.' + ext[:10]
return p
@json_response
def do_render(self, post_data):
metabook_data = post_data.get('metabook')
collection_id = post_data.get('collection_id')
if not (metabook_data or collection_id):
return self.error_response('POST argument metabook or collection_id required')
if metabook_data and collection_id:
return self.error_response('Specify either metabook or collection_id, not both')
try:
base_url = post_data['base_url']
writer = | post_data.get('writer', self.default_writer)
except KeyError, exc:
return self.error_response('POST argument required: %s' % exc)
writer_options = post_data.get('writer_options', '')
template_blacklist = post_data.get('template_blacklist', '')
template_exclusion_category = post_data.get('template_exclusion_category', '')
login_credentials = post_data.get('login_credentials', '')
force_render = bool(post_data.get('force_render'))
| script_extension = post_data.get('script_extension', '')
if not collection_id:
collection_id = self.new_collection(post_data)
log.info('render %s %s' % (collection_id, writer))
response = {
'collection_id': collection_id,
'writer': writer,
'is_cached': False,
}
pid_path = self.get_path(collection_id, self.pid_filename, writer)
if os.path.exists(pid_path):
log.info('mw-render already running for collection %r' % collection_id)
return response
output_path = self.get_path(collection_id, self.output_filename, writer)
if os.path.exists(output_path):
if force_render:
log.info('removing rendered file %r (forced rendering)' % output_path)
utils.safe_unlink(output_path)
else:
log.info('re-using rendered file %r' % output_path)
response['is_cached'] = True
return response
status_path = self.get_path(collection_id, self.status_filename, writer)
if os.path.exists(status_path):
if force_render:
log.info('removing status file %r (forced rendering)' % status_path)
utils.safe_unlink(status_path)
else:
log.info('status file exists %r' % status_path)
return response
error_path = self.get_path(collection_id, self.error_filename, writer)
if os.path.exists(error_path):
if force_render:
log.info('removing error file %r (forced rendering)' % error_path)
utils. |
#!/usr/bin/env python
"""
Simple-stupid time tracker script
=================================
Timetrack
Copyright (C) 2010, Branko Vukelic <studio@brankovukelic.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import getopt
import os
import re
import sqlite3

# Default database location: ~/timesheet.db
HOME_DIR = os.path.expanduser('~')
DEFAULT_FILE = os.path.join(HOME_DIR, 'timesheet.db')
# A project ID (PID) is exactly three ASCII letters.
PID_RE = re.compile(r'^[A-Za-z]{3}$')
def optpair(opts):
    """ Pair option switches and their own arguments.

    getopt returns a list of (switch, argument) tuples; collapse it into
    a dict keyed by switch.  As with the original loop, a repeated switch
    keeps its last value.
    """
    # dict() over a sequence of pairs is the built-in equivalent of the
    # previous manual accumulation loop.
    return dict(opts)
def check_pid(pname):
    """ Check project name, return true if it is correct """
    # PID_RE.match returns a match object or None; coerce to a plain bool
    # instead of the explicit True/False branches.
    return bool(PID_RE.match(pname))
def generate_timestamp():
    """Return ('YYYY-MM-DD HH:MM:SS' string, datetime object) for now."""
    from datetime import datetime
    now = datetime.now()
    return (now.strftime('%Y-%m-%d %H:%M:%S'), now)
def getduration(seconds):
    """Split a duration in seconds into an (hours, minutes, seconds) tuple.

    Fractional input is truncated to whole seconds, as before.
    """
    # divmod expresses the carry arithmetic directly instead of repeated
    # subtraction of multiples.
    minutes, secs = divmod(int(seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return (hours, minutes, secs)
def get_pids(connection):
    """Return the sorted list of distinct project IDs in the database."""
    cursor = connection.cursor()
    cursor.execute("SELECT DISTINCT pid FROM timesheet ORDER BY pid ASC;")
    result = [row[0] for row in cursor]
    cursor.close()
    return result
def get_times(connection, pidfilter):
    """Return a dict mapping each PID to its [(job, total seconds)] rows."""
    pids = [pidfilter] if pidfilter else get_pids(connection)
    pid_times = {}
    for pid in pids:
        cursor = connection.cursor()
        cursor.execute(
            "SELECT desc, TOTAL(dur) FROM timesheet WHERE pid = ? GROUP BY desc;",
            (pid,))
        pid_times[pid] = [row for row in cursor]
        cursor.close()
    return pid_times
def read_stats(connection, pidfilter):
pid_times = get_times(connection, pidfilter)
if not pid_times:
print "No data in database. Exiting."
return True
for k in pid_times.keys():
print ""
print "=========================="
print "PID: %s" % k
print "=========================="
print ""
for j in pid_times[k]:
print "Job: %s" % j[0]
print "Time: %02d:%02d:%02d" % getduration(j[1])
print ""
print "=========================="
print ""
def export_tsv(connection, filename, pidfilter):
pid_times = get_times(connection, pidfilter)
if not pid_times:
print "No data in database. Exiting."
return True
f = open(filename, 'w')
# Write header
f.write('PID\tJob\tTime\n')
for k in pid_times.keys():
for j in pid_times[k]:
f.write('%s\t%s\t%s\n' % (k, j[0], j[1]))
f.close()
def clean_string(s):
    """ Escapes characters in a string for SQL """
    # NOTE(review): the queries in this module are parameterized, so this
    # escaping is belt-and-braces rather than the primary defence.
    escaped = s.replace("'", "\\'")
    return escaped.replace(';', '\\;')
def add_data(connection, pidfilter):
    """ Gives user a prompt and writes data to the fhandle file """
    # Interactive loop: ask for a PID and a job description, time the task
    # between two Enter presses and insert one row per completed run.
    # Ctrl+C exits and rolls back any half-finished transaction.
    import readline
    print "Press Ctrl+C to exit."
    try:
        while True:
            pid = pidfilter
            # Keep prompting until a valid 3-letter PID is supplied
            # (skipped entirely when a valid pidfilter was passed in).
            while not check_pid(pid):
                pid = raw_input("PID: ")
                if not check_pid(pid):
                    print "'%s' is not a valid pid, please use a 3 letter sequence" % pid
            print "Project ID is %s" % pid
            desc = raw_input("Job: ")
            desc = clean_string(desc)
            if pid and desc:
                timestamp, starttime = generate_timestamp()
                print "Timer started at %s" % timestamp
                raw_input("Press Enter to stop the timer or Ctrl+C to abort")
                endtimestamp, endtime = generate_timestamp()
                print "Timer stopped at %s" % endtimestamp
                delta = endtime - starttime
                dsecs = delta.seconds
                print "Total duration was %s seconds" % dsecs
                args = (timestamp, pid, desc, dsecs)
                c = connection.cursor()
                try:
                    c.execute("INSERT INTO timesheet (timestamp, pid, desc, dur) VALUES (?, ?, ?, ?)", args)
                except:
                    # Roll back before re-raising so the DB stays consistent.
                    connection.rollback()
                    print "DB error: Data was not written"
                    raise
                else:
                    connection.commit()
                c.close()
                print "\n"
    except KeyboardInterrupt:
        connection.rollback()
def usage():
print """Timetrack
Copyright (c) 2010, Branko Vukelic
Released under GNU/GPL v3, see LICENSE file for details.
Usage: tt.py [-a] [-r] [-t FILE] [-p PID]
[--add] [--read] [--tsv FILE] [--pid PID] [dbfile]
-r --read : Display the stats.
-a --add : Start timer session (default action).
-t --tsv : Export into a tab-separated table (TSV). FILE is the filename to
use for exporting.
-p --pid : With argument 'PID' (3 letters, no numbers or non-alphanumeric
characters. Limits all operations to a single PID.
dbfile : Use this file as database, instead of default file. If the
specified file does not exist, it will be creadted.
More information at:
http://github.com/foxbunny/timetrack
"""
def main(argv):
try:
opts, args = getopt.getopt(argv, 'rat:p:', ['read', 'add', 'tsv=', 'pid='])
except getopt.GetoptError:
usage()
sys.exit(2)
optdict = optpair(opts)
statsfile = len(args) and args[0] or DEFAULT_FILE
print "Using stats file '%s'" % statsfile
pidfilter = optdict.get('-p', '') or optdict.get('--pid', '')
if pidfilter:
if check_pid(pidfilter):
print "Using project ID filter '%s'" % pidfilter
else:
print "Project ID filter '%s' is invalid and will be ignored." % pidfilter
print "Opening connection to database."
try:
connection = sqlite3.connect(statsfile)
except:
print "Database error. Exiting."
sys.exit(2)
print "Initialize table if none exists"
c = connection.cursor()
try:
c.execute("""CREATE TABLE IF NOT EXISTS timesheet (
id INTEGER PRIMARY KEY AUTOINCREMENT,
timestamp DATETIME DEFAULT (datetime('now')),
pid VARCHAR(3) NOT NULL,
desc VARCHAR(255) NOT NULL,
dur INTEGER NOT NULL);""")
except:
connection.rollback()
raise
else:
connection.commit()
c.close()
if ('-r' in optdict.keys()) or ('--read' in optdict.keys()):
read_stats(connection, pidfilter)
elif ('-t' in optdict.keys()) or ('--tsv' in optdict.keys()):
filename = optdict.get('-t', None) or optdict.get('--tsv')
export_tsv(connection, filename, pidfilter)
else:
add_data(connection, pidfilter)
print "Closing connection to database"
connection.close()
sys.exit(1)
if __name__ == '__main__':
    # Script entry point: pass argv without the program name.
    main(sys.argv[1:])
|
from typing import Dict, Set
from django.db import transaction
from django.db import models
from django.utils import timezone
from data_refinery_common.models.models import Sample, Experiment, OriginalFile
class SurveyJob(models.Model):
    """Records information about a Surveyor Job."""

    class Meta:
        db_table = "survey_jobs"

    source_type = models.CharField(max_length=256)
    success = models.NullBooleanField(null=True)
    no_retry = models.BooleanField(default=False)
    nomad_job_id = models.CharField(max_length=256, null=True)
    ram_amount = models.IntegerField(default=256)

    # The start time of the job
    start_time = models.DateTimeField(null=True)
    # The end time of the job
    end_time = models.DateTimeField(null=True)

    # This field represents how many times this job has been
    # retried. It starts at 0 and each time the job has to be retried
    # it will be incremented.
    num_retries = models.IntegerField(default=0)

    # This field indicates whether or not this job has been retried
    # already or not.
    retried = models.BooleanField(default=False)

    # This field allows jobs to specify why they failed.
    failure_reason = models.TextField(null=True)

    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            self.created_at = current_time
        self.last_modified = current_time
        return super(SurveyJob, self).save(*args, **kwargs)

    def get_properties(self) -> Dict:
        """ Return all associated SurveyJobKeyValues as a dict"""
        return {pair.key: pair.value for pair in self.surveyjobkeyvalue_set.all()}

    def get_accession_code(self):
        """ Return `experiment_accession_code`, the most important code."""
        try:
            kvp = self.surveyjobkeyvalue_set.get(key="experiment_accession_code")
            return kvp.value
        except (SurveyJobKeyValue.DoesNotExist,
                SurveyJobKeyValue.MultipleObjectsReturned):
            # BUG FIX: was a bare `except:` which also hid programming
            # errors and swallowed KeyboardInterrupt/SystemExit.
            return None

    def __str__(self):
        return "SurveyJob " + str(self.pk) + ": " + str(self.source_type)
class SurveyJobKeyValue(models.Model):
    """Tracks additional fields for SurveyJobs.
    Useful for fields that would be sparsely populated if they were
    their own columns. I.e. one source may have an extra field or two
    that are worth tracking but are specific to that source.
    """
    # Owning job; rows are removed together with their SurveyJob.
    survey_job = models.ForeignKey(SurveyJob, on_delete=models.CASCADE)
    key = models.CharField(max_length=256)
    value = models.CharField(max_length=256)

    class Meta:
        db_table = "survey_job_key_values"
class ProcessorJob(models.Model):
    """Records information about running a processor."""

    class Meta:
        db_table = "processor_jobs"

    # This field will contain an enumerated value specifying which
    # processor pipeline was applied during the processor job.
    pipeline_applied = models.CharField(max_length=256)
    original_files = models.ManyToManyField('OriginalFile', through='ProcessorJobOriginalFileAssociation')
    datasets = models.ManyToManyField('DataSet', through='ProcessorJobDataSetAssociation')
    no_retry = models.BooleanField(default=False)

    # Resources
    ram_amount = models.IntegerField(default=2048)
    volume_index = models.CharField(max_length=3, null=True)

    # Tracking
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    success = models.NullBooleanField(null=True)
    nomad_job_id = models.CharField(max_length=256, null=True)

    # This field represents how many times this job has been
    # retried. It starts at 0 and each time the job has to be retried
    # it will be incremented.
    num_retries = models.IntegerField(default=0)

    # This field indicates whether or not this job has been retried
    # already or not.
    retried = models.BooleanField(default=False)

    # This point of this field is to identify which worker ran the
    # job. A few fields may actually be required or something other
    # than just an id.
    worker_id = models.CharField(max_length=256, null=True)

    # This field corresponds to the version number of the
    # data_refinery_workers project that was used to run the job.
    worker_version = models.CharField(max_length=128, null=True)

    # This field allows jobs to specify why they failed.
    failure_reason = models.TextField(null=True)

    # If the job is retried, this is the id of the new job
    retried_job = models.ForeignKey('self', on_delete=models.PROTECT, null=True)

    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def get_samples(self) -> Set[Sample]:
        """Collect the distinct Samples behind this job's original files."""
        samples = set()
        for original_file in self.original_files.all():
            for sample in original_file.samples.all():
                samples.add(sample)
        return samples

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save only: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(ProcessorJob, self).save(*args, **kwargs)

    def __str__(self):
        return "ProcessorJob " + str(self.pk) + ": " + str(self.pipeline_applied)
class DownloaderJob(models.Model):
    """Records information about running a Downloader."""

    class Meta:
        db_table = "downloader_jobs"

    # This field contains a string which corresponds to a valid
    # Downloader Task. Valid values are enumerated in:
    # data_refinery_common.job_lookup.Downloaders
    downloader_task = models.CharField(max_length=256)
    accession_code = models.CharField(max_length=256, blank=True, null=True)
    no_retry = models.BooleanField(default=False)
    original_files = models.ManyToManyField('OriginalFile', through='DownloaderJobOriginalFileAssociation')

    # Tracking
    start_time = models.DateTimeField(null=True)
    end_time = models.DateTimeField(null=True)
    success = models.NullBooleanField(null=True)
    nomad_job_id = models.CharField(max_length=256, null=True)

    # This field represents how many times this job has been
    # retried. It starts at 0 and each time the job has to be retried
    # it will be incremented.
    num_retries = models.IntegerField(default=0)

    # This field indicates whether or not this job has been retried
    # already or not.
    retried = models.BooleanField(default=False)

    # This point of this field is to identify which worker ran the
    # job. A few fields may actually be required or something other
    # than just an id.
    worker_id = models.CharField(max_length=256, null=True)

    # This field corresponds to the version number of the
    # data_refinery_workers project that was used to run the job.
    worker_version = models.CharField(max_length=128, null=True)

    # This field allows jobs to specify why they failed.
    failure_reason = models.TextField(null=True)

    # If the job is retried, this is the id of the new job
    retried_job = models.ForeignKey('self', on_delete=models.PROTECT, null=True)

    created_at = models.DateTimeField(editable=False, default=timezone.now)
    last_modified = models.DateTimeField(default=timezone.now)

    def get_samples(self) -> Set[Sample]:
        """Collect the distinct Samples behind this job's original files."""
        samples = set()
        for original_file in self.original_files.all():
            for sample in original_file.samples.all():
                samples.add(sample)
        return samples

    def save(self, *args, **kwargs):
        """ On save, update timestamps """
        current_time = timezone.now()
        if not self.id:
            # First save only: stamp the creation time.
            self.created_at = current_time
        self.last_modified = current_time
        return super(DownloaderJob, self).save(*args, **kwargs)

    def __str__(self):
        return "DownloaderJob " + str(self.pk) + ": " + str(self.downloader_task)
|
import cp | p11_decltype
a = cpp11_decltype.A()
a.i = 5
if a.i != 5:
| raise RuntimeError, "Assignment to a.i failed."
a.j = 10
if a.j != 10:
raise RuntimeError, "Assignment to a.j failed."
b = a.foo(5)
if b != 10:
raise RuntimeError, "foo(5) should return 10."
b = a.foo(6)
if b != 0:
raise RuntimeError, "foo(6) should return 0."
|
import numpy as np
import cudarray as ca
from .base import PickleMixin

# Smallest positive value for the active cudarray float dtype; used to
# clip predictions away from zero before taking logarithms.
_FLT_MIN = np.finfo(ca.float_).tiny
class Loss(PickleMixin):
    """Base class for output losses.

    Subclasses implement loss() and grad(); fprop() may post-process the
    network output for prediction.
    """
    # abll: I suspect that this interface is not ideal. It would be more
    # elegant if Loss only provided loss() and grad(). However, where should
    # we place the logic from fprop()?

    @classmethod
    def from_any(cls, arg):
        """Coerce `arg` (a Loss instance or a name string) to a Loss."""
        if isinstance(arg, Loss):
            return arg
        if isinstance(arg, str):
            by_name = {
                'softmaxce': SoftmaxCrossEntropy,
                'bce': BinaryCrossEntropy,
                'mse': MeanSquaredError,
            }
            if arg in by_name:
                return by_name[arg]()
        raise ValueError('Invalid constructor arguments: %s' % arg)

    def _setup(self, x_shape):
        pass

    def fprop(self, x):
        # Identity by default; subclasses may decode predictions here.
        return x

    def loss(self, target, x):
        """ Returns the loss calculated from the target and the input. """
        raise NotImplementedError()

    def grad(self, target, x):
        """ Returns the input gradient. """
        raise NotImplementedError()

    def y_shape(self, x_shape):
        return x_shape
class SoftmaxCrossEntropy(Loss):
    """Softmax + cross entropy (aka. multinomial logistic loss)."""

    def __init__(self):
        self.name = 'softmaxce'
        # Identity-keyed caches shared between loss() and grad().
        self._last_x = None
        self._last_softmax = None
        self._last_target = None
        self._last_one_hot = None
        self.n_classes = None

    def _setup(self, x_shape):
        # The class count is the size of the second input dimension.
        self.n_classes = x_shape[1]

    def _softmax(self, x):
        # Recompute only when a different array object is passed in.
        if self._last_x is not x:
            self._last_softmax = ca.nnet.softmax(x)
            self._last_x = x
        return self._last_softmax

    def _one_hot(self, target):
        # Same identity-based memoization as _softmax.
        if self._last_target is not target:
            self._last_one_hot = ca.nnet.one_hot_encode(target, self.n_classes)
            self._last_target = target
        return self._last_one_hot

    def fprop(self, x):
        return ca.nnet.one_hot_decode(self._softmax(x))

    def loss(self, target, x):
        probs = self._softmax(x)
        encoded = self._one_hot(target)
        return ca.nnet.categorical_cross_entropy(y_pred=probs, y_true=encoded)

    def grad(self, target, x):
        probs = self._softmax(x)
        encoded = self._one_hot(target)
        # Gradient of softmax + CE collapses to (probabilities - targets).
        return probs - encoded

    def y_shape(self, x_shape):
        return (x_shape[0],)
class BinaryCrossEntropy(Loss):
    """Element-wise binary cross entropy, averaged over axis 1."""

    def __init__(self):
        self.name = 'bce'

    def loss(self, y, y_pred):
        # Clip predictions away from zero so log() stays finite.
        pred = ca.maximum(y_pred, _FLT_MIN)
        positive_term = y * ca.log(pred)
        negative_term = (1 - y) * ca.log(1 - pred)
        return -ca.mean(positive_term + negative_term, axis=1)

    def grad(self, y, y_pred):
        pred = ca.maximum(y_pred, _FLT_MIN)
        # Equivalent to -(y/pred - (1-y)/(1-pred)).
        return (1 - y) / (1 - pred) - y / pred
class MeanSquaredError(Loss):
    """Mean squared error, averaged over axis 1."""

    def __init__(self):
        self.name = 'mse'
        self.n_targets = None

    def _setup(self, x_shape):
        # grad() scales by the number of target dimensions.
        self.n_targets = x_shape[1]

    def loss(self, y, y_pred):
        diff = y - y_pred
        return ca.mean(diff ** 2, axis=1)

    def grad(self, y, y_pred):
        return (y_pred - y) * (2.0 / self.n_targets)
|
from rest_framew | ork import serializers
class BaseModelSerializer(serializers.ModelSerializer):
    """ModelSerializer base that renders the instance `id` as a string."""
    # Replaces the default `id` field with the method-field below.
    id = serializers.SerializerMethodField()

    def get_id(self, instance):
        # DRF convention: SerializerMethodField resolves to get_<field_name>.
        return str(instance.id)
|
from website.addons.base.serializer import CitationsAddonSerializer
class MendeleySerializer(CitationsAddonSerializer):
    """Citations-addon serializer specialized for the Mendeley addon."""
    # Identifies this addon to the shared CitationsAddonSerializer machinery.
    addon_short_name = 'mendeley'
    def for_config(cls, config):
        """Build a cache for `config`, honouring the --cacheclear option."""
        cachedir = cls.cache_dir_from_config(config)
        if config.getoption("cacheclear") and cachedir.exists():
            # Wipe and recreate the cache directory before the session starts.
            rmtree(cachedir, force=True)
            cachedir.mkdir()
        return cls(cachedir, config)
    @staticmethod
    def cache_dir_from_config(config):
        """Resolve the `cache_dir` ini option relative to config.rootdir."""
        return resolve_from_str(config.getini("cache_dir"), config.rootdir)
    def warn(self, fmt, **args):
        """Issue a PytestWarning, formatting `fmt` with `args` when given."""
        # NOTE(review): imported lazily — presumably to avoid import cycles
        # with _pytest internals; confirm before hoisting.
        from _pytest.warnings import _issue_warning_captured
        from _pytest.warning_types import PytestWarning

        _issue_warning_captured(
            PytestWarning(fmt.format(**args) if args else fmt),
            self._config.hook,
            stacklevel=3,
        )
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files likes e. g. store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
name = Path(name)
if len(name.parts) > 1:
raise ValueError("name is not allowed to contain path separators")
res = self._cachedir.joinpath("d", name)
res.mkdir(exist_ok=True, parents=True)
return py.path.local(res)
def _getvaluepath(self, key):
return self._cachedir.joinpath("v", Path(key))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
try:
with path.open("r") as f:
return json.load(f)
except (ValueError, IOError, OSError):
return default
    def set(self, key, value):
        """ save value for the given key.

        :param key: must be a ``/`` separated value. Usually the first
            name is the name of your plugin or your application.
        :param value: must be of any combination of basic
            python types, including nested types
            like e. g. lists of dictionaries.
        """
        path = self._getvaluepath(key)
        try:
            # Record whether the cache dir existed BEFORE creating the value
            # directory: if it did not, supporting files (README, .gitignore,
            # CACHEDIR.TAG) are written after a successful dump below.
            if path.parent.is_dir():
                cache_dir_exists_already = True
            else:
                cache_dir_exists_already = self._cachedir.exists()
            path.parent.mkdir(exist_ok=True, parents=True)
        except (IOError, OSError):
            self.warn("could not create cache path {path}", path=path)
            return
        try:
            # Binary mode on Python 2 so json.dump output is written as bytes.
            f = path.open("wb" if PY2 else "w")
        except (IOError, OSError):
            self.warn("cache could not write path {path}", path=path)
        else:
            with f:
                json.dump(value, f, indent=2, sort_keys=True)
            if not cache_dir_exists_already:
                self._ensure_supporting_files()
def _ensure_supporting_files(self):
"""Create supporting files in the cache dir that are not really part of the cache."""
if self._cachedir.is_dir():
readme_path = self._cachedir / "README.md"
if not readme_path.is_file():
readme_path.write_text(README_CONTENT)
gitignore_path = self._cachedir.joinpath(".gitignore")
if not gitignore_path.is_file():
msg = u"# Created by pytest automatically.\n*"
gitignore_path.write_text(msg, encoding="UTF-8")
cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
if not cachedir_tag_path.is_file():
cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
class LFPlugin(object):
    """ Plugin which implements the --lf (run last-failing) option """
    def __init__(self, config):
        self.config = config
        # Active for both --lf and --ff (failed-first).
        active_keys = "lf", "failedfirst"
        self.active = any(config.getoption(key) for key in active_keys)
        # nodeid -> True mapping of previously failed tests, from the cache.
        self.lastfailed = config.cache.get("cache/lastfailed", {})
        self._previously_failed_count = None
        self._no_failures_behavior = self.config.getoption("last_failed_no_failures")
    def pytest_report_collectionfinish(self):
        """Return the 'run-last-failure: ...' status line for the report header."""
        if self.active and self.config.getoption("verbose") >= 0:
            if not self._previously_failed_count:
                return None
            noun = "failure" if self._previously_failed_count == 1 else "failures"
            suffix = " first" if self.config.getoption("failedfirst") else ""
            mode = "rerun previous {count} {noun}{suffix}".format(
                count=self._previously_failed_count, suffix=suffix, noun=noun
            )
            return "run-last-failure: %s" % mode
    def pytest_runtest_logreport(self, report):
        # A passed call phase (or a skip) clears the failure record;
        # any failed phase (setup/call/teardown) records it.
        if (report.when == "call" and report.passed) or report.skipped:
            self.lastfailed.pop(report.nodeid, None)
        elif report.failed:
            self.lastfailed[report.nodeid] = True
    def pytest_collectreport(self, report):
        passed = report.outcome in ("passed", "skipped")
        if passed:
            if report.nodeid in self.lastfailed:
                # Collection succeeded again: replace the collector's own
                # entry with entries for its collected children.
                self.lastfailed.pop(report.nodeid)
                self.lastfailed.update((item.nodeid, True) for item in report.result)
        else:
            self.lastfailed[report.nodeid] = True
    def pytest_collection_modifyitems(self, session, config, items):
        """Reorder/deselect collected items according to --lf/--ff."""
        if self.active:
            if self.lastfailed:
                previously_failed = []
                previously_passed = []
                for item in items:
                    if item.nodeid in self.lastfailed:
                        previously_failed.append(item)
                    else:
                        previously_passed.append(item)
                self._previously_failed_count = len(previously_failed)
                if not previously_failed:
                    # running a subset of all tests with recorded failures outside
                    # of the set of tests currently executing
                    return
                if self.config.getoption("lf"):
                    # --lf: run only the failures.
                    items[:] = previously_failed
                    config.hook.pytest_deselected(items=previously_passed)
                else:
                    # --ff: run failures first, then the rest.
                    items[:] = previously_failed + previously_passed
            elif self._no_failures_behavior == "none":
                # --last-failed-no-failures=none: nothing failed, run nothing.
                config.hook.pytest_deselected(items=items)
                items[:] = []
    def pytest_sessionfinish(self, session):
        """Persist the failure record, except on workers or when only showing."""
        config = self.config
        if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
            return
        saved_lastfailed = config.cache.get("cache/lastfailed", {})
        if saved_lastfailed != self.lastfailed:
            config.cache.set("cache/lastfailed", self.lastfailed)
class N | FPlugin(object):
""" Plugin which implements the --nf (run new-first) option """
    def __init__(self, config):
        self.config = config
        self.active = config.option.newfirst
        # nodeids seen in previous runs; anything not in here is "new".
        self.cached_nodeids = config.cache.get("cache/nodeids", [])
    def pytest_collection_modifyitems(self, session, config, items):
        """Run never-seen tests first (--nf), then remember all current ids."""
        if self.active:
            # OrderedDict keeps collection order while deduplicating nodeids.
            new_items = OrderedDict()
            other_items = OrderedDict()
            for item in items:
                if item.nodeid not in self.cached_nodeids:
                    new_items[item.nodeid] = item
                else:
                    other_items[item.nodeid] = item
            items[:] = self._get_increasing_order(
                six.itervalues(new_items)
            ) + self._get_increasing_order(six.itervalues(other_items))
        # Always refresh the cached set, even when --nf is inactive.
        self.cached_nodeids = [x.nodeid for x in items if isinstance(x, pytest.Item)]
def _get_increasing_order(self, items) |
import os, sys
import myfun
import numpy as np
import matplotlib as mpl
mpl.use('ps')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import interpolate
import lagrangian_stats
import scipy.fftpack
## READ archive (too many points... somehow)
# args: name, dayi, dayf, days
# NOTE: this is a Python 2 script (print statements, integer division below).
#label = 'm_25_2_512'
label = 'm_25_1_particles'
# Time-step range processed by the loop below.
dayi = 481 #10*24*2
dayf = 581 #10*24*4
days = 1
#label = sys.argv[1]
#basename = sys.argv[2]
#dayi = int(sys.argv[3])
#dayf = int(sys.argv[4])
#days = int(sys.argv[5])
path = './Velocity_CG/'
# Create the output plot directory if it does not exist yet.
try: os.stat('./plot/'+label)
except OSError: os.mkdir('./plot/'+label)
# dimensions archives
# ML exp
Xlist = np.linspace(0,2000,161)
Ylist = np.linspace(0,2000,161)
Zlist = np.linspace(0,-50,51)
dl = [0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1]
# NOTE(review): this overwrites the linspace Zlist above with cumulative
# layer thicknesses -- the linspace assignment is dead code.
Zlist = np.cumsum(dl)
xn = len(Xlist)
yn = len(Ylist)
zn = len(Zlist)
dx = np.diff(Xlist)
# BUG(review): `dy` is never defined but is used in the divergence
# computation below (dV[1,:,:]/dy) -- probably np.diff(Ylist); confirm.
z = 1
# Main loop: for each time step, compute the horizontal divergence per
# depth level, plot its power spectral density, and plot a temperature map.
for time in range(dayi,dayf,days):
  print 'time:', time
  # zero-pad the time label to 3 digits for the output file names
  tlabel = str(time)
  while len(tlabel) < 3: tlabel = '0'+tlabel
  #Velocity_CG_m_50_6e_9.csv
  fileU = path+'Velocity_CG_0_'+label+'_'+str(time)+'.csv'
  fileV = path+'Velocity_CG_1_'+label+'_'+str(time)+'.csv'
  fileT = '../RST/Temperature_CG/Temperature_CG_'+label+'_'+str(time)+'.csv'
  file1 = 'Divergence_'+label+'_'+str(time)
  #
  U = lagrangian_stats.read_Scalar(fileU,xn,yn,zn)
  V = lagrangian_stats.read_Scalar(fileV,xn,yn,zn)
  T = lagrangian_stats.read_Scalar(fileT,xn,yn,zn)
  # every 5th depth level
  for k in range(0,len(Zlist),5):
   dU = np.asarray(np.gradient(U[:,:,k]))
   dV = np.asarray(np.gradient(V[:,:,k]))
   # BUG(review): `dy` is never defined anywhere in this script -- this line
   # raises NameError; presumably np.diff(Ylist) was intended. Confirm.
   Div = dU[0,:,:]/dx + dV[1,:,:]/dy
   #
   # xn/1 is Python 2 integer division; FT holds per-row spectra
   FT = np.zeros((xn/1,yn))
   #
   for j in range(len(Ylist)):
    tempfft = scipy.fftpack.fft(Div[:,j]**2,xn)
    FT[:,j] = abs(tempfft)**2
   w = scipy.fftpack.fftfreq(xn, dx[1])
#   w = scipy.fftpack.fftshift(w)
   # mean spectrum across Y, normalized by xn
   FTp = np.mean(FT,1)/xn
   fig = plt.figure(figsize=(10,8))
   p25, = plt.loglog(w[w>0], FTp[w>0],'r',linewidth=2)
   # reference slopes: k^-5/3, k^-3, k^-1
   plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+5/3.)],'k',linewidth=1.5)
   plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+3.)],'k',linewidth=1.5)
   plt.plot([5*10**-3, 5*10**-2],[5*10**3 , 5*10**-( -3+1.)],'k',linewidth=1.5)
   plt.text(6*10**-2, 5*10**-( -3+5/3.), '-5/3',fontsize=18)
   plt.text(6*10**-2, 5*10**-( -3+3.), '-3',fontsize=18)
   plt.text(6*10**-2, 5*10**-( -3+1.), '-1',fontsize=18)
   plt.text(10**-3, 10**2,str(time*360./3600)+'hr',fontsize=18)
   plt.xlabel(r'k $[m^{-1}]$',fontsize=20)
   plt.ylabel(r'PSD',fontsize=20)
   plt.yticks(fontsize=18)
   plt.xticks(fontsize=18)
   plt.xlim([1/2000.,1/10.])
   plt.savefig('./plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps',bbox_inches='tight')
   print './plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+tlabel+'_spec.eps'
   plt.close()
   #
   # Temperature map for this time step.
   # NOTE(review): T is read as a 3-D (xn,yn,zn) array but passed whole to
   # contourf -- a 2-D slice (e.g. T[:,:,k]) may have been intended; confirm.
   v = np.linspace(0, 10, 10, endpoint=True)
   vl = np.linspace(0, 10, 5, endpoint=True)
   fig = plt.figure(figsize=(6,6))
   fig.add_subplot(111,aspect='equal')
   plt.contourf(Xlist/1000,Ylist/1000,T,v,extend='both',cmap=plt.cm.PiYG)
   plt.colorbar(ticks=vl)
   plt.title(str(np.round(10*(time*360./3600))/10.0)+'h')
   plt.ylabel('Y [km]',fontsize=16)
   plt.xlabel('X [km]',fontsize=16)
   plt.savefig('./plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps',bbox_inches='tight')
   print './plot/'+label+'/Divergence_'+str(z)+'_CG_'+label+'_'+str(time)+'.eps'
   plt.close()
|
# -*- coding: utf-8 -*-
__versi | on__ = '0.7.0'
| |
not match.group(0).endswith('.history') and
not match.group(0).endswith('.partial')):
return True
return False
def decode_segment_name(path):
    """
    Retrieve the timeline, log ID and segment ID
    from the name of a xlog segment

    It can handle either a full file path or a simple file name.

    :param str path: the file name to decode
    :rtype: list[int]
    """
    segment_name = os.path.basename(path)
    match = _xlog_re.match(segment_name)
    if match is None:
        raise BadXlogSegmentName(segment_name)
    decoded = []
    for group in match.groups():
        # Groups are hex strings; absent groups decode to None
        decoded.append(int(group, 16) if group else None)
    return decoded
def encode_segment_name(tli, log, seg):
    """
    Build the xlog segment name based on timeline, log ID and segment ID

    :param int tli: timeline number
    :param int log: log number
    :param int seg: segment number
    :return str: segment file name
    """
    # Each component is zero-padded 8-digit uppercase hex
    return "{0:08X}{1:08X}{2:08X}".format(tli, log, seg)
def encode_history_file_name(tli):
    """
    Build the history file name based on timeline

    :return str: history file name
    """
    return "{0:08X}.history".format(tli)
def xlog_segments_per_file(xlog_segment_size):
    """
    Given that WAL files are named using the following pattern:

        <timeline_number><xlog_file_number><xlog_segment_number>

    this is the number of XLOG segments in an XLOG file. By XLOG file
    we don't mean an actual file on the filesystem, but the definition
    used in the PostgreSQL sources: meaning a set of files containing the
    same file number.

    :param int xlog_segment_size: The XLOG segment size in bytes
    :return int: The number of segments in an XLOG file
    """
    # The segment number is a 32-bit field
    max_segment_number = 0xFFFFFFFF
    return max_segment_number // xlog_segment_size
def xlog_file_size(xlog_segment_size):
    """
    Given that WAL files are named using the following pattern:

        <timeline_number><xlog_file_number><xlog_segment_number>

    this is the size in bytes of an XLOG file, which is composed of many
    segments. See the documentation of `xlog_segments_per_file` for a
    commentary on the definition of `XLOG` file.

    :param int xlog_segment_size: The XLOG segment size in bytes
    :return int: The size of an XLOG file
    """
    segments = xlog_segments_per_file(xlog_segment_size)
    return xlog_segment_size * segments
def generate_segment_names(begin, end=None, version=None,
                           xlog_segment_size=None):
    """
    Generate a sequence of XLOG segments starting from ``begin``
    If an ``end`` segment is provided the sequence will terminate after
    returning it, otherwise the sequence will never terminate.

    If the XLOG segment size is known, this generator is precise,
    switching to the next file when required.

    If the XLOG segment size is unknown, this generator will generate
    all the possible XLOG file names.
    The size of an XLOG segment can be every power of 2 between
    the XLOG block size (8Kib) and the size of a log segment (4Gib)

    :param str begin: begin segment name
    :param str|None end: optional end segment name
    :param int|None version: optional postgres version as an integer
       (e.g. 90301 for 9.3.1)
    :param int xlog_segment_size: the size of a XLOG segment
    :rtype: collections.Iterable[str]
    :raise: BadXlogSegmentName
    """
    begin_tli, begin_log, begin_seg = decode_segment_name(begin)
    end_tli, end_log, end_seg = None, None, None
    if end:
        end_tli, end_log, end_seg = decode_segment_name(end)
        # this method doesn't support timeline changes
        assert begin_tli == end_tli, (
            "Begin segment (%s) and end segment (%s) "
            "must have the same timeline part" % (begin, end))
    # If version is less than 9.3 the last segment must be skipped
    skip_last_segment = version is not None and version < 90300
    # This is the number of XLOG segments in an XLOG file. By XLOG file
    # we don't mean an actual file on the filesystem, but the definition
    # used in the PostgreSQL sources: a set of files containing the
    # same file number.
    if xlog_segment_size:
        # The generator is operating in precise and correct mode:
        # knowing exactly when a switch to the next file is required
        xlog_seg_per_file = xlog_segments_per_file(xlog_segment_size)
    else:
        # The generator is operating in imprecise mode: generating every
        # possible XLOG file name.
        xlog_seg_per_file = 0x7ffff
    # Start from the first xlog and generate the segments sequentially
    # If ``end`` has been provided, the while condition ensure the termination
    # otherwise this generator will never stop
    cur_log, cur_seg = begin_log, begin_seg
    while end is None or \
            cur_log < end_log or \
            (cur_log == end_log and cur_seg <= end_seg):
        yield encode_segment_name(begin_tli, cur_log, cur_seg)
        cur_seg += 1
        # Roll over to the next file when the per-file segment count is
        # exceeded (pre-9.3 skips the last segment of each file).
        if cur_seg > xlog_seg_per_file or (
                skip_last_segment and cur_seg == xlog_seg_per_file):
            cur_seg = 0
            cur_log += 1
def hash_dir(path):
    """
    Get the directory where the xlog segment will be stored

    It can handle either a full file path or a simple file name.

    :param str|unicode path: xlog file name
    :return str: directory name
    """
    tli, log, _ = decode_segment_name(path)
    # tli is always not None; log may be None (then no subdirectory applies)
    if log is None:
        return ''
    return "%08X%08X" % (tli, log)
def parse_lsn(lsn_string):
    """
    Transform a string XLOG location, formatted as %X/%X, in the corresponding
    numeric representation

    :param str lsn_string: the string XLOG location, i.e. '2/82000168'
    :rtype: int
    :raises ValueError: if lsn_string is not of the form <hex>/<hex>
    """
    lsn_list = lsn_string.split('/')
    if len(lsn_list) != 2:
        # BUG FIX: the message was previously built with a comma
        # (ValueError('Invalid LSN: %s', lsn_string)), which leaves the
        # placeholder unformatted; interpolate with % instead.
        raise ValueError('Invalid LSN: %s' % lsn_string)
    # High 32 bits before the slash, low 32 bits after, both in hex
    return (int(lsn_list[0], 16) << 32) + int(lsn_list[1], 16)
def diff_lsn(lsn_string1, lsn_string2):
    """
    Calculate the difference in bytes between two string XLOG locations,
    formatted as %X/%X

    This function is a Python implementation of
    the ``pg_xlog_location_diff(str, str)`` PostgreSQL function.

    :param str lsn_string1: the string XLOG location, i.e. '2/82000168'
    :param str lsn_string2: the string XLOG location, i.e. '2/82000168'
    :rtype: int
    """
    # Propagate None when either input is missing
    if lsn_string1 is None or lsn_string2 is None:
        return None
    first = parse_lsn(lsn_string1)
    second = parse_lsn(lsn_string2)
    return first - second
def format_lsn(lsn):
    """
    Transform a numeric XLOG location, in the corresponding %X/%X string
    representation

    :param int lsn: numeric XLOG location
    :rtype: str
    """
    high_word = lsn >> 32
    low_word = lsn & 0xFFFFFFFF
    return "%X/%X" % (high_word, low_word)
def location_to_xlogfile_name_offset(location, timeline, xlog_segment_size):
    """
    Convert transaction log location string to file_name and file_offset

    This is a reimplementation of pg_xlogfile_name_offset PostgreSQL function

    This method returns a dictionary containing the following data:

         * file_name
         * file_offset

    :param str location: XLOG location
    :param int timeline: timeline
    :param int xlog_segment_size: the size of a XLOG segment
    :rtype: dict
    """
    lsn = parse_lsn(location)
    log = lsn >> 32
    # NOTE(review): the mask and the fixed >> 24 / & 0xFFFFFF below assume
    # 16MiB (2**24 byte) segments regardless of xlog_segment_size -- confirm
    # whether other segment sizes need to be supported here.
    seg = (lsn & xlog_file_size(xlog_segment_size)) >> 24
    offset = lsn & 0xFFFFFF
    return {
        'file_name': encode_segment_name(timeline, log, seg),
        'file_offset': offset,
    }
def location_from_xlogfile_name_offset(file_name, file_offset):
    """
    Convert file_name and file_offset to a transaction log location.

    This is the inverted function of PostgreSQL's pg_xlogfile_name_offset
    function.

    :param str file_name: a WAL file name
    :param int file_offset: a numeric offset
    :rtype: str
    """
    _, log, seg = decode_segment_name(file_name)
    # Rebuild the numeric LSN: log in the high word, segment shifted by 24
    location = (log << 32) + (seg << 24) + file_offset
    return format_lsn(location)
def decode_history_file(wal_info, comp_manager):
"""
Read an history file and parse its contents.
Each line in the file represents a timeline switch, each field is
|
e | rsion
self.code = code
self.reason = reason
def __str_ | _(self):
return "%s %s %s" % (self.version, self.code, self.reason)
class HttpMessageWriter(object):
  """Writes an HTTP message to a socket.

  """
  def __init__(self, sock, msg, write_timeout):
    """Initializes this class and writes an HTTP message to a socket.

    @type sock: socket
    @param sock: Socket to be written to
    @type msg: http.HttpMessage
    @param msg: HTTP message to be written
    @type write_timeout: float
    @param write_timeout: Write timeout for socket

    """
    self._msg = msg
    self._PrepareMessage()
    buf = self._FormatMessage()
    pos = 0
    end = len(buf)
    while pos < end:
      # Send only SOCK_BUF_SIZE bytes at a time
      data = buf[pos:(pos + SOCK_BUF_SIZE)]
      sent = SocketOperation(sock, SOCKOP_SEND, data, write_timeout)
      # Remove sent bytes
      pos += sent
    assert pos == end, "Message wasn't sent completely"
  def _PrepareMessage(self):
    """Prepares the HTTP message by setting mandatory headers.

    """
    # RFC2616, section 4.3: "The presence of a message-body in a request is
    # signaled by the inclusion of a Content-Length or Transfer-Encoding header
    # field in the request's message-headers."
    if self._msg.body:
      self._msg.headers[HTTP_CONTENT_LENGTH] = len(self._msg.body)
  def _FormatMessage(self):
    """Serializes the HTTP message into a string.

    """
    buf = StringIO()
    # Add start line
    buf.write(str(self._msg.start_line))
    buf.write("\r\n")
    # Add headers (HTTP/0.9 has no header section)
    if self._msg.start_line.version != HTTP_0_9:
      for name, value in self._msg.headers.iteritems():
        buf.write("%s: %s\r\n" % (name, value))
    buf.write("\r\n")
    # Add message body if needed
    if self.HasMessageBody():
      buf.write(self._msg.body)
    elif self._msg.body:
      logging.warning("Ignoring message body")
    return buf.getvalue()
  def HasMessageBody(self):
    """Checks whether the HTTP message contains a body.

    Can be overridden by subclasses.

    """
    return bool(self._msg.body)
class HttpMessageReader(object):
"""Reads HTTP message from socket.
"""
# Length limits
START_LINE_LENGTH_MAX = None
HEADER_LENGTH_MAX = None
# Parser state machine
PS_START_LINE = "start-line"
PS_HEADERS = "headers"
PS_BODY = "entity-body"
PS_COMPLETE = "complete"
  def __init__(self, sock, msg, read_timeout):
    """Reads an HTTP message from a socket.

    @type sock: socket
    @param sock: Socket to be read from
    @type msg: http.HttpMessage
    @param msg: Object for the read message
    @type read_timeout: float
    @param read_timeout: Read timeout for socket

    """
    self.sock = sock
    self.msg = msg
    self.start_line_buffer = None
    self.header_buffer = StringIO()
    self.body_buffer = StringIO()
    self.parser_status = self.PS_START_LINE
    self.content_length = None
    self.peer_will_close = None
    buf = ""
    eof = False
    # Keep receiving and feeding the parser until the message is complete
    while self.parser_status != self.PS_COMPLETE:
      # TODO: Don't read more than necessary (Content-Length), otherwise
      # data might be lost and/or an error could occur
      data = SocketOperation(sock, SOCKOP_RECV, SOCK_BUF_SIZE, read_timeout)
      if data:
        buf += data
      else:
        # Empty read means the peer closed the connection
        eof = True
      # Do some parsing and error checking while more data arrives
      buf = self._ContinueParsing(buf, eof)
      # Must be done only after the buffer has been evaluated
      # TODO: Content-Length < len(data read) and connection closed
      if (eof and
          self.parser_status in (self.PS_START_LINE,
                                 self.PS_HEADERS)):
        raise HttpError("Connection closed prematurely")
    # Parse rest
    buf = self._ContinueParsing(buf, True)
    assert self.parser_status == self.PS_COMPLETE
    assert not buf, "Parser didn't read full response"
    # Body is complete
    msg.body = self.body_buffer.getvalue()
  def _ContinueParsing(self, buf, eof):
    """Main function for HTTP message state machine.

    @type buf: string
    @param buf: Receive buffer
    @type eof: bool
    @param eof: Whether we've reached EOF on the socket
    @rtype: string
    @return: Updated receive buffer

    """
    # TODO: Use offset instead of slicing when possible
    # States fall through: a single call can advance from start line all
    # the way to the body when enough data is buffered.
    if self.parser_status == self.PS_START_LINE:
      # Expect start line
      while True:
        idx = buf.find("\r\n")
        # RFC2616, section 4.1: "In the interest of robustness, servers SHOULD
        # ignore any empty line(s) received where a Request-Line is expected.
        # In other words, if the server is reading the protocol stream at the
        # beginning of a message and receives a CRLF first, it should ignore
        # the CRLF."
        if idx == 0:
          # TODO: Limit number of CRLFs/empty lines for safety?
          buf = buf[2:]
          continue
        if idx > 0:
          self.start_line_buffer = buf[:idx]
          self._CheckStartLineLength(len(self.start_line_buffer))
          # Remove status line, including CRLF
          buf = buf[idx + 2:]
          self.msg.start_line = self.ParseStartLine(self.start_line_buffer)
          self.parser_status = self.PS_HEADERS
        else:
          # Check whether incoming data is getting too large, otherwise we just
          # fill our read buffer.
          self._CheckStartLineLength(len(buf))
        break
    # TODO: Handle messages without headers
    if self.parser_status == self.PS_HEADERS:
      # Wait for header end
      idx = buf.find("\r\n\r\n")
      if idx >= 0:
        self.header_buffer.write(buf[:idx + 2])
        self._CheckHeaderLength(self.header_buffer.tell())
        # Remove headers, including CRLF
        buf = buf[idx + 4:]
        self._ParseHeaders()
        self.parser_status = self.PS_BODY
      else:
        # Check whether incoming data is getting too large, otherwise we just
        # fill our read buffer.
        self._CheckHeaderLength(len(buf))
    if self.parser_status == self.PS_BODY:
      # TODO: Implement max size for body_buffer
      self.body_buffer.write(buf)
      buf = ""
      # Check whether we've read everything
      #
      # RFC2616, section 4.4: "When a message-body is included with a message,
      # the transfer-length of that body is determined by one of the following
      # [...] 5. By the server closing the connection. (Closing the connection
      # cannot be used to indicate the end of a request body, since that would
      # leave no possibility for the server to send back a response.)"
      #
      # TODO: Error when buffer length > Content-Length header
      if (eof or
          self.content_length is None or
          (self.content_length is not None and
           self.body_buffer.tell() >= self.content_length)):
        self.parser_status = self.PS_COMPLETE
    return buf
def _CheckStartLineLength(self, length):
"""Limits the start line buffer size.
@type length: int
@param length: Buffer size
"""
if (self.START_LINE_LENGTH_MAX is not None and
length > self.START_LINE_LENGTH_MAX):
raise HttpError("Start line longer than %d chars" %
self.START_LINE_LENGTH_MAX)
def _CheckHeaderLength(self, length):
"""Limits the header buffer size.
@type length: int
@param length: Buffer size
"""
if (self.HEADER_LENGTH_MAX is not None and
length > self.HEADER_LENGTH_MAX):
raise HttpError("Headers longer than %d chars" % self.HEADER_LENGTH_MAX)
  def ParseStartLine(self, start_line):
    """Parses the start line of a message.

    Must be overridden by subclass.

    @type start_line: string
    @param start_line: Start line string

    """
    # Abstract hook: request/response readers parse this line differently.
    raise NotImplementedError()
def _WillPeerCloseConnection(self):
"""Evaluate whether peer will close the connection.
@rtype: bool
@return: Whether peer will close the connection
"""
# RFC2616, section 14.10: "HTTP/1.1 defines the "close" connection option
# for the sender to signal that the connection will be closed after
# completion of the response. For example,
# |
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#character generation, class kit (GUICG22)
import GemRB
from GUIDefines import *
from ie_stats import *
import CharGenCommon
import GUICommon
import CommonTables
# Module-level state shared between the chargen callbacks below.
KitWindow = 0
TextAreaControl = 0
DoneButton = 0
SchoolList = 0
ClassID = 0
def OnLoad():
	"""Build the class-kit selection window for character generation."""
	global KitWindow, TextAreaControl, DoneButton
	global SchoolList, ClassID
	# Toggle: close the window if it is already open.
	if GUICommon.CloseOtherWindow(OnLoad):
		if(KitWindow):
			KitWindow.Unload()
		KitWindow = None
		return
	GemRB.LoadWindowPack("GUICG", 640, 480)
	RaceName = CommonTables.Races.GetRowName(GemRB.GetVar("Race")-1 )
	Class = GemRB.GetVar("Class")-1
	ClassName = CommonTables.Classes.GetRowName(Class)
	# Column 5 holds the class id (1 appears to mean mage; see below).
	ClassID = CommonTables.Classes.GetValue(Class, 5)
	KitTable = GemRB.LoadTable("kittable")
	KitTableName = KitTable.GetValue(ClassName, RaceName)
	KitTable = GemRB.LoadTable(KitTableName,1)
	SchoolList = GemRB.LoadTable("magesch")
	#there is only a specialist mage window for bg1
	KitWindow = GemRB.LoadWindow(12)
	# Disable all eight kit buttons first; enable the valid ones below.
	for i in range(8):
		Button = KitWindow.GetControl(i+2)
		Button.SetState(IE_GUI_BUTTON_DISABLED)
		Button.SetFlags(IE_GUI_BUTTON_RADIOBUTTON, OP_OR)
	if not KitTable:
		RowCount = 1
	else:
		RowCount = KitTable.GetRowCount()
	for i in range(RowCount):
		Button = KitWindow.GetControl(i+2)
		if not KitTable:
			# No kit table: mages pick a school, others get the plain class.
			if ClassID == 1:
				Kit=GemRB.GetVar("MAGESCHOOL")
				KitName = SchoolList.GetValue(i, 0)
			else:
				Kit = 0
				KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 0)
		else:
			Kit = KitTable.GetValue(i,0)
			if ClassID == 1:
				# Mage kit values are offset by 21 into the school list
				# (presumably -- TODO confirm against magesch.2da).
				if Kit:
					Kit = Kit - 21
					KitName = SchoolList.GetValue(Kit, 0)
			else:
				if Kit:
					KitName = CommonTables.KitList.GetValue(Kit, 1)
				else:
					KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 0)
		Button.SetState(IE_GUI_BUTTON_ENABLED)
		Button.SetText(KitName)
		Button.SetVarAssoc("Class Kit",Kit)
		# Preselect the first kit so a default is always set.
		if i==0:
			GemRB.SetVar("Class Kit",Kit)
		Button.SetEvent(IE_GUI_BUTTON_ON_PRESS, KitPress)
	BackButton = KitWindow.GetControl(12)
	BackButton.SetText(15416)
	DoneButton = KitWindow.GetControl(0)
	DoneButton.SetText(11973)
	DoneButton.SetFlags(IE_GUI_BUTTON_DEFAULT,OP_OR)
	TextAreaControl = KitWindow.GetControl(11)
	TextAreaControl.SetText(17247)
	DoneButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, NextPress)
	BackButton.SetEvent(IE_GUI_BUTTON_ON_PRESS, CharGenCommon.BackPress)
	#KitPress()
	KitWindow.ShowModal(MODAL_SHADOW_NONE)
	return
def KitPress():
	"""Show the selected kit's description and enable the Done button."""
	Kit = GemRB.GetVar("Class Kit")
	if Kit == 0:
		KitName = CommonTables.Classes.GetValue(GemRB.GetVar("Class")-1, 1)
	elif ClassID==1:
		KitName = SchoolList.GetValue(Kit, 1)
	else:
		KitName = CommonTables.KitList.GetValue(Kit, 3)
	TextAreaControl.SetText(KitName)
	DoneButton.SetState(IE_GUI_BUTTON_ENABLED)
	return
def NextPress():
	"""Commit the chosen class and kit to the character, then advance."""
	#class
	class_row = GemRB.GetVar ("Class")-1
	class_id = CommonTables.Classes.GetValue (class_row, 5)
	slot = GemRB.GetVar ("Slot")
	GemRB.SetPlayerStat (slot, IE_CLASS, class_id)
	kit_index = GemRB.GetVar ("Class Kit")
	if class_id == 1:
		GemRB.SetVar("MAGESCHOOL", kit_index)
	#the same as the unusable field
	kit_value = CommonTables.KitList.GetValue(kit_index, 6)
	GemRB.SetPlayerStat (slot, IE_KIT, kit_value)
	CharGenCommon.next()
|
""" Class to handle date-parsing and formatting """
# Workaround for http://bugs.python.org/issue8098
import _strptime # pylint: disable=unused-import
from | datetime import datetime
import time
class Date | Utils(object):
""" Class to handle date-parsing and formatting """
date_format = '%Y-%m-%dT%H:%M:%SZ'
json_date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
kodi_date_format = '%Y-%m-%d %H:%M'
def get_str_date(self, date):
"""
Formats datetime to str of format %Y-%m-%dT%H:%M:%SZ
Arguments
date: datetime
"""
return datetime.strftime(date, self.date_format)
def parse_str_date(self, str_date):
"""
Parse a date of format %Y-%m-%dT%H:%M:%SZ to date
Arguments
str_date: str, %Y-%m-%dT%H:%M:%SZ
"""
return self._parse_str_date(str_date, self.date_format)
def _parse_str_date(self, str_date, date_format):
try:
return datetime.strptime(str_date, date_format)
except TypeError:
return datetime(*(time.strptime(str_date, date_format)[0:6]))
def parse_kodi_date(self, str_date):
if not str_date:
return None
return self._parse_str_date(str_date, '%Y-%m-%d %H:%M:%S')
def get_kodi_date_format(self, str_date):
"""
Returns a date on format %Y-%m-%dT%H:%M:%SZ as %Y-%m-%d %H:%M
"""
parsed_date = self._parse_str_date(str_date, self.json_date_format)
return datetime.strftime(parsed_date, '%Y-%m-%d %H:%M:%S')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Author: Piotrek Wasilewski <wasilewski.piotrek@gmail.com>
#
# This program is free | software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it w | ill be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls.defaults import *
# URLconf for the networks app (legacy Django: patterns() + string views
# were removed in Django 1.10; django.conf.urls.defaults in 1.6).
urlpatterns = patterns('netadmin.networks.views',
    # Host CRUD and listing
    url(r'^host/(?P<object_id>\d+)/$',
        'host_detail', name='host_detail'),
    url(r'^host/list/$',
        'host_list', name='host_list'),
    url(r'^host/list/page/(?P<page>\d+)/$',
        'host_list', name='host_list_page'),
    url(r'^host/new/$',
        'host_create', name="host_new"),
    url(r'^host/edit/(?P<object_id>\d+)/$',
        'host_update', name="host_update"),
    url(r'^host/delete/(?P<object_id>\d+)/$',
        'host_delete', name="host_delete"),
    # Network CRUD and listing
    url(r'^network/(?P<object_id>\d+)/$',
        'network_detail', name='network_detail'),
    url(r'^network/list/$',
        'network_list', name='network_list'),
    url(r'^network/list/page/(?P<page>\d+)/$',
        'network_list', name='network_list_page'),
    url(r'^network/new/$',
        'network_create', name="network_new"),
    url(r'^network/edit/(?P<object_id>\d+)/$',
        'network_update', name="network_update"),
    url(r'^network/delete/(?P<object_id>\d+)/$',
        'network_delete', name="network_delete"),
    url(r'^network/events/(?P<object_id>\d+)/$',
        'network_events', name='network_events'),
    url(r'^network/netmask-create/$',
        'subnet_network', name='subnet_network'),
    # NOTE(review): this pattern starts with '/' -- Django URL paths never
    # begin with a slash, so it can likely never match; confirm intent.
    url(r'/update/(?P<object_id>\d+)/$',
        'network_select', name='network_select'),
    # NOTE(review): the share patterns below are unanchored (no ^ prefix,
    # no trailing $) -- confirm whether this is intentional.
    url(r'share/list/(?P<object_type>host|network)/(?P<object_id>\d+)/',
        'share_list', name="share_list"),
    url(r'share/(?P<object_type>host|network)/(?P<object_id>\d+)/',
        'share', name="share"),
    url(r'share/not/(?P<object_type>host|network)/(?P<object_id>\d+)/(?P<user_id>\d+)/',
        'share_not', name="share_not"),
    url(r'share/edit/(?P<object_type>host|network)/(?P<object_id>\d+)/(?P<user_id>\d+)/',
        'share_edit', name="share_edit"),
)
|
rayonnement[i]=RArray
return rayonnement
def convertGeoToAlt(dicoGeo):
    """Convert geopotential bands to altitude, filling gaps from neighbors.

    Each array in dicoGeo is divided by standard gravity; non-positive
    cells are masked as NaN and then filled with the mean of their four
    direct neighbors (von Neumann footprint).
    """
    def mean(values):
        return np.nanmean(values)
    Altitude={}
    # Standard gravity in m/s^2
    cstGravit=9.80665
    # 4-neighborhood footprint (the center cell is excluded)
    footprint = np.array([[0,1,0],
                          [1,0,1],
                          [0,1,0]])
    for i in dicoGeo:
        # Mask cells with non-positive geopotential
        mask=np.logical_not(dicoGeo[i] > 0).astype(int)
        GeoArray=np.divide(dicoGeo[i],cstGravit)
        np.putmask(GeoArray,mask,np.nan)
        indices = np.where(np.isnan(GeoArray))
        results = ndimage.generic_filter(GeoArray, mean, footprint=footprint)
        # Replace each NaN cell with its neighborhood mean
        for row, col in zip(*indices):
            GeoArray[row,col] = results[row,col]
        Altitude[i]=GeoArray
    return Altitude
def computeDailyAccumulation(dicoBand,nbBandByDay,typeData):
    """Sum the nbBandByDay consecutive bands of each day.

    NOTE: Python 2 idioms -- integer division with `/` and indexing
    dict.items() -- which assume insertion/iteration order of dicoBand.
    """
    accumulation={}
    for i in range(0,len(dicoBand.keys())/nbBandByDay):
        maxRange=nbBandByDay+i*nbBandByDay
        # the last band (corresponding to 00-->3h) is not taken
        for j in range (i*nbBandByDay,maxRange):
            # "array" in locals() detects whether this is the first band
            if "array" in locals():
                array=array+dicoBand.items()[j][1]
            else:
                array=dicoBand.items()[j][1]
        accumulation[i]=array
        # delete so the next day restarts its accumulation
        del array
    return accumulation
def computeDailyMean(dicoBand,nbBandByDay,typeData):
    """Average each day's bands, counting only cells with positive values.

    NaN cells of the resulting mean are filled with the mean of their
    four direct neighbors. Python 2 idioms throughout (`/`, .items()[j]).
    """
    def meanCalc(values):
        return np.nanmean(values)
    mean={}
    # 4-neighborhood footprint used to fill NaN cells
    footprint = np.array([[0,1,0],
                          [1,0,1],
                          [0,1,0]])
    for i in range(0,len(dicoBand.keys())/nbBandByDay):
        maxRange=nbBandByDay+i*nbBandByDay
        # the last band (corresponding to 00-->3h) is not taken
        for j in range (i*nbBandByDay,maxRange):
            if "array" in locals():
                array=array+dicoBand.items()[j][1]
                np.putmask(dicoBand.items()[j][1], dicoBand.items()[j][1]==0, 0)
                # mask counts, per cell, how many bands contributed (> 0)
                mask=mask+(dicoBand.items()[j][1] > 0).astype(int)
            else:
                array=dicoBand.items()[j][1]
                np.putmask(dicoBand.items()[j][1], dicoBand.items()[j][1]==0, 0)
                mask=(dicoBand.items()[j][1] > 0).astype(int)
        mean[i]=array
        del array
        # using nanmean --> much simpler
        # cells where mask == 0 divide by zero and become NaN/inf here
        mean[i]=mean[i]/mask
        indices = np.where(np.isnan(mean[i]))
        results = ndimage.generic_filter(mean[i], meanCalc, footprint=footprint)
        for row, col in zip(*indices):
            mean[i][row,col] = results[row,col]
    return mean
def computeDailyMax(dicoBand, nbBandByDay, typeData=None):
    """Element-wise daily maximum of the bands in dicoBand.

    dicoBand maps band keys to arrays in chronological (insertion) order;
    nbBandByDay consecutive bands make up one day.  typeData is unused but
    kept for interface compatibility.  Returns {day_index: max array}.

    Fixed for Python 3: the original indexed ``dicoBand.items()[j]`` and fed
    a float (``len(...)/n``) into range().
    """
    maxB = {}
    bands = list(dicoBand.values())
    # a trailing partial day (00 --> 3h of the next day) is dropped
    for day in range(len(bands) // nbBandByDay):
        start = day * nbBandByDay
        daily = bands[start]
        for j in range(start + 1, start + nbBandByDay):
            # fmax ignores NaN on either side, matching the original
            daily = np.fmax(daily, bands[j])
        maxB[day] = daily
    return maxB
def computeDailyMin(dicoBand, nbBandByDay, typeData=None):
    """Element-wise daily minimum of the bands in dicoBand.

    Zeros mark missing data: they are blanked to NaN *in place* in the
    input arrays (as in the original), and fmin then ignores them.
    typeData is unused but kept for interface compatibility.
    Returns {day_index: min array}.

    Fixed for Python 3: the original indexed ``dicoBand.items()[j]`` and fed
    a float (``len(...)/n``) into range().
    """
    minB = {}
    bands = list(dicoBand.values())
    for day in range(len(bands) // nbBandByDay):
        start = day * nbBandByDay
        daily = None
        for j in range(start, start + nbBandByDay):
            # in-place blanking of missing (zero) cells, matching the original
            np.putmask(bands[j], bands[j] == 0, np.nan)
            daily = bands[j] if daily is None else np.fmin(daily, bands[j])
        minB[day] = daily
    return minB
def fusVentFromDict(dicToFus, nbBandByDay, zmesure=10):
    """ Wind profile relationship [m.s-1]
    Estimate wind speed at 2m
    uz wind speed at height zmesure above ground surface
    wind is the norm of U and V direction speed

    dicToFus must hold exactly two entries (the U and V component dicts);
    otherwise an empty dict is returned.  nbBandByDay is unused but kept
    for interface compatibility.

    Fixed for Python 3: ``dict.keys()`` is not subscriptable there, so the
    keys are materialised with list() first.
    """
    wind = {}
    keys = list(dicToFus.keys())
    if (len(dicToFus) == 2):
        # FAO-56 eq. 47: convert speed measured at zmesure metres down to 2 m
        # (math.log is the natural logarithm); hoisted out of the loop
        factor = 4.87 / math.log(67.8 * zmesure - 5.42)
        for i in dicToFus[keys[0]]:
            u = dicToFus[keys[0]][i] * factor
            v = dicToFus[keys[1]][i] * factor
            wind[i] = np.sqrt(pow(u, 2) + pow(v, 2))
    return wind
def ComputeHumidityFromPT(pressureDico, TDico, DewDico):
    """Relative humidity [%] per band from pressure, temperature and dew point.

    RH = esat(P, Tdew) / esat(P, T) * 100.  Cells where any of the three
    inputs is zero are flagged as missing (set to 0) in the result.
    """
    Humidity = {}
    for band in pressureDico:
        rh = esat(pressureDico[band], DewDico[band]) / esat(pressureDico[band], TDico[band]) * 100
        # propagate the zero "missing data" flag from every input
        for source in (pressureDico, DewDico, TDico):
            np.putmask(rh, source[band] == 0, 0)
        Humidity[band] = rh
    return Humidity
def esat(pressure, T):
    """Saturation partial pressure of water vapour [kPa].

    Wexler et al. (1976) formulation with a pressure enhancement factor:
    P(T) = 0.61121*exp(17.502*T/(T+240.97)) * (1.00072 + p*(3.2+0.00059*T^2)/1e5)

    pressure -- air pressure in hPa (converted to kPa internally)
    T        -- temperature in degrees Celsius
    """
    pressure = pressure / 10
    saturation = 0.61121 * np.exp(np.multiply(T, 17.502) / (T + 240.97))
    enhancement = 1.00072 + pressure * (3.2 + 0.00059 * pow(T, 2)) / 100000.0
    return saturation * enhancement
def eocalc(T):
    """Saturation vapour pressure at air temperature T (degrees C) [kPa] (FAO-56 eq. 11)."""
    return 0.6108 * np.exp(17.27 * T / (T + 237.3))
def delta_calc(T):
    """Slope of the saturation vapour pressure curve [kPa per degree C].

    T is the air temperature in degrees C (FAO-56 equation 13).
    """
    saturation = 0.6108 * np.exp(17.27 * T / (T + 237.3))
    return 4098 * saturation / (T + 237.3) ** 2
def doy(datetoConvert, deltaDays):
    """Day of year (1-366) of datetoConvert shifted by deltaDays days."""
    shifted = datetoConvert + timedelta(days=deltaDays)
    return shifted.timetuple().tm_yday
def getGeoTransform(pathToImg):
    """Return (xOrigin, yOrigin, pixelWidth, pixelHeight) of a raster file."""
    geoTrans = gdal.Open(pathToImg).GetGeoTransform()
    # GDAL geotransform layout: [0]=x origin, [1]=pixel width,
    # [3]=y origin, [5]=pixel height (usually negative)
    return (geoTrans[0], geoTrans[3], geoTrans[1], geoTrans[5])
def getProj(pathToShape):
    """Proj4 string of the spatial reference of an ESRI shapefile."""
    driver = ogr.GetDriverByName('ESRI Shapefile')
    # keep the data source referenced while the layer is in use
    source = driver.Open(pathToShape, 0)
    spatial_ref = source.GetLayer().GetSpatialRef()
    return spatial_ref.ExportToProj4()
def getShape(pathToImg):
    """Pixel (width, height) of a raster, read from its geotransform."""
    transform = gdal.Open(pathToImg).GetGeoTransform()
    return (transform[1], transform[5])
def getCentroidLatFromArray(shape, geotransform, grid):
    """Cell-centre coordinate grids for a raster.

    shape        -- (rows, cols) of the raster
    geotransform -- sequence whose [0] is the X origin and [1] the Y origin
                    (NOTE(review): a GDAL geotransform keeps the Y origin at
                    index 3 -- confirm callers really pass (x, y) here)
    grid         -- cell size

    Returns {0: x coordinates of cell centres, 1: y coordinates}.

    Fixes: a garbled expression on the longitude line, and ndarray.itemset()
    (removed in NumPy 2.0) replaced by plain index assignment.
    """
    lat = np.zeros(shape)
    lon = np.zeros(shape)
    originX = geotransform[0]
    originY = geotransform[1]
    for index, _ in np.ndenumerate(lat):
        row, col = index
        # centre of the cell: origin + offset + half a cell
        lat[index] = float(originX) + float(col) * float(grid) + (float(grid) / 2)
        lon[index] = float(originY) - float(row) * float(grid) - (float(grid) / 2)
    dicoLatLong = {}
    dicoLatLong[0] = lat
    dicoLatLong[1] = lon
    return dicoLatLong
def writeTiffFromDicoArray(DicoArray, outputImg, shape, geoparam, proj=None, format=gdal.GDT_Float32):
    """Write each array of DicoArray as one band of a GeoTIFF.

    DicoArray -- {key: 2-D array}, one band per entry, in insertion order
    outputImg -- path of the GTiff file to create
    shape     -- (rows, cols) of the arrays
    geoparam  -- (originX, originY, pixelWidth, pixelHeight)
    proj      -- unused (kept for interface compatibility)
    format    -- GDAL data type of the bands (shadows the builtin, kept for
                 interface compatibility)

    Fixes a garbled ``GetRasterBand`` call in the original.
    """
    driver = gdal.GetDriverByName('GTiff')
    # GDAL wants (cols, rows)
    dst_ds = driver.Create(outputImg, shape[1], shape[0], len(DicoArray), format)
    band_index = 1
    for data in DicoArray.values():
        dst_ds.GetRasterBand(band_index).WriteArray(data, 0)
        band = dst_ds.GetRasterBand(band_index)
        # zero marks missing data throughout this module
        band.SetNoDataValue(0)
        band_index += 1
    originX = geoparam[0]
    originY = geoparam[1]
    pixelWidth = geoparam[2]
    pixelHeight = geoparam[3]
    dst_ds.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
def WriteTxtFileForEachPixel(outputFolder,et0_0,et0_1,et0_2,DateList,DoyList,Ray,RayShort,RayLong,Tmean,Tmax,Tmin,Hmean,Hmax,Hmin,vent,precipitation,pressure,Geo,latlon,projShape):
""" Write a Txtfile """
for i in range(0,et0_0[0].shape[0]):
for j in range(0,et0_0[0].shape[1]):
lat=latlon[0][i][j]
lon=latlon[1][i][j]
p1 = pp.Proj(projShape)
latP,lonP = p1(lat,lon)
numero = str(round(lat,2)).replace('.','')+str(round(l |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from troveclient import client as tc
from troveclient.openstack.common.apiclient import exceptions
from heat.common import exception
from heat.common.i18n import _
from heat.engine.clients import client_plugin
from heat.engine import constraints
class TroveClientPlugin(client_plugin.ClientPlugin):
    """Heat client plugin wrapping python-troveclient (OpenStack database service)."""
    # module whose exception classes the base plugin machinery inspects
    exceptions_module = exceptions
    # catalog service type; DATABASE also becomes a class attribute
    service_types = [DATABASE] = ['database']
    def _create(self):
        """Build a trove client authenticated with the request context's token."""
        con = self.context
        endpoint_type = self._get_client_option('trove', 'endpoint_type')
        args = {
            'service_type': self.DATABASE,
            'auth_url': con.auth_url or '',
            'proxy_token': con.auth_token,
            'username': None,
            'password': None,
            'cacert': self._get_client_option('trove', 'ca_file'),
            'insecure': self._get_client_option('trove', 'insecure'),
            'endpoint_type': endpoint_type
        }
        client = tc.Client('1.0', **args)
        # inject the existing token/endpoint instead of re-authenticating
        management_url = self.url_for(service_type=self.DATABASE,
                                      endpoint_type=endpoint_type)
        client.client.auth_token = con.auth_token
        client.client.management_url = management_url
        return client
    def validate_datastore(self, datastore_type, datastore_version,
                           ds_type_key, ds_version_key):
        """Validate a datastore type/version pair against the active versions.

        Raises StackValidationFailed when the version is unknown, when a
        version is required but omitted (several active versions), or when a
        version is given without a type.  ds_type_key/ds_version_key are the
        property names used in the error message.
        """
        if datastore_type:
            # get current active versions
            allowed_versions = self.client().datastore_versions.list(
                datastore_type)
            allowed_version_names = [v.name for v in allowed_versions]
            if datastore_version:
                if datastore_version not in allowed_version_names:
                    msg = _("Datastore version %(dsversion)s "
                            "for datastore type %(dstype)s is not valid. "
                            "Allowed versions are %(allowed)s.") % {
                                'dstype': datastore_type,
                                'dsversion': datastore_version,
                                'allowed': ', '.join(allowed_version_names)}
                    raise exception.StackValidationFailed(message=msg)
            else:
                if len(allowed_versions) > 1:
                    msg = _("Multiple active datastore versions exist for "
                            "datastore type %(dstype)s. "
                            "Explicit datastore version must be provided. "
                            "Allowed versions are %(allowed)s.") % {
                                'dstype': datastore_type,
                                'allowed': ', '.join(allowed_version_names)}
                    raise exception.StackValidationFailed(message=msg)
        else:
            if datastore_version:
                msg = _("Not allowed - %(dsver)s without %(dstype)s.") % {
                    'dsver': ds_version_key,
                    'dstype': ds_type_key}
                raise exception.StackValidationFailed(message=msg)
    def is_not_found(self, ex):
        """True when ex signals a 404 from trove."""
        return isinstance(ex, exceptions.NotFound)
    def is_over_limit(self, ex):
        """True when ex signals a request-too-large / rate-limit error."""
        return isinstance(ex, exceptions.RequestEntityTooLarge)
    def is_conflict(self, ex):
        """True when ex signals a 409 conflict."""
        return isinstance(ex, exceptions.Conflict)
    def get_flavor_id(self, flavor):
        '''
        Get the id for the specified flavor name.
        If the specified value is flavor id, just return it.
        :param flavor: the name of the flavor to find
        :returns: the id of :flavor:
        :raises: exception.FlavorMissing
        '''
        flavor_id = None
        flavor_list = self.client().flavors.list()
        # a name match takes priority; an id match is also accepted
        for o in flavor_list:
            if o.name == flavor:
                flavor_id = o.id
                break
            if o.id == flavor:
                flavor_id = o.id
                break
        if flavor_id is None:
            raise exception.FlavorMissing(flavor_id=flavor)
        return flavor_id
class FlavorConstraint(constraints.BaseCustomConstraint):
    """Heat parameter constraint: the value must name an existing trove flavor."""
    # FlavorMissing is reported as a validation failure, not an internal error
    expected_exceptions = (exception.FlavorMissing,)
    def validate_with_client(self, client, flavor):
        """Resolve the flavor via the trove plugin; raises FlavorMissing if unknown."""
        client.client_plugin('trove').get_flavor_id(flavor)
|
# import python modules
import os
import time
import logging
import multiprocessing
# import django modules
# import third party modules
# import project specific model classes
from config.models import Origin
# import app specific utility classes
# import app specific utility functions
from .utils import packet_chunk
from .utils import run_capture
from .utils import read_pcap
def discovery_task(origin_uuid="",
                   offline=False,
                   interface="",
                   duration=0,
                   filepath="",
                   origin_description=""
                   ):
    """Capture packets (live or from a pcap) and feed them to a worker pool.

    origin_uuid        -- uuid of an existing Origin (live mode)
    offline            -- True to read filepath instead of capturing
    interface/duration -- live-capture parameters
    filepath           -- pcap file to read in offline mode
    origin_description -- description for the Origin created in offline mode

    Fixes: a garbled ``plant_flag`` keyword in the original, and the bare
    ``except:`` around the Origin lookup (which also swallowed SystemExit).
    """
    logging.basicConfig(filename="/tmp/pythos_debug.log", level=logging.DEBUG)
    m = multiprocessing.Manager()
    packets = m.Queue()
    multiprocessing.log_to_stderr(logging.INFO)
    num_processes = os.cpu_count()
    if not num_processes:
        # os.cpu_count() may return None; fall back to a small pool
        num_processes = 2
    pool = multiprocessing.Pool(processes=num_processes, maxtasksperchild=1)
    if offline:
        current_origin = Origin.objects.create(name="PCAP " + filepath,
                                               description=origin_description,
                                               sensor_flag=True,
                                               plant_flag=False
                                               )
        discovery_process = multiprocessing.Process(target=read_pcap,
                                                    args=(filepath,
                                                          packets
                                                          )
                                                    )
        logging.info("Starting to read pcap file: " + filepath)
    else:
        try:
            current_origin = Origin.objects.get(uuid=origin_uuid)
        except Exception:
            # narrowed from a bare except: log and abort on any lookup failure
            logging.error("Could not find specified origin: " + origin_uuid +
                          " Aborting."
                          )
            return
        discovery_process = multiprocessing.Process(target=run_capture,
                                                    args=(interface,
                                                          duration,
                                                          packets
                                                          )
                                                    )
        logging.info("Starting live capture on: " + interface)
    discovery_process.start()
    logging.info("Starting " + str(num_processes) + " worker processes.")
    while discovery_process.is_alive() or not packets.empty():
        num_packets = packets.qsize()
        chunk_size = max(num_packets//num_processes, 10000)
        logging.debug(str(num_packets) + " packets in queue.")
        if num_packets > chunk_size:
            chunk = m.Queue()
            for i in range(chunk_size):
                chunk.put(packets.get())
            logging.debug("Processing chunk with size: " + str(chunk_size))
            pool.apply_async(packet_chunk, args=(chunk,
                                                 current_origin,
                                                 packets
                                                 )
                             )
        elif not discovery_process.is_alive():
            logging.debug("Processing last chunk.")
            # NOTE(review): the remaining queue is passed both as the chunk and
            # as the output queue -- confirm packet_chunk supports that
            pool.apply(packet_chunk, args=(packets, current_origin, packets))
        time.sleep(10)
    pool.close()
    pool.join()
    if offline:
        logging.info("Pcap " + filepath + " has been processed successfully.")
    else:
        logging.info("Live capture on " + interface + " has been completed.")
|
)
return status
def addOneElement(sharedList):
    """Parse one test status into a Tweet and push it onto sharedList."""
    parsed = ParserStatus(getOneStatusTS())
    sharedList.addElement(Tweet(parsed.getTweet()))
def addManyElements(sharedList, randomElements):
    """Build randomElements (Tweet, User) pairs from one status and add them in bulk."""
    status = getOneStatusTS()
    batch = []
    for _ in range(randomElements):
        parsed = ParserStatus(status)
        batch.append(Tweet(parsed.getTweet()))
        batch.append(User(parsed.getUser()))
    sharedList.addManyElements(batch)
def oneThread(barrier, fun, *args):
    """Thread entry point: run fun(*args), then signal completion on barrier."""
    fun(*args)
    barrier.wait()
def oneThreadUpSync(barrier, fun, *args):
    """Thread entry point: wait on barrier first, then run fun(*args)."""
    barrier.wait()
    fun(*args)
def oneThreadDoubleSync(barrier1, barrier2, fun, *args):
    """Thread entry point: sync on barrier1, run fun(*args), sync on barrier2."""
    barrier1.wait()
    fun(*args)
    barrier2.wait()
def createData(base):
    """Generate matching tweet/user/streaming/search lists.

    Ids run from base up to base + N where N is random in 1..100, so every
    call produces at least one element.  Returns the four ObjectLists.
    """
    status = getOneStatusTS()
    upper = base + randomInteger(99) + 1
    tweets = ObjectList()
    users = ObjectList()
    streamings = ObjectList()
    searches = ObjectList()
    for ident in range(base, upper):
        # rewrite the ids so every generated element is unique
        status["id"] = ident
        status["user"]["id"] = ident
        parsed = ParserStatus(status)
        tweets.append(Tweet(parsed.getTweet()))
        users.append(User(parsed.getUser()))
        streamings.append(TweetStreaming(randomTweetStreaming(ident, 1)))
        searches.append(TweetSearch(randomTweetSearch(ident, 1)))
    return tweets, users, streamings, searches
# Module-level state shared by the fakeClient threads; all access must hold
# sharedListDataLock.
sharedListDataLock = Lock()
# lists of everything the fake clients have sent (for later comparison)
sharedListData = []
# next object id handed out to a fakeClient
idNumber = 0
def fakeClient(host, port):
    """Create a batch of data, send it to the buffer server, record it globally.

    idNumber and sharedListData are shared across client threads, so both the
    id reservation and the bookkeeping are done under sharedListDataLock.
    """
    global idNumber
    global sharedListDataLock
    global sharedListData
    # reserve a contiguous id range for this client's data
    with sharedListDataLock:
        [tweetList, userList, streamingList, searchList] = createData(idNumber)
        idNumber += len(tweetList.list)
    bc = BufferCommunicator(host, port)
    bc.sendData(tweetList, userList, streamingList, searchList)
    # record what was sent so the test can compare afterwards
    with sharedListDataLock:
        sharedListData.extend([tweetList, userList, streamingList, searchList])
"""
class TestSharedElementList(unittest.TestCase):
def setUp(self):
self.sharedList = SharedElementList()
def test_addElement(self):
addOneElement(self.sharedList)
self.assertEqual(len(self.sharedList.elementList), 1)
def test_addManyElements(self):
randomElements = randomInteger(100)
addManyElements(self.sharedList, randomElements)
self.assertEqual(len(self.sharedList.elementList), randomElements*2)
def test_addTwoThreads(self):
barrier = Barrier(2)
thread.start_new_thread(oneThread, (barrier, addOneElement, self.sharedList,))
addOneElement(self.sharedList)
barrier.wait()
self.assertEqual(len(self.sharedList.elementList), 2)
def test_addTwoThreadsManyElements(self):
barrier = Barrier(2)
randomElements = randomInteger(100)
thread.start_new_thread(oneThread, (barrier, addManyElements, self.sharedList,randomElements,))
addManyElements(self.sharedList, randomElements)
barrier.wait()
totalElements = randomElements*2*2
self.assertEqual(len(self.sharedList.elementList), totalElements)
def test_addManyThreadsManyElements(self):
randomThreads = randomInteger(8) + 2 #Always graeter or equal than 2
barrier = Barrier(randomThreads + 1)# Include main thread
randomElements = randomInteger(100)
for i in range(0, randomThreads):
thread.start_new_thread(oneThread, (barrier, addManyElements, self.sharedList, randomElements,))
barrier.wait()
totalElements = randomElements*randomThreads*2
self.assertEqual(len(self.sharedList.elementList), totalElements)
def test_addGetAllElementsAndClean(self):
randomElements = randomInteger(100)
addManyElements(self.sharedList, randomElements)
copyElementList = self.sharedList.getAllElementsAndClean()
self.assertEqual(len(self.sharedList.elementList), 0)
self.assertEqual(len(copyElementList), randomElements*2)
def test_addGetAllElementsAndCleanWhileAdding(self):
barrier1 = Barrier(2)
barrier2 = Barrier(2)
randomElements = randomInteger(100)
thread.start_new_thread(oneThreadDoubleSync, (barrier1, barrier2, addManyElements, self.sharedList,randomElements,))
barrier1.wait()
copyElementList = self.sharedList.getAllElementsAndClean()
barrier2.wait()
totalElements = len(copyElementList) + len(self.sharedList.elementList)
self.assertEqual(randomElements*2, totalElements)
"""
def countData(lb, tweetList, userList, tweetStreamingList, tweetSearchList):
    """Feed every element of the four lists into the local buffer lb.

    Elements the buffer rejects (its add* methods raise, e.g. duplicates)
    are skipped on purpose: countData counts what the buffer accepts.
    Returns lb.

    Fixed: the four bare ``except:`` clauses also swallowed SystemExit and
    KeyboardInterrupt; they now catch Exception only.
    """
    # count originals
    for tweet in tweetList.list:
        try:
            lb.addTweet(tweet)
        except Exception:
            continue
    for user in userList.list:
        try:
            lb.addUser(user)
        except Exception:
            continue
    for tweetStreaming in tweetStreamingList.list:
        try:
            lb.addTweetStreaming(tweetStreaming)
        except Exception:
            continue
    for tweetSearch in tweetSearchList.list:
        try:
            lb.addTweetSearch(tweetSearch)
        except Exception:
            continue
    return lb
class TestServer(unittest.TestCase):
    def setUp(self):
        # reset the module-level list shared with fakeClient before each test
        global sharedListData
        sharedListData = []
def test_serverOneClient(self):
global sharedListData
# Create event
stopEvent = Event()
# Create server barrier
sBarrier = Barrier(2)
# Create server
bs = BufferServer(13001, 5, stopEvent, sBarrier, 5, 5, "http://localhost:8000", "quiltro", "perroCallejero")
streamingList = ObjectList()
streamingList.appen | d(Streaming(randomStreaming(1)))
bs.communicator.service.postStreamings(streamingList)
searchList = ObjectList()
searchList.append(Search(randomSearch(1)) | )
bs.communicator.service.postSearches(searchList)
bs.start()
# Create barrier for client
cBarrier = Barrier(2)
# Create client
thread.start_new_thread(oneThread, (cBarrier, fakeClient, bs.getHostName(), 13001,))
cBarrier.wait()
time.sleep(5)
# Stop server
stopEvent.set()
# Wait for server
sBarrier.wait()
time.sleep(5)
# Get data and compare
numberTweets = len(bs.globalBuffer.localBuffer.tweetList.list)
numberUsers = len(bs.globalBuffer.localBuffer.userList.list)
numberTweetStreaming = len(bs.globalBuffer.localBuffer.tweetStreamingList.list)
numberTweetSearch = len(bs.globalBuffer.localBuffer.tweetSearchList.list)
self.assertEqual(numberTweets, 0)
self.assertEqual(numberUsers, 0)
self.assertEqual(numberTweetStreaming, 0)
self.assertEqual(numberTweetSearch, 0)
# count originals
lb = LocalBuffer()
lb = countData(lb, sharedListData[0], sharedListData[1], sharedListData[2]
, sharedListData[3])
originalNumberTweets = len(lb.tweetList.list)
originalNumberUsers = len(lb.userList.list)
originalNumberTweetStreaming = len(lb.tweetStreamingList.list)
originalNumberTweetSearch = len(lb.tweetSearchList.list)
self.assertEqual(originalNumberTweets, bs.communicator.sentTweets)
self.assertEqual(originalNumberUsers, bs.communicator.sentUsers)
self.assertEqual(originalNumberTweetStreaming, bs.communicator.sentTweetStreamings)
self.assertEqual(originalNumberTweetSearch, bs.communicator.sentTweetSearches)
def test_serverFiveOrLessClient(self):
global sharedListData
# Create stop event
stopEvent = Event()
# Create server barrier
sBarrier = Barrier(2)
# Create server
bs = BufferServer(13001, 5, stopEvent, sBarrier, 5, 5, "http://localhost:8000", "quiltro", "perroCallejero")
streamingList = ObjectList()
streamingList |
import sys
from django.core.management.base import BaseCommand
from ietf.community.constants import SIGNIFICANT_STATES
from ietf.community.models import DocumentChangeDates
from ietf.doc.models import Document
class Command(BaseCommand):
    help = (u"Update drafts in community lists by reviewing their rules")

    def handle(self, *args, **options):
        """Recompute the cached change dates of every draft document.

        Fixes two garbled attribute assignments in the original
        (new_version_date and significant_change_date).
        """
        documents = Document.objects.filter(type='draft')
        index = 1
        total = documents.count()
        for doc in documents.iterator():
            (changes, created) = DocumentChangeDates.objects.get_or_create(document=doc)
            new_version = doc.latest_event(type='new_revision')
            normal_change = doc.latest_event()
            significant_change = None
            # later events overwrite earlier ones: the *last* matching
            # changed_document event wins
            for event in doc.docevent_set.filter(type='changed_document'):
                for state in SIGNIFICANT_STATES:
                    if ('<b>%s</b>' % state) in event.desc:
                        significant_change = event
                        break
            # 'x and x.time.date()' keeps None when no such event exists
            changes.new_version_date = new_version and new_version.time.date()
            changes.normal_change_date = normal_change and normal_change.time.date()
            changes.significant_change_date = significant_change and significant_change.time.date()
            changes.save()
            sys.stdout.write('Document %s/%s\r' % (index, total))
            sys.stdout.flush()
            index += 1
        # newline after the '\r' progress line (Python 2 print statement)
        print
|
# -*- coding: ISO-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2009 STFC <http://www.stfc.ac.uk>
#
# Authors :
# Dominic Lowe <dominic.lowe@stfc.ac.uk>
#
# Contact email: dominic.lowe@stfc.ac.uk
# =============================================================================
"""
Web Feature Server (WFS) methods and metadata. Factory function.
"""
from feature import wfs100, wfs200
def WebFeatureService(url, version='1.0.0', xml=None):
    ''' wfs factory function, returns a version specific WebFeatureService object '''
    # NOTE(review): __new__ is called directly, so the version classes are
    # expected to do their setup inside __new__ (no __init__ call happens
    # here); any version other than 1.0.x / 2.0.x silently returns None --
    # confirm callers handle that.
    if version in ['1.0', '1.0.0']:
        return wfs100.WebFeatureService_1_0_0.__new__(wfs100.WebFeatureService_1_0_0, url, version, xml)
    elif version in ['2.0', '2.0.0']:
        return wfs200.WebFeatureService_2_0_0.__new__(wfs200.WebFeatureService_2_0_0, url, version, xml)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
if __name__ == "__main__":
    # local settings on Windows development machines, remote settings elsewhere
    settings_name = "settings.local" if os.name == 'nt' else "settings.remote"
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_name)
    # fixes a garbled 'import' keyword in the original line
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
|
ypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12)
c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes")
r = c.fetchone()
self.assertEqual(tuple([None] * 12), r)
c.execute("delete from test_datatypes")
# check sequence type
c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)")
c.execute("select l from test_datatypes where i in %s order by i", ((2,6),))
r = c.fetchall()
self.assertEqual(((4,),(8,)), r)
finally:
c.execute("drop table test_datatypes")
def test_dict(self):
""" test dict escaping """
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_dict (a integer, b integer, c integer)")
try:
c.execute("insert into test_dict (a,b,c) values (%(a)s, %(b)s, %(c)s)", {"a":1,"b":2,"c":3})
c.execute("select a,b,c from test_dict")
self.assertEqual((1,2,3), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_string(self):
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_dict (a text)")
test_value = "I am a test string"
try:
c.execute("insert into test_dict (a) values (%s)", test_value)
c.execute("select a from test_dict")
self.assertEqual((test_value,), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_integer(self):
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_dict (a integer)")
test_value = 12345
try:
c.execute("insert into test_dict (a) values (%s)", test_value)
c.execute("select a from test_dict")
self.assertEqual((test_value,), c.fetchone())
finally:
c.execute("drop table test_dict")
def test_big_blob(self):
""" test tons of data """
conn = self.connections[0]
c = conn.cursor()
c.execute("create table test_big_blob (b blob)")
try:
data = "pymysql" * 1024
c.execute("insert into test_big_blob (b) values (%s)", (data,))
c.execute("select b from test_big_blob")
self.assertEqual(data.encode(conn.charset), c.fetchone()[0])
finally:
c.execute("drop table test_big_blob")
def test_untyped(self):
""" test conversion of null, empty string """
conn = self.connections[0]
c = conn.cursor()
c.execute("select null,''")
self.assertEqual((None,u''), c.fetchone())
c.execute("select '',null")
self.assertEqual((u'',None), c.fetchone())
def test_timedelta(self):
""" test timedelta conversion """
conn = self.connections[0]
c = conn.cursor()
c.execute("select time('12:30'), time('23:12:59'), time('23:12:59.05100')")
self.assertEqual((datetime.timedelta(0, 45000),
datetime.timedelta(0, 83579),
datetime.timedelta(0, 83579, 51000)),
c.fetchone())
def test_datetime(self):
""" test datetime conversion """
conn = self.connections[0]
c = conn.cursor()
dt = datetime.datetime(2013,11,12,9,9,9,123450)
try:
c.execute("create table test_datetime (id int, ts datetime(6))")
c.execute("insert into test_datetime values (1,'2013-11-12 09:09:09.12345')")
c.execute("select ts from test_datetime")
self.assertEqual((dt,),c.fetchone())
except ProgrammingError:
# User is running a version of MySQL that doesn't support msecs within datetime
pass
finally:
c.execute("drop table if exists test_datetime")
class TestCursor(base.PyMySQLTestCase):
    """Cursor-level behaviour: empty fetches, aggregates, tuple parameters."""
    # this test case does not work quite right yet, however,
    # we substitute in None for the erroneous field which is
    # compatible with the DB-API 2.0 spec and has not broken
    # any unit tests for anything we've tried.
    #def test_description(self):
    #    """ test description attribute """
    #    # result is from MySQLdb module
    #    r = (('Host', 254, 11, 60, 60, 0, 0),
    #         ('User', 254, 16, 16, 16, 0, 0),
    #         ('Password', 254, 41, 41, 41, 0, 0),
    #         ('Select_priv', 254, 1, 1, 1, 0, 0),
    #         ('Insert_priv', 254, 1, 1, 1, 0, 0),
    #         ('Update_priv', 254, 1, 1, 1, 0, 0),
    #         ('Delete_priv', 254, 1, 1, 1, 0, 0),
    #         ('Create_priv', 254, 1, 1, 1, 0, 0),
    #         ('Drop_priv', 254, 1, 1, 1, 0, 0),
    #         ('Reload_priv', 254, 1, 1, 1, 0, 0),
    #         ('Shutdown_priv', 254, 1, 1, 1, 0, 0),
    #         ('Process_priv', 254, 1, 1, 1, 0, 0),
    #         ('File_priv', 254, 1, 1, 1, 0, 0),
    #         ('Grant_priv', 254, 1, 1, 1, 0, 0),
    #         ('References_priv', 254, 1, 1, 1, 0, 0),
    #         ('Index_priv', 254, 1, 1, 1, 0, 0),
    #         ('Alter_priv', 254, 1, 1, 1, 0, 0),
    #         ('Show_db_priv', 254, 1, 1, 1, 0, 0),
    #         ('Super_priv', 254, 1, 1, 1, 0, 0),
    #         ('Create_tmp_table_priv', 254, 1, 1, 1, 0, 0),
    #         ('Lock_tables_priv', 254, 1, 1, 1, 0, 0),
    #         ('Execute_priv', 254, 1, 1, 1, 0, 0),
    #         ('Repl_slave_priv', 254, 1, 1, 1, 0, 0),
    #         ('Repl_client_priv', 254, 1, 1, 1, 0, 0),
    #         ('Create_view_priv', 254, 1, 1, 1, 0, 0),
    #         ('Show_view_priv', 254, 1, 1, 1, 0, 0),
    #         ('Create_routine_priv', 254, 1, 1, 1, 0, 0),
    #         ('Alter_routine_priv', 254, 1, 1, 1, 0, 0),
    #         ('Create_user_priv', 254, 1, 1, 1, 0, 0),
    #         ('Event_priv', 254, 1, 1, 1, 0, 0),
    #         ('Trigger_priv', 254, 1, 1, 1, 0, 0),
    #         ('ssl_type', 254, 0, 9, 9, 0, 0),
    #         ('ssl_cipher', 252, 0, 65535, 65535, 0, 0),
    #         ('x509_issuer', 252, 0, 65535, 65535, 0, 0),
    #         ('x509_subject', 252, 0, 65535, 65535, 0, 0),
    #         ('max_questions', 3, 1, 11, 11, 0, 0),
    #         ('max_updates', 3, 1, 11, 11, 0, 0),
    #         ('max_connections', 3, 1, 11, 11, 0, 0),
    #         ('max_user_connections', 3, 1, 11, 11, 0, 0))
    #    conn = self.connections[0]
    #    c = conn.cursor()
    #    c.execute("select * from mysql.user")
    #
    #    self.assertEqual(r, c.description)
    def test_fetch_no_result(self):
        """ test a fetchone() with no rows """
        conn = self.connections[0]
        c = conn.cursor()
        c.execute("create table test_nr (b varchar(32))")
        try:
            data = "pymysql"
            c.execute("insert into test_nr (b) values (%s)", (data,))
            # an INSERT produces no result set, so fetchone() must give None
            self.assertEqual(None, c.fetchone())
        finally:
            c.execute("drop table test_nr")
    def test_aggregates(self):
        """ test aggregate functions """
        conn = self.connections[0]
        c = conn.cursor()
        try:
            c.execute('create table test_aggregates (i integer)')
            for i in range(0, 10):
                c.execute('insert into test_aggregates (i) values (%s)', (i,))
            c.execute('select sum(i) from test_aggregates')
            r, = c.fetchone()
            self.assertEqual(sum(range(0,10)), r)
        finally:
            c.execute('drop table test_aggregates')
    def test_single_tuple(self):
        """ test a single tuple """
        conn = self.connections[0]
        c = conn.cursor()
        try:
            c.execute("create table mystuff (id integer primary key)")
            c.execute("insert into mystuff (id) values (1)")
            c.execute("insert into mystuff (id) values (2)")
            # a one-element tuple bound to IN (...) must still be escaped correctly
            c.execute("select id from mystuff where id in %s", ((1,),))
            self.assertEqual([(1,)], list(c.fetchall()))
        finally:
            c.execute("drop table mystuff")
class TestBulkInserts(base.PyMySQLTe | stCase | ):
cursor_type = pymysql.cursors.DictCursor
def setUp |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.