repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Pure-Aosp/android_external_skia | platform_tools/android/gyp_gen/makefile_writer.py | 63 | 7149 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Functions for creating an Android.mk from already created dictionaries.
"""
import os
def write_group(f, name, items, append):
    """Write one makefile variable assignment listing all given items.

    Emits either ``NAME := item \\ item ...`` or ``NAME += item \\ item ...``
    with each item on its own backslash-continued, tab-indented line,
    followed by a blank line. Writes nothing when items is empty.

    Args:
      f: File open for writing (Android.mk)
      name: Name of the makefile variable (e.g. LOCAL_CFLAGS)
      items: list of strings to be passed to the variable.
      append: Whether to append to the variable or overwrite it.
    """
    if not items:
        return
    operator = '+=' if append else ':='
    # First entry carries the variable name; the rest are the values.
    lines = ['%s %s' % (name, operator)] + list(items)
    f.write(' \\\n\t'.join(lines))
    f.write('\n\n')
def write_local_vars(f, var_dict, append, name):
    """Write every member of var_dict to the makefile.

    Args:
      f: File open for writing (Android.mk)
      var_dict: VarsDict holding the unique values for one configuration.
      append: Whether to append to each makefile variable or overwrite it.
      name: If not None, a string to be appended to each key.
    """
    for key in var_dict.keys():
        if key == 'KNOWN_TARGETS':
            # KNOWN_TARGETS are not needed in the final make file.
            continue
        target_key = key
        values = var_dict[key]
        if key == 'LOCAL_CFLAGS':
            # Always append LOCAL_CFLAGS. This allows us to define some early
            # on in the makefile and not overwrite them.
            do_append = True
        elif key == 'DEFINES':
            # DEFINES become -D flags appended onto LOCAL_CFLAGS.
            do_append = True
            target_key = 'LOCAL_CFLAGS'
            values = ['-D' + define for define in values]
        else:
            do_append = append
        if name:
            target_key += '_' + name
        write_group(f, target_key, values, do_append)
AUTOGEN_WARNING = (
"""
###############################################################################
#
# THIS FILE IS AUTOGENERATED BY GYP_TO_ANDROID.PY. DO NOT EDIT.
#
###############################################################################
"""
)
DEBUGGING_HELP = (
"""
###############################################################################
#
# PROBLEMS WITH SKIA DEBUGGING?? READ THIS...
#
# The debug build results in changes to the Skia headers. This means that those
# using libskia must also be built with the debug version of the Skia headers.
# There are a few scenarios where this comes into play:
#
# (1) You're building debug code that depends on libskia.
# (a) If libskia is built in release, then define SK_RELEASE when building
# your sources.
# (b) If libskia is built with debugging (see step 2), then no changes are
# needed since your sources and libskia have been built with SK_DEBUG.
# (2) You're building libskia in debug mode.
# (a) RECOMMENDED: You can build the entire system in debug mode. Do this by
# updating your build/core/config.mk to include -DSK_DEBUG on the line
# that defines COMMON_GLOBAL_CFLAGS
# (b) You can update all the users of libskia to define SK_DEBUG when they are
# building their sources.
#
# NOTE: If neither SK_DEBUG or SK_RELEASE are defined then Skia checks NDEBUG to
# determine which build type to use.
###############################################################################
"""
)
SKIA_TOOLS = (
"""
#############################################################
# Build the skia tools
#
# benchmark (timings)
include $(BASE_PATH)/bench/Android.mk
# golden-master (fidelity / regression test)
include $(BASE_PATH)/gm/Android.mk
# unit-tests
include $(BASE_PATH)/tests/Android.mk
# diamond-master (one test to rule them all)
include $(BASE_PATH)/dm/Android.mk
"""
)
class VarsDictData(object):
    """Bundle a VarsDict together with a name and an optional condition."""

    def __init__(self, vars_dict, name, condition=None):
        """Create a new VarsDictData.

        Args:
          vars_dict: A VarsDict. Can be accessed via self.vars_dict.
          name: Name associated with the VarsDict. Can be accessed via
              self.name.
          condition: Optional string representing a condition. If not None,
              used to create a conditional inside the makefile.
        """
        self.vars_dict = vars_dict
        self.name = name
        self.condition = condition
def write_local_path(f):
    """Add the LOCAL_PATH line to the makefile.

    Args:
      f: File open for writing.
    """
    line = 'LOCAL_PATH:= $(call my-dir)\n'
    f.write(line)
def write_clear_vars(f):
    """Add the CLEAR_VARS include line to the makefile.

    Args:
      f: File open for writing.
    """
    line = 'include $(CLEAR_VARS)\n'
    f.write(line)
def write_include_stlport(f):
    """Add the line that includes the stlport makefile.

    Args:
      f: File open for writing.
    """
    line = 'include external/stlport/libstlport.mk\n'
    f.write(line)
def write_android_mk(target_dir, common, deviations_from_common):
    """Given all the variables, write the final make file.

    Args:
      target_dir: The full path to the directory to write Android.mk, or None
        to use the current working directory.
      common: VarsDict holding variables definitions common to all
        configurations.
      deviations_from_common: List of VarsDictData, one for each possible
        configuration. VarsDictData.name will be appended to each key before
        writing it to the makefile. VarsDictData.condition, if not None, will be
        written to the makefile as a condition to determine whether to include
        VarsDictData.vars_dict.
    """
    target_file = 'Android.mk'
    if target_dir:
        target_file = os.path.join(target_dir, target_file)
    with open(target_file, 'w') as f:
        # Fixed preamble: autogen warning, paths, debugging notes.
        f.write(AUTOGEN_WARNING)
        f.write('BASE_PATH := $(call my-dir)\n')
        write_local_path(f)
        f.write(DEBUGGING_HELP)
        write_clear_vars(f)
        f.write('LOCAL_ARM_MODE := thumb\n')

        # need a flag to tell the C side when we're on devices with large memory
        # budgets (i.e. larger than the low-end devices that initially shipped)
        # On arm, only define the flag if it has VFP. For all other architectures,
        # always define the flag.
        f.write('ifeq ($(TARGET_ARCH),arm)\n')
        f.write('\tifeq ($(ARCH_ARM_HAVE_VFP),true)\n')
        f.write('\t\tLOCAL_CFLAGS += -DANDROID_LARGE_MEMORY_DEVICE\n')
        f.write('\tendif\n')
        f.write('else\n')
        f.write('\tLOCAL_CFLAGS += -DANDROID_LARGE_MEMORY_DEVICE\n')
        f.write('endif\n\n')

        # Commented-out debug flags kept in the output for convenience.
        f.write('# used for testing\n')
        f.write('#LOCAL_CFLAGS += -g -O0\n\n')

        f.write('ifeq ($(NO_FALLBACK_FONT),true)\n')
        f.write('\tLOCAL_CFLAGS += -DNO_FALLBACK_FONT\n')
        f.write('endif\n\n')

        # Common variables overwrite (append=False); per-config variables
        # append so they extend rather than clobber the common ones.
        write_local_vars(f, common, False, None)

        for data in deviations_from_common:
            if data.condition:
                f.write('ifeq ($(%s), true)\n' % data.condition)
            write_local_vars(f, data.vars_dict, True, data.name)
            if data.condition:
                f.write('endif\n\n')

        write_include_stlport(f)
        f.write('include $(BUILD_SHARED_LIBRARY)\n')
        f.write(SKIA_TOOLS)
| bsd-3-clause |
tealover/nova | nova/tests/unit/objects/test_numa.py | 33 | 10050 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova import objects
from nova.tests.unit.objects import test_objects
# Canonical two-cell host NUMA topology fixture used by the round-trip
# conversion test below.
fake_obj_numa = objects.NUMATopology(
    cells=[
        objects.NUMACell(
            id=0, cpuset=set([1, 2]), memory=512,
            cpu_usage=2, memory_usage=256,
            mempages=[], pinned_cpus=set([]),
            siblings=[]),
        objects.NUMACell(
            id=1, cpuset=set([3, 4]), memory=512,
            cpu_usage=1, memory_usage=128,
            mempages=[], pinned_cpus=set([]),
            siblings=[])])
class _TestNUMA(object):
    """NUMA object tests shared by the local and remote test cases below.

    NOTE(review): exercises nova.objects NUMA* classes; comments describe
    only what these assertions themselves demonstrate.
    """

    def test_convert_wipe(self):
        # Round-tripping through the primitive form must preserve the dict.
        d1 = fake_obj_numa._to_dict()
        d2 = objects.NUMATopology.obj_from_primitive(d1)._to_dict()
        self.assertEqual(d1, d2)

    def test_from_legacy_limits(self):
        old_style = {"cells": [
            {"mem": {
                "total": 1024,
                "limit": 2048},
                "cpu_limit": 96.0,
                "cpus": "0,1,2,3,4,5",
                "id": 0}]}
        limits = objects.NUMATopologyLimits.obj_from_db_obj(old_style)
        # 96.0 cpu_limit over 6 cpus -> 16.0; 2048 mem limit over 1024 -> 2.0
        self.assertEqual(16.0, limits.cpu_allocation_ratio)
        self.assertEqual(2.0, limits.ram_allocation_ratio)

    def test_to_legacy_limits(self):
        limits = objects.NUMATopologyLimits(
            cpu_allocation_ratio=16,
            ram_allocation_ratio=2)
        host_topo = objects.NUMATopology(cells=[
            objects.NUMACell(id=0, cpuset=set([1, 2]), memory=1024)
        ])
        old_style = {'cells': [
            {'mem': {'total': 1024,
                     'limit': 2048.0},
             'id': 0,
             'cpus': '1,2',
             'cpu_limit': 32.0}]}
        self.assertEqual(old_style, limits.to_dict_legacy(host_topo))

    def test_free_cpus(self):
        obj = objects.NUMATopology(cells=[
            objects.NUMACell(
                id=0, cpuset=set([1, 2]), memory=512,
                cpu_usage=2, memory_usage=256,
                pinned_cpus=set([1]), siblings=[],
                mempages=[]),
            objects.NUMACell(
                id=1, cpuset=set([3, 4]), memory=512,
                cpu_usage=1, memory_usage=128,
                pinned_cpus=set([]), siblings=[],
                mempages=[])
        ]
        )
        # free_cpus is the cpuset minus the pinned CPUs.
        self.assertEqual(set([2]), obj.cells[0].free_cpus)
        self.assertEqual(set([3, 4]), obj.cells[1].free_cpus)

    def test_pinning_logic(self):
        numacell = objects.NUMACell(id=0, cpuset=set([1, 2, 3, 4]), memory=512,
                                    cpu_usage=2, memory_usage=256,
                                    pinned_cpus=set([1]), siblings=[],
                                    mempages=[])
        numacell.pin_cpus(set([2, 3]))
        self.assertEqual(set([4]), numacell.free_cpus)
        # Pinning an already-pinned CPU (1) or one outside the cpuset (6)
        # must raise; so must unpinning a CPU that is not pinned (4).
        self.assertRaises(exception.CPUPinningInvalid,
                          numacell.pin_cpus, set([1, 4]))
        self.assertRaises(exception.CPUPinningInvalid,
                          numacell.pin_cpus, set([1, 6]))
        self.assertRaises(exception.CPUPinningInvalid,
                          numacell.unpin_cpus, set([1, 4]))
        numacell.unpin_cpus(set([1, 2, 3]))
        self.assertEqual(set([1, 2, 3, 4]), numacell.free_cpus)

    def test_pages_topology_wipe(self):
        pages_topology = objects.NUMAPagesTopology(
            size_kb=2048, total=1024, used=512)
        self.assertEqual(2048, pages_topology.size_kb)
        self.assertEqual(1024, pages_topology.total)
        self.assertEqual(512, pages_topology.used)
        self.assertEqual(512, pages_topology.free)
        # free_kb == free pages (512) * page size (2048 KiB)
        self.assertEqual(1048576, pages_topology.free_kb)

    def test_can_fit_hugepages(self):
        cell = objects.NUMACell(
            id=0, cpuset=set([1, 2]), memory=1024,
            siblings=[], pinned_cpus=set([]),
            mempages=[
                objects.NUMAPagesTopology(
                    size_kb=4, total=1548736, used=0),
                objects.NUMAPagesTopology(
                    size_kb=2048, total=513, used=0)])  # 1,002G
        pagesize = 2048
        self.assertTrue(cell.can_fit_hugepages(pagesize, 2 ** 20))
        self.assertFalse(cell.can_fit_hugepages(pagesize, 2 ** 21))
        self.assertFalse(cell.can_fit_hugepages(pagesize, 2 ** 19 + 1))
        # A page size not present in mempages is rejected outright.
        self.assertRaises(
            exception.MemoryPageSizeNotSupported,
            cell.can_fit_hugepages, 12345, 2 ** 20)

    def test_default_behavior(self):
        # A freshly created cell reports no changed fields.
        inst_cell = objects.NUMACell()
        self.assertEqual(0, len(inst_cell.obj_get_changes()))

    def test_obj_cell_relationships(self):
        obj = objects.NUMATopology(cells=[objects.NUMACell()])
        rel = objects.NUMATopology.obj_relationships['cells']
        # Each topology version must backlevel its cells to the mapped version.
        for topo_ver, cell_ver in rel:
            prim = obj.obj_to_primitive(target_version=topo_ver)
            cell = objects.NUMATopology.obj_from_primitive(prim).cells[0]
            self.assertEqual(cell_ver, cell.VERSION)

    def test_numa_pages_equivalent(self):
        pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        self.assertEqual(pt1, pt2)

    def test_numa_pages_not_equivalent(self):
        pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        pt2 = objects.NUMAPagesTopology(size_kb=1024, total=33, used=0)
        self.assertNotEqual(pt1, pt2)

    def test_numa_pages_not_equivalent_missing_a(self):
        # An unset field on either side breaks equality.
        pt1 = objects.NUMAPagesTopology(size_kb=1024, used=0)
        pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        self.assertNotEqual(pt1, pt2)

    def test_numa_pages_not_equivalent_missing_b(self):
        pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        pt2 = objects.NUMAPagesTopology(size_kb=1024, used=0)
        self.assertNotEqual(pt1, pt2)

    def test_numa_cell_equivalent(self):
        cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        cell2 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        self.assertEqual(cell1, cell2)

    def test_numa_cell_not_equivalent(self):
        cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        cell2 = objects.NUMACell(id=2, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        self.assertNotEqual(cell1, cell2)

    def test_numa_cell_not_equivalent_missing_a(self):
        cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        cell2 = objects.NUMACell(id=2, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        self.assertNotEqual(cell1, cell2)

    def test_numa_cell_not_equivalent_missing_b(self):
        cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        cell2 = objects.NUMACell(id=2, cpuset=set([1, 2]), memory=32,
                                 pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])])
        self.assertNotEqual(cell1, cell2)

    def test_numa_cell_equivalent_different_pages(self):
        # Equal mempages objects keep the cells equal.
        pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])],
                                 mempages=[pt1])
        cell2 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])],
                                 mempages=[pt2])
        self.assertEqual(cell1, cell2)

    def test_numa_cell_not_equivalent_different_pages(self):
        # A differing mempages entry (used=0 vs used=1) breaks cell equality.
        pt1 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=0)
        pt2 = objects.NUMAPagesTopology(size_kb=1024, total=32, used=1)
        cell1 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])],
                                 mempages=[pt1])
        cell2 = objects.NUMACell(id=1, cpuset=set([1, 2]), memory=32,
                                 cpu_usage=10, pinned_cpus=set([3, 4]),
                                 siblings=[set([5, 6])],
                                 mempages=[pt2])
        self.assertNotEqual(cell1, cell2)
class TestNUMA(test_objects._LocalTest,
               _TestNUMA):
    # Runs the shared _TestNUMA suite against in-process (local) objects.
    pass
class TestNUMARemote(test_objects._RemoteTest,
                     _TestNUMA):
    # Runs the shared _TestNUMA suite through the remote-object code path.
    pass
| apache-2.0 |
XCSoar/XCSoar | python/test/test_xcsoar.py | 37 | 2408 | #!/usr/bin/env python
import xcsoar
import argparse
from pprint import pprint
# Parse command line parameters
# NOTE(review): Python 2 script (print statements); indentation reconstructed
# from the flattened source -- verify loop nesting against upstream.

# Parse command line parameters
parser = argparse.ArgumentParser(
    description='Please give me a IGC file name...')

parser.add_argument('file_name', type=str)

args = parser.parse_args()

# First pass: parse the IGC file lazily (fixes not kept in memory).
print "Init xcsoar.Flight, don't store flight in memory"
flight = xcsoar.Flight(args.file_name, False)

times = flight.times()

for dtime in times:
    takeoff = dtime['takeoff']
    release = dtime['release']
    landing = dtime['landing']

    print "Takeoff: {}, location {}".format(takeoff['time'], takeoff['location'])
    print "Release: {}, location {}".format(release['time'], release['location'])
    print "Landing: {}, location {}".format(landing['time'], landing['location'])

    print "Flight path from takeoff to release:"
    fixes = flight.path(takeoff['time'], release['time'])
    for fix in fixes:
        print fix

del flight

# Second pass: keep the whole flight in memory so it can be reduced,
# analysed and encoded.
print
print "Init xcsoar.Flight, store flight on init in memory"
flight = xcsoar.Flight(args.file_name, True)

times = flight.times()
flight_sequence = None

for dtime in times:
    takeoff = dtime['takeoff']
    release = dtime['release']
    landing = dtime['landing']

    print "Takeoff: {}, location {}".format(takeoff['time'], takeoff['location'])
    print "Release: {}, location {}".format(release['time'], release['location'])
    print "Landing: {}, location {}".format(landing['time'], landing['location'])

    print "Flight path from takeoff to release:"
    fixes = flight.path(takeoff['time'], release['time'])
    for fix in fixes:
        print fix

    # Thin the stored fixes down to at most 10 points.
    flight.reduce(takeoff['time'], landing['time'], max_points=10)

    print "Flight path from takeoff to landing, reduced:"
    fixes = flight.path(takeoff['time'], landing['time'])
    for fix in fixes:
        print fix

    flight_sequence = fixes

    analysis = flight.analyse(takeoff=takeoff['time'],
                              scoring_start=release['time'],
                              scoring_end=landing['time'],
                              landing=landing['time'])
    pprint(analysis)

    fixes = flight.path(takeoff['time'], landing['time'])
    # Google polyline encoding of the (lon, lat) track.
    print xcsoar.encode([(row[2]['longitude'], row[2]['latitude']) for row in fixes], floor=10e5, method="double")

    pprint(flight.encode())

del flight

# Third pass: rebuild a Flight object from the reduced fix sequence.
print
print "Init xcsoar.Flight with a python sequence"
flight = xcsoar.Flight([fix[0:5] for fix in flight_sequence])

for fix in flight.path():
    print fix

del flight
| gpl-2.0 |
JianyuWang/nova | nova/tests/unit/virt/hyperv/test_networkutilsv2.py | 68 | 1724 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova.tests.unit.virt.hyperv import test_networkutils
from nova.virt.hyperv import networkutilsv2
class NetworkUtilsV2TestCase(test_networkutils.NetworkUtilsTestCase):
    """Unit tests for the Hyper-V NetworkUtilsV2 class."""

    # WMI class name used by the V2 namespace for virtual switches.
    _MSVM_VIRTUAL_SWITCH = 'Msvm_VirtualEthernetSwitch'

    def setUp(self):
        super(NetworkUtilsV2TestCase, self).setUp()
        self._networkutils = networkutilsv2.NetworkUtilsV2()
        # Replace the WMI connection with a mock so no Hyper-V host is needed.
        self._networkutils._conn = mock.MagicMock()

    def _prepare_external_port(self, mock_vswitch, mock_ext_port):
        # Wire up the mock association chain
        # ext_port -> endpoint -> endpoint -> switch, terminating at the
        # given vswitch. Called by inherited tests from the base class.
        mock_lep = mock_ext_port.associators()[0]
        mock_lep1 = mock_lep.associators()[0]
        mock_esw = mock_lep1.associators()[0]
        mock_esw.associators.return_value = [mock_vswitch]

    def test_create_vswitch_port(self):
        # The V2 implementation does not support creating switch ports.
        self.assertRaises(
            NotImplementedError,
            self._networkutils.create_vswitch_port,
            mock.sentinel.FAKE_VSWITCH_PATH,
            mock.sentinel.FAKE_PORT_NAME)

    def test_vswitch_port_needed(self):
        self.assertFalse(self._networkutils.vswitch_port_needed())
| apache-2.0 |
aleximplode/apylife | src/main.py | 1 | 2710 | import copy
import sys
import pygame
__author__ = 'alex'

# Constants
SCREEN_WIDTH = 400        # window width in pixels
SCREEN_HEIGHT = 400       # window height in pixels
BG_COLOUR = (255, 255, 255)  # white background (RGB)
CELL_WIDTH = 40           # number of grid columns
CELL_HEIGHT = 40          # number of grid rows
def start():
    """Run the Game of Life main loop.

    Enter/Return toggles edit mode; while editing, left-clicking a cell
    toggles it. The simulation only advances when not in edit mode.
    """
    pygame.init()
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)
    clock = pygame.time.Clock()

    # Grid of 0/1 cells, indexed cells[row][col]; seed with a glider-like
    # starting pattern.
    cells = [[0 for x in range(CELL_WIDTH)] for x in range(CELL_HEIGHT)]
    cells[3][6] = 1
    cells[4][7] = 1
    cells[5][5] = 1
    cells[5][6] = 1
    cells[5][7] = 1

    editMode = False

    while True:
        # Cap the loop at 15 FPS.
        deltaTime = clock.tick(15)

        # Event handling
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            elif event.type == pygame.KEYUP:
                if event.key == pygame.K_RETURN:
                    editMode = not editMode
            elif event.type == pygame.MOUSEBUTTONDOWN and event.button == 1 and editMode:
                # Map the pixel position to a grid cell and toggle it.
                # NOTE(review): relies on Python 2 integer division; under
                # Python 3 these indices would be floats.
                row = event.pos[1] / (SCREEN_HEIGHT / CELL_HEIGHT)
                col = event.pos[0] / (SCREEN_WIDTH / CELL_WIDTH)
                cells[row][col] = (cells[row][col] + 1) % 2

        if not editMode:
            processCells(cells)

        screen.fill(BG_COLOUR)
        render(screen, cells)
        pygame.display.flip()
def countSiblingCells(cells, x, y):
    """Return the number of live neighbours of cell (x, y).

    The grid is toroidal: indices past the right/bottom edge wrap via the
    modulo operator, and -1 wraps via Python's negative indexing.
    """
    left = x - 1
    right = (x + 1) % CELL_WIDTH
    above = y - 1
    below = (y + 1) % CELL_HEIGHT
    total = 0
    # All eight neighbours of (x, y), as (row, col) pairs.
    for row, col in ((above, left), (y, left), (below, left),
                     (above, x), (below, x),
                     (above, right), (y, right), (below, right)):
        total += cells[row][col]
    return total
def processCells(cells):
    """Advance the grid one generation in place, using Conway's rules.

    A snapshot of the current generation is taken first so every cell is
    judged against the same previous state.
    """
    snapshot = copy.deepcopy(cells)
    for row in range(CELL_HEIGHT):
        for col in range(CELL_WIDTH):
            neighbours = countSiblingCells(snapshot, col, row)
            alive = snapshot[row][col] == 1
            if alive:
                # Under- or over-population kills; 2-3 neighbours survive.
                if neighbours < 2 or neighbours > 3:
                    cells[row][col] = 0
            elif neighbours == 3:
                # Reproduction: a dead cell with exactly 3 neighbours lives.
                cells[row][col] = 1
def render(screen, cells):
    """Draw the grid: live cells filled black, dead cells as black outlines."""
    for row in range(CELL_HEIGHT):
        for col in range(CELL_WIDTH):
            cell = pygame.Rect(col * (SCREEN_WIDTH / CELL_WIDTH), row * (SCREEN_HEIGHT / CELL_HEIGHT),
                               SCREEN_WIDTH / CELL_WIDTH, SCREEN_HEIGHT / CELL_HEIGHT)
            colour = (0, 0, 0)
            # border width 0 means "filled" in pygame.draw.rect.
            border = 1
            if cells[row][col] == 1:
                border = 0
            pygame.draw.rect(screen, colour, cell, border)
if __name__ == '__main__':
    # Entry point when run as a script.
    start()
joopert/home-assistant | homeassistant/components/folder/sensor.py | 2 | 3154 | """Sensor for monitoring the contents of a folder."""
from datetime import timedelta
import glob
import logging
import os
import voluptuous as vol
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
_LOGGER = logging.getLogger(__name__)

# Configuration keys for this platform.
CONF_FOLDER_PATHS = "folder"
CONF_FILTER = "filter"
DEFAULT_FILTER = "*"

# How often Home Assistant polls the sensor's update() method.
SCAN_INTERVAL = timedelta(minutes=1)

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_FOLDER_PATHS): cv.isdir,
        vol.Optional(CONF_FILTER, default=DEFAULT_FILTER): cv.string,
    }
)
def get_files_list(folder_path, filter_term):
    """Return the paths under folder_path matching the glob filter_term.

    folder_path is expected to end with a path separator; the filter term
    is appended directly to form the glob pattern.
    """
    return glob.glob(folder_path + filter_term)
def get_size(files_list):
    """Return the total size in bytes of the regular files in files_list.

    Entries that are not regular files (directories, sockets, ...) are
    skipped rather than counted.
    """
    total = 0
    for path in files_list:
        if os.path.isfile(path):
            total += os.stat(path).st_size
    return total
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the folder sensor."""
    path = config.get(CONF_FOLDER_PATHS)

    # Reject paths outside Home Assistant's configured whitelist.
    if not hass.config.is_allowed_path(path):
        _LOGGER.error("folder %s is not valid or allowed", path)
    else:
        folder = Folder(path, config.get(CONF_FILTER))
        # True -> trigger an immediate first update.
        add_entities([folder], True)
class Folder(Entity):
    """Representation of a folder.

    State is the total size (MB) of the files matching the filter; file
    count, byte size and file list are exposed as attributes.
    """

    ICON = "mdi:folder"

    def __init__(self, folder_path, filter_term):
        """Initialize the data object."""
        folder_path = os.path.join(folder_path, "")  # If no trailing / add it
        self._folder_path = folder_path  # Need to check its a valid path
        self._filter_term = filter_term
        self._number_of_files = None   # populated on first update()
        self._size = None              # total bytes, populated on update()
        # Entity name: the last directory component of the path.
        self._name = os.path.split(os.path.split(folder_path)[0])[1]
        self._unit_of_measurement = "MB"
        self._file_list = None

    def update(self):
        """Update the sensor."""
        files_list = get_files_list(self._folder_path, self._filter_term)
        self._file_list = files_list
        self._number_of_files = len(files_list)
        self._size = get_size(files_list)

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        decimals = 2
        # Convert bytes to megabytes for display.
        size_mb = round(self._size / 1e6, decimals)
        return size_mb

    @property
    def icon(self):
        """Icon to use in the frontend, if any."""
        return self.ICON

    @property
    def device_state_attributes(self):
        """Return other details about the sensor state."""
        attr = {
            "path": self._folder_path,
            "filter": self._filter_term,
            "number_of_files": self._number_of_files,
            "bytes": self._size,
            "file_list": self._file_list,
        }
        return attr

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement
| apache-2.0 |
blankenberg/tools-iuc | tools/table_compute/scripts/safety.py | 17 | 9977 | import re
class Safety():
    """
    Class to safely evaluate mathematical expression on single
    or table data
    """

    # Operators, keywords and bare function names a user expression may use.
    __allowed_tokens = (
        '(', ')', 'if', 'else', 'or', 'and', 'not', 'in',
        '+', '-', '*', '/', '%', ',', '!=', '==', '>', '>=', '<', '<=',
        'min', 'max', 'sum',
    )
    # Methods/attributes callable on a whitelisted reference, keyed by the
    # declared reference type.
    __allowed_ref_types = {
        'pd.DataFrame': {
            'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
            'apply', 'applymap', 'as_matrix', 'asfreq', 'at', 'axes', 'bool',
            'clip', 'clip_lower', 'clip_upper', 'columns', 'combine',
            'compound', 'corr', 'count', 'cov', 'cummax', 'cummin', 'cumprod',
            'cumsum', 'describe', 'div', 'divide', 'dot', 'drop',
            'drop_duplicates', 'droplevel', 'dropna', 'duplicated', 'empty',
            'eq', 'equals', 'expanding', 'ffill', 'fillna', 'filter', 'first',
            'first_valid_index', 'floordiv', 'ge', 'groupby', 'gt', 'head',
            'iat', 'iloc', 'index', 'insert', 'interpolate', 'isin', 'isna',
            'isnull', 'items', 'iteritems', 'iterrows', 'itertuples', 'ix',
            'join', 'keys', 'kurt', 'kurtosis', 'last', 'last_valid_index',
            'le', 'loc', 'lookup', 'lt', 'mad', 'mask', 'max', 'mean',
            'median', 'melt', 'merge', 'min', 'mod', 'mode', 'mul', 'multiply',
            'ndim', 'ne', 'nlargest', 'notna', 'notnull', 'nsmallest',
            'nunique', 'pct_change', 'pivot', 'pivot_table', 'pop', 'pow',
            'prod', 'product', 'quantile', 'radd', 'rank', 'rdiv', 'replace',
            'resample', 'rfloordiv', 'rmod', 'rmul', 'rolling', 'round',
            'rpow', 'rsub', 'rtruediv', 'sample', 'select',
            'sem', 'shape', 'shift', 'size', 'skew', 'slice_shift',
            'squeeze', 'stack', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
            'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
            'truediv', 'truncate', 'tshift', 'unstack', 'var', 'where',
        },
        'pd.Series': {
            'abs', 'add', 'agg', 'aggregate', 'align', 'all', 'any', 'append',
            'apply', 'argsort', 'as_matrix', 'asfreq', 'asof', 'astype', 'at',
            'at_time', 'autocorr', 'axes', 'between', 'between_time', 'bfill',
            'bool', 'cat', 'clip', 'clip_lower', 'clip_upper', 'combine',
            'combine_first', 'compound', 'corr', 'count', 'cov', 'cummax',
            'cummin', 'cumprod', 'cumsum', 'describe', 'diff', 'div', 'divide',
            'divmod', 'dot', 'drop', 'drop_duplicates', 'droplevel', 'dropna',
            'dt', 'dtype', 'dtypes', 'duplicated', 'empty', 'eq', 'equals',
            'ewm', 'expanding', 'factorize', 'ffill', 'fillna', 'filter',
            'first', 'first_valid_index', 'flags', 'floordiv', 'ge', 'groupby',
            'gt', 'hasnans', 'head', 'iat', 'idxmax', 'idxmin', 'iloc', 'imag',
            'index', 'interpolate', 'is_monotonic', 'is_monotonic_decreasing',
            'is_monotonic_increasing', 'is_unique', 'isin', 'isna', 'isnull',
            'item', 'items', 'iteritems', 'ix', 'keys', 'kurt', 'kurtosis',
            'last', 'last_valid_index', 'le', 'loc', 'lt', 'mad', 'map',
            'mask', 'max', 'mean', 'median', 'min', 'mod', 'mode', 'mul',
            'multiply', 'name', 'ndim', 'ne', 'nlargest', 'nonzero', 'notna',
            'notnull', 'nsmallest', 'nunique', 'pct_change', 'pop', 'pow',
            'prod', 'product', 'ptp', 'quantile', 'radd', 'rank', 'rdiv',
            'rdivmod', 'real', 'repeat', 'replace', 'resample', 'rfloordiv',
            'rmod', 'rmul', 'rolling', 'round', 'rpow', 'rsub', 'rtruediv',
            'sample', 'searchsorted', 'select', 'sem', 'shape', 'shift',
            'size', 'skew', 'slice_shift', 'sort_index', 'sort_values',
            'squeeze', 'std', 'sub', 'subtract', 'sum', 'swapaxes',
            'swaplevel', 'T', 'tail', 'take', 'transform', 'transpose',
            'truediv', 'truncate', 'tshift', 'unique', 'unstack',
            'value_counts', 'var', 'where', 'xs',
        },
    }
    # Qualified names (module.attr) allowed in expressions, keyed by the
    # module alias users must write.
    __allowed_qualified = {
        # allowed numpy functionality
        'np': {
            'abs', 'add', 'all', 'any', 'append', 'array', 'bool', 'ceil',
            'complex', 'cos', 'cosh', 'cov', 'cumprod', 'cumsum', 'degrees',
            'divide', 'divmod', 'dot', 'e', 'empty', 'exp', 'float', 'floor',
            'hypot', 'inf', 'int', 'isfinite', 'isin', 'isinf', 'isnan', 'log',
            'log10', 'log2', 'max', 'mean', 'median', 'min', 'mod', 'multiply',
            'nan', 'ndim', 'pi', 'product', 'quantile', 'radians', 'rank',
            'remainder', 'round', 'sin', 'sinh', 'size', 'sqrt', 'squeeze',
            'stack', 'std', 'str', 'subtract', 'sum', 'swapaxes', 'take',
            'tan', 'tanh', 'transpose', 'unique', 'var', 'where',
        },
        # allowed math functionality
        'math': {
            'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil',
            'copysign', 'cos', 'cosh', 'degrees', 'e', 'erf', 'erfc', 'exp',
            'expm1', 'fabs', 'factorial', 'floor', 'fmod', 'frexp', 'fsum',
            'gamma', 'gcd', 'hypot', 'inf', 'isclose', 'isfinite', 'isinf',
            'isnan', 'ldexp', 'lgamma', 'log', 'log10', 'log1p', 'log2',
            'modf', 'nan', 'pi', 'pow', 'radians', 'remainder', 'sin', 'sinh',
            'sqrt', 'tan', 'tanh', 'tau', 'trunc',
        },
        # allowed pd functionality
        'pd': {
            'DataFrame', 'array', 'concat', 'cut', 'date_range', 'factorize',
            'interval_range', 'isna', 'isnull', 'melt', 'merge', 'notna',
            'notnull', 'period_range', 'pivot', 'pivot_table', 'unique',
            'value_counts', 'wide_to_long',
        },
    }

    def __init__(self, expression,
                 ref_whitelist=None, ref_type=None,
                 custom_qualified=None):
        # Validates `expression` immediately; raises ValueError if unsafe.
        # ref_whitelist: names (e.g. table/vector variables) the expression
        # may reference; ref_type selects which method set those names get.
        self.allowed_qualified = self.__allowed_qualified.copy()
        if ref_whitelist is None:
            self.these = []
        else:
            self.these = ref_whitelist
        # Whitelisted references are internally rewritten to the pseudo-module
        # '_this' so their allowed methods can be looked up uniformly.
        if ref_type is None or ref_type not in self.__allowed_ref_types:
            self.allowed_qualified['_this'] = set()
        else:
            self.allowed_qualified[
                '_this'
            ] = self.__allowed_ref_types[ref_type]
        if custom_qualified is not None:
            self.allowed_qualified.update(custom_qualified)
        self.expr = expression
        self.__assertSafe()

    def generateFunction(self):
        "Generates a function to be evaluated outside the class"
        # NOTE(review): assumes a non-empty ref_whitelist -- self.these[0]
        # becomes the generated function's parameter name.
        cust_fun = "def fun(%s):\n\treturn(%s)" % (self.these[0], self.expr)
        return cust_fun

    def __assertSafe(self):
        # Raise (with a printed explanation) on the first disallowed token.
        indeed, problematic_token = self.__isSafeStatement()
        if not indeed:
            self.detailedExcuse(problematic_token)
            raise ValueError("Custom Expression is not safe.")

    @staticmethod
    def detailedExcuse(word):
        "Gives a verbose statement for why users should not use some specific operators."
        mess = None
        if word == "for":
            mess = "for loops and comprehensions are not allowed. Use numpy or pandas table operations instead."
        elif word == ":":
            mess = "Colons are not allowed. Use inline Python if/else statements."
        elif word == "=":
            mess = "Variable assignment is not allowed. Use object methods to substitute values."
        elif word in ("[", "]"):
            mess = "Direct indexing of arrays is not allowed. Use numpy or pandas functions/methods to address specific parts of tables."
        else:
            mess = "Not an allowed token in this operation"
        print("( '%s' ) %s" % (word, mess))

    def __isSafeStatement(self):
        """
        Determines if a user-expression is safe to evaluate.

        To be considered safe an expression may contain only:
        - standard Python operators and numbers
        - inline conditional expressions
        - select functions and objects
          by default, these come from the math, numpy and pandas
          libraries, and must be qualified with the modules' conventional
          names math, np, pd; can be overridden at the instance level
        - references to a whitelist of objects (pd.DataFrames by default)
          and their methods

        Returns a (is_safe, offending_token) tuple; offending_token is ''
        when the expression is safe.
        """
        safe = True
        # examples of user-expressions
        # '-math.log(1 - elem/4096) * 4096 if elem != 1 else elem - 0.5'
        # 'vec.median() + vec.sum()'

        # 1. Break expressions into tokens
        # e.g.,
        # [
        #     '-', 'math.log', '(', '1', '-', 'elem', '/', '4096', ')', '*',
        #     '4096', 'if', 'elem', '!=', '1', 'else', 'elem', '-', '0.5'
        # ]
        # or
        # ['vec.median', '(', ')', '+', 'vec.sum', '(', ')']
        tokens = [
            e for e in re.split(
                r'([a-zA-Z0-9_.]+|[^a-zA-Z0-9_.() ]+|[()])', self.expr
            ) if e.strip()
        ]

        # 2. Subtract allowed standard tokens
        rem = [e for e in tokens if e not in self.__allowed_tokens]

        # 3. Subtract allowed qualified objects from allowed modules
        #    and whitelisted references and their attributes
        rem2 = []
        for e in rem:
            parts = e.split('.')
            if len(parts) == 1:
                if parts[0] in self.these:
                    continue
            if len(parts) == 2:
                if parts[0] in self.these:
                    parts[0] = '_this'
                if parts[0] in self.allowed_qualified:
                    if parts[1] in self.allowed_qualified[parts[0]]:
                        continue
            rem2.append(e)

        # 4. Assert that rest are real numbers or strings
        # NOTE(review): only float() parsing is attempted here, so bare
        # strings are in fact rejected despite the comment above.
        e = ''
        for e in rem2:
            try:
                _ = float(e)
            except ValueError:
                safe = False
                break

        return safe, e
| mit |
asvetlov/orm | setup.py | 1 | 1663 | import os
import re
import sys
from setuptools import setup, find_packages
# Interpreter version gate: refuse to install on unsupported Pythons.
PY_VER = sys.version_info

if PY_VER < (3, 4):
    # Fixed typo in the original message ("suppport" -> "support").
    raise RuntimeError("orm doesn't support Python earlier than 3.4")
def read(f):
    """Return the stripped text of file *f* relative to this script's directory.

    Args:
      f: File name, resolved relative to the directory containing setup.py.

    Returns:
      The file contents with leading/trailing whitespace stripped.
    """
    # Use a context manager so the handle is closed deterministically
    # (the original left the file object to be closed by garbage collection).
    with open(os.path.join(os.path.dirname(__file__), f)) as fp:
        return fp.read().strip()
def read_version():
    """Extract the package version from ``orm/__init__.py``.

    Scans the file for a ``__version__ = '...'`` assignment and returns
    the captured version string; raises RuntimeError when none is found.
    """
    version_re = re.compile(r"^__version__\W*=\W*'([\d.abrc]+)'")
    init_py = os.path.join(os.path.dirname(__file__), 'orm', '__init__.py')
    with open(init_py) as init_file:
        for line in init_file:
            found = version_re.match(line)
            if found is not None:
                return found.group(1)
    raise RuntimeError('Cannot find version in orm/__init__.py')
# Trove classifiers shown on the PyPI project page.
# NOTE(review): the classifier advertises a BSD license while setup()
# below passes license='Apache 2' — one of the two looks wrong; confirm
# the intended license.
classifiers = [
    'License :: OSI Approved :: BSD License',
    'Intended Audience :: Developers',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3.4',
    'Operating System :: POSIX',
    'Operating System :: MacOS :: MacOS X',
    'Environment :: Web Environment',
    'Development Status :: 4 - Beta',
    'Topic :: Database',
    'Topic :: Database :: Front-Ends',
]
# Package metadata; the long description concatenates the README and the
# changelog for display on PyPI.
setup(name='orm',
      version=read_version(),
      description=('ORM for asyncio-compatible SQL drivers.'),
      long_description='\n\n'.join((read('README.rst'), read('CHANGES.txt'))),
      classifiers=classifiers,
      platforms=['POSIX'],
      author='Andrew Svetlov',
      author_email='andrew.svetlov@gmail.com',
      url='http://orm.readthedocs.org',
      download_url='https://pypi.python.org/pypi/orm',
      license='Apache 2',
      packages=find_packages(),
      include_package_data=True)
| apache-2.0 |
jesramirez/odoo | addons/website/tests/test_converter.py | 280 | 8947 | # -*- coding: utf-8 -*-
import textwrap
import unittest2
from lxml import etree, html
from lxml.builder import E
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
from openerp.addons.website.models.ir_qweb import html_to_text
from openerp.addons.website.models.website import slugify, unslug
class TestUnslug(unittest2.TestCase):
    """Unit tests for ``unslug``: parsing a URL slug back into a
    (name, record id) pair."""

    def test_unslug(self):
        # Map of slug -> expected (name, id); (None, None) marks slugs
        # that cannot be parsed at all.
        tests = {
            '': (None, None),
            'foo': (None, None),
            'foo-': (None, None),
            '-': (None, None),
            'foo-1': ('foo', 1),
            'foo-bar-1': ('foo-bar', 1),
            'foo--1': ('foo', -1),
            '1': (None, 1),
            '1-1': ('1', 1),
            '--1': (None, None),
            'foo---1': (None, None),
            'foo1': (None, None),
        }

        # Python 2 codebase: iteritems() iterates without building a list.
        for slug, expected in tests.iteritems():
            self.assertEqual(unslug(slug), expected)
class TestHTMLToText(unittest2.TestCase):
    """Unit tests for ``html_to_text``: converting an lxml HTML tree to
    plain text with block/inline-aware line breaks."""

    def test_rawstring(self):
        # Plain text content is returned unchanged.
        self.assertEqual(
            "foobar",
            html_to_text(E.div("foobar")))

    def test_br(self):
        # Each <br> yields one newline.
        self.assertEqual(
            "foo\nbar",
            html_to_text(E.div("foo", E.br(), "bar")))

        self.assertEqual(
            "foo\n\nbar\nbaz",
            html_to_text(E.div(
                "foo", E.br(), E.br(),
                "bar", E.br(),
                "baz")))

    def test_p(self):
        # Paragraphs are separated from surrounding content by a blank
        # line, without leading/trailing blank lines on the result.
        self.assertEqual(
            "foo\n\nbar\n\nbaz",
            html_to_text(E.div(
                "foo",
                E.p("bar"),
                "baz")))

        self.assertEqual(
            "foo",
            html_to_text(E.div(E.p("foo"))))

        self.assertEqual(
            "foo\n\nbar",
            html_to_text(E.div("foo", E.p("bar"))))

        self.assertEqual(
            "foo\n\nbar",
            html_to_text(E.div(E.p("foo"), "bar")))

        self.assertEqual(
            "foo\n\nbar\n\nbaz",
            html_to_text(E.div(
                E.p("foo"),
                E.p("bar"),
                E.p("baz"),
            )))

    def test_div(self):
        # <div> breaks the line but does not insert a blank line.
        self.assertEqual(
            "foo\nbar\nbaz",
            html_to_text(E.div(
                "foo",
                E.div("bar"),
                "baz"
            )))

        self.assertEqual(
            "foo",
            html_to_text(E.div(E.div("foo"))))

        self.assertEqual(
            "foo\nbar",
            html_to_text(E.div("foo", E.div("bar"))))

        self.assertEqual(
            "foo\nbar",
            html_to_text(E.div(E.div("foo"), "bar")))

        self.assertEqual(
            "foo\nbar\nbaz",
            html_to_text(E.div(
                "foo",
                E.div("bar"),
                E.div("baz")
            )))

    def test_other_block(self):
        # Other block-level elements (e.g. <section>) behave like <div>.
        self.assertEqual(
            "foo\nbar\nbaz",
            html_to_text(E.div(
                "foo",
                E.section("bar"),
                "baz"
            )))

    def test_inline(self):
        # Inline elements such as <span> introduce no separators.
        self.assertEqual(
            "foobarbaz",
            html_to_text(E.div("foo", E.span("bar"), "baz")))

    def test_whitespace(self):
        # Literal newlines inside text nodes collapse to spaces; only
        # markup produces line breaks.
        self.assertEqual(
            "foo bar\nbaz",
            html_to_text(E.div(
                "foo\nbar",
                E.br(),
                "baz")
            ))

        self.assertEqual(
            "foo bar\nbaz",
            html_to_text(E.div(
                E.div(E.span("foo"), " bar"),
                "baz")))
class TestConvertBack(common.TransactionCase):
    """Round-trip tests: render a field through QWeb's t-field with
    branding enabled, then convert the rendered HTML back and check the
    original value is recovered."""

    def setUp(self):
        super(TestConvertBack, self).setUp()

    def field_rountrip_result(self, field, value, expected):
        # Write *value* into *field* of a test record, render it, convert
        # the HTML back via the field-type converter, and compare the
        # result against *expected*.
        model = 'website.converter.test'
        Model = self.registry(model)
        id = Model.create(
            self.cr, self.uid, {
                field: value
            })

        [record] = Model.browse(self.cr, self.uid, [id])

        e = etree.Element('span')
        field_value = 'record.%s' % field
        e.set('t-field', field_value)

        # inherit_branding makes the renderer emit data-oe-* attributes,
        # which from_html relies on.
        rendered = self.registry('website.qweb').render_tag_field(
            e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
                'record': record,
            }, context={'inherit_branding': True}))
        element = html.fromstring(
            rendered, parser=html.HTMLParser(encoding='utf-8'))

        # Pick the converter matching the rendered field type.
        converter = self.registry('website.qweb').get_converter_for(
            element.get('data-oe-type'))

        value_back = converter.from_html(
            self.cr, self.uid, model, Model._fields[field], element)

        if isinstance(expected, str):
            # Python 2: compare as unicode.
            expected = expected.decode('utf-8')

        self.assertEqual(value_back, expected)

    def field_roundtrip(self, field, value):
        # Shorthand: the value must come back unchanged.
        self.field_rountrip_result(field, value, value)

    def test_integer(self):
        self.field_roundtrip('integer', 42)

    def test_float(self):
        self.field_roundtrip('float', 42.567890)
        self.field_roundtrip('float', 324542.567890)

    def test_numeric(self):
        self.field_roundtrip('numeric', 42.77)

    def test_char(self):
        self.field_roundtrip('char', "foo bar")
        # Non-ASCII content must survive the round trip.
        self.field_roundtrip('char', "ⒸⓄⓇⒼⒺ")

    def test_selection(self):
        self.field_roundtrip('selection', 3)

    def test_selection_str(self):
        self.field_roundtrip('selection_str', 'B')

    def test_text(self):
        self.field_roundtrip('text', textwrap.dedent("""\
            You must obey the dance commander
            Givin' out the order for fun
            You must obey the dance commander
            You know that he's the only one
            Who gives the orders here,
            Alright
            Who gives the orders here,
            Alright

            It would be awesome
            If we could dance-a
            It would be awesome, yeah
            Let's take the chance-a
            It would be awesome, yeah
            Let's start the show
            Because you never know
            You never know
            You never know until you go"""))

    def test_m2o(self):
        """ the M2O field conversion (from html) is markedly different from
        others as it directly writes into the m2o and returns nothing at all.
        """
        model = 'website.converter.test'
        field = 'many2one'

        Sub = self.registry('website.converter.test.sub')
        sub_id = Sub.create(self.cr, self.uid, {'name': "Foo"})

        Model = self.registry(model)
        id = Model.create(self.cr, self.uid, {field: sub_id})

        [record] = Model.browse(self.cr, self.uid, [id])

        e = etree.Element('span')
        field_value = 'record.%s' % field
        e.set('t-field', field_value)

        rendered = self.registry('website.qweb').render_tag_field(
            e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
                'record': record,
            }, context={'inherit_branding': True}))

        element = html.fromstring(rendered, parser=html.HTMLParser(encoding='utf-8'))
        # emulate edition
        element.text = "New content"

        converter = self.registry('website.qweb').get_converter_for(
            element.get('data-oe-type'))

        value_back = converter.from_html(
            self.cr, self.uid, model, Model._fields[field], element)

        self.assertIsNone(
            value_back, "the m2o converter should return None to avoid spurious"
                        " or useless writes on the parent record")

        self.assertEqual(
            Sub.browse(self.cr, self.uid, sub_id).name,
            "New content",
            "element edition should have been written directly to the m2o record"
        )
class TestTitleToSlug(unittest2.TestCase):
    """
    Those tests should pass with or without python-slugify
    See website/models/website.py slugify method
    """

    def test_spaces(self):
        # Leading/trailing whitespace is stripped.
        self.assertEqual(
            "spaces",
            slugify(u"   spaces   ")
        )

    def test_unicode(self):
        # Accented characters are transliterated to ASCII.
        self.assertEqual(
            "heterogeneite",
            slugify(u"hétérogénéité")
        )

    def test_underscore(self):
        self.assertEqual(
            "one-two",
            slugify(u"one_two")
        )

    def test_caps(self):
        self.assertEqual(
            "camelcase",
            slugify(u"CamelCase")
        )

    def test_special_chars(self):
        # Punctuation is dropped; remaining runs are dash-separated.
        self.assertEqual(
            "o-d-o-o",
            slugify(u"o!#d{|\o/@~o&%^?")
        )

    def test_str_to_unicode(self):
        # A byte-string input must be handled as well as unicode.
        self.assertEqual(
            "espana",
            slugify("España")
        )

    def test_numbers(self):
        self.assertEqual(
            "article-1",
            slugify(u"Article 1")
        )

    def test_all(self):
        # Combination of the cases above.
        self.assertEqual(
            "do-you-know-martine-a-la-plage",
            slugify(u"Do YOU know 'Martine à la plage' ?")
        )
| agpl-3.0 |
emory-libraries/eulfedora | test/test_fedora/base.py | 2 | 4077 | # file test_fedora/base.py
#
# Copyright 2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import logging
from eulxml import xmlmap
from eulfedora.server import Repository
from eulfedora.util import RequestFailed, force_bytes, force_text
from test.localsettings import FEDORA_ROOT, \
FEDORA_USER, FEDORA_PASSWORD, FEDORA_PIDSPACE
logger = logging.getLogger(__name__)
# Directory holding the test fixture files, next to this module.
FIXTURE_ROOT = os.path.join(os.path.dirname(__file__), 'fixtures')


def fixture_path(fname):
    """Return the path of fixture file *fname* under FIXTURE_ROOT."""
    return os.path.join(FIXTURE_ROOT, fname)


def load_fixture_data(fname):
    """Read fixture *fname* and return its contents as bytes."""
    with open(fixture_path(fname)) as fixture_file:
        contents = fixture_file.read()
    return force_bytes(contents)
class _MinimalFoxml(xmlmap.XmlObject):
    """Minimal FOXML mapping exposing only the object's PID attribute,
    used to rewrite the pid of fixture documents before ingest."""
    pid = xmlmap.StringField('@PID')
class FedoraTestCase(unittest.TestCase):
    """Base TestCase that manages a Fedora repository connection and
    ingests/purges fixture objects around each test."""

    def __init__(self, *args, **kwargs):
        unittest.TestCase.__init__(self, *args, **kwargs)
        self.fedora_fixtures_ingested = []  # pids to purge in tearDown
        self.pidspace = FEDORA_PIDSPACE

        self.repo = Repository(FEDORA_ROOT, FEDORA_USER, FEDORA_PASSWORD)
        # fixture cleanup happens in tearDown, which doesn't always run
        # if a test fails - clean up stale test objects from a previous run here
        stale_objects = list(self.repo.find_objects(pid__contains='%s:*' % self.pidspace))
        if stale_objects:
            logger.info('Removing %d stale test object(s) in pidspace %s' \
                % (len(stale_objects), self.pidspace))
            for obj in stale_objects:
                try:
                    self.repo.purge_object(obj.pid)
                except RequestFailed as rf:
                    # best-effort cleanup: log and continue
                    logger.warn('Error purging stale test object %s (TestCase init): %s' % \
                        (obj.pid, rf))

    def setUp(self):
        # NOTE: queries require RI flush=True or test objects will not show up in RI
        self.repo.risearch.RISEARCH_FLUSH_ON_QUERY = True
        self.api = self.repo.api
        # self.api = ApiFacade(self.opener)
        fixtures = getattr(self, 'fixtures', [])
        for fixture in fixtures:
            self.ingestFixture(fixture)

    def tearDown(self):
        for pid in self.fedora_fixtures_ingested:
            try:
                self.repo.purge_object(pid)
            except RequestFailed as rf:
                logger.warn('Error purging test object %s in tear down: %s' % \
                    (pid, rf))

    def getNextPid(self):
        # Ask Fedora for a fresh pid in this test's pidspace.
        pidspace = getattr(self, 'pidspace', None)
        return self.repo.get_next_pid(namespace=pidspace)

    def loadFixtureData(self, fname):
        data = load_fixture_data(fname)
        # if pidspace is specified, get a new pid from fedora and set it as the pid in the xml
        if hasattr(self, 'pidspace'):
            xml = xmlmap.load_xmlobject_from_string(data, _MinimalFoxml)
            xml.pid = self.getNextPid()
            return xml.serialize()
        else:
            return data

    def ingestFixture(self, fname):
        obj = self.loadFixtureData(fname)
        pid = self.repo.ingest(force_text(obj))
        if pid:
            # we'd like this always to be true. if ingest fails we should
            # throw an exception. that probably hasn't been thoroughly
            # tested yet, though, so we'll check it until it has been.
            self.append_pid(pid)

    # note: renamed from append_test_pid so that nosetests doesn't
    # autodetect and attempt to run as a unit test.
    def append_pid(self, pid):
        self.fedora_fixtures_ingested.append(force_text(pid))
| apache-2.0 |
abstract-open-solutions/OCB | addons/crm/wizard/crm_lead_to_opportunity.py | 146 | 13701 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import re
class crm_lead2opportunity_partner(osv.osv_memory):
    # Transient wizard: convert one lead to an opportunity, optionally
    # merging it with duplicate leads/opportunities of the same partner.
    _name = 'crm.lead2opportunity.partner'
    _description = 'Lead To Opportunity Partner'
    _inherit = 'crm.partner.binding'

    _columns = {
        'name': fields.selection([
            ('convert', 'Convert to opportunity'),
            ('merge', 'Merge with existing opportunities')
        ], 'Conversion Action', required=True),
        'opportunity_ids': fields.many2many('crm.lead', string='Opportunities'),
        'user_id': fields.many2one('res.users', 'Salesperson', select=True),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', select=True),
    }

    def onchange_action(self, cr, uid, ids, action, context=None):
        # Only keep a partner when linking to an existing one.
        return {'value': {'partner_id': False if action != 'exist' else self._find_matching_partner(cr, uid, context=context)}}

    def _get_duplicated_leads(self, cr, uid, partner_id, email, include_lost=False, context=None):
        """
        Search for opportunities that have the same partner and that arent done or cancelled
        """
        return self.pool.get('crm.lead')._get_duplicated_leads_by_emails(cr, uid, partner_id, email, include_lost=include_lost, context=context)

    def default_get(self, cr, uid, fields, context=None):
        """
        Default get for name, opportunity_ids.
        If there is an exisitng partner link to the lead, find all existing
        opportunities links with this partner to merge all information together
        """
        lead_obj = self.pool.get('crm.lead')

        res = super(crm_lead2opportunity_partner, self).default_get(cr, uid, fields, context=context)
        if context.get('active_id'):
            tomerge = [int(context['active_id'])]

            partner_id = res.get('partner_id')
            lead = lead_obj.browse(cr, uid, int(context['active_id']), context=context)
            email = lead.partner_id and lead.partner_id.email or lead.email_from

            # Collect duplicates by partner/email (including lost leads).
            tomerge.extend(self._get_duplicated_leads(cr, uid, partner_id, email, include_lost=True, context=context))
            tomerge = list(set(tomerge))

            if 'action' in fields and not res.get('action'):
                res.update({'action' : partner_id and 'exist' or 'create'})
            if 'partner_id' in fields:
                res.update({'partner_id' : partner_id})
            if 'name' in fields:
                # Default to merge only when duplicates were found.
                res.update({'name' : len(tomerge) >= 2 and 'merge' or 'convert'})
            if 'opportunity_ids' in fields and len(tomerge) >= 2:
                res.update({'opportunity_ids': tomerge})
            if lead.user_id:
                res.update({'user_id': lead.user_id.id})
            if lead.section_id:
                res.update({'section_id': lead.section_id.id})
        return res

    def on_change_user(self, cr, uid, ids, user_id, section_id, context=None):
        """ When changing the user, also set a section_id or restrict section id
            to the ones user_id is member of. """
        if user_id:
            if section_id:
                user_in_section = self.pool.get('crm.case.section').search(cr, uid, [('id', '=', section_id), '|', ('user_id', '=', user_id), ('member_ids', '=', user_id)], context=context, count=True)
            else:
                user_in_section = False
            if not user_in_section:
                # Fall back to the salesman's default section.
                result = self.pool['crm.lead'].on_change_user(cr, uid, ids, user_id, context=context)
                section_id = result.get('value') and result['value'].get('section_id') and result['value']['section_id'] or False
        return {'value': {'section_id': section_id}}

    def view_init(self, cr, uid, fields, context=None):
        """
        Check some preconditions before the wizard executes.
        """
        if context is None:
            context = {}
        lead_obj = self.pool.get('crm.lead')
        for lead in lead_obj.browse(cr, uid, context.get('active_ids', []), context=context):
            if lead.probability == 100:
                raise osv.except_osv(_("Warning!"), _("Closed/Dead leads cannot be converted into opportunities."))
        return False

    def _convert_opportunity(self, cr, uid, ids, vals, context=None):
        # Convert each lead in vals['lead_ids'], binding/creating the
        # partner per the wizard's action, then assign salesmen.
        if context is None:
            context = {}
        lead = self.pool.get('crm.lead')
        res = False
        lead_ids = vals.get('lead_ids', [])
        team_id = vals.get('section_id', False)
        partner_id = vals.get('partner_id')
        data = self.browse(cr, uid, ids, context=context)[0]
        leads = lead.browse(cr, uid, lead_ids, context=context)
        for lead_id in leads:
            partner_id = self._create_partner(cr, uid, lead_id.id, data.action, partner_id or lead_id.partner_id.id, context=context)
            res = lead.convert_opportunity(cr, uid, [lead_id.id], partner_id, [], False, context=context)
        user_ids = vals.get('user_ids', False)

        if context.get('no_force_assignation'):
            # Only assign leads that have no salesman yet.
            leads_to_allocate = [lead_id.id for lead_id in leads if not lead_id.user_id]
        else:
            leads_to_allocate = lead_ids

        if user_ids:
            lead.allocate_salesman(cr, uid, leads_to_allocate, user_ids, team_id=team_id, context=context)

        return res

    def action_apply(self, cr, uid, ids, context=None):
        """
        Convert lead to opportunity or merge lead and opportunity and open
        the freshly created opportunity view.
        """
        if context is None:
            context = {}

        lead_obj = self.pool['crm.lead']

        w = self.browse(cr, uid, ids, context=context)[0]
        opp_ids = [o.id for o in w.opportunity_ids]
        vals = {
            'section_id': w.section_id.id,
        }
        if w.partner_id:
            vals['partner_id'] = w.partner_id.id
        if w.name == 'merge':
            lead_id = lead_obj.merge_opportunity(cr, uid, opp_ids, context=context)
            lead_ids = [lead_id]
            lead = lead_obj.read(cr, uid, lead_id, ['type', 'user_id'], context=context)
            if lead['type'] == "lead":
                # Merged record is still a lead: convert it too.
                context = dict(context, active_ids=lead_ids)
                vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]})
                self._convert_opportunity(cr, uid, ids, vals, context=context)
            elif not context.get('no_force_assignation') or not lead['user_id']:
                vals.update({'user_id': w.user_id.id})
                lead_obj.write(cr, uid, lead_id, vals, context=context)
        else:
            lead_ids = context.get('active_ids', [])
            vals.update({'lead_ids': lead_ids, 'user_ids': [w.user_id.id]})
            self._convert_opportunity(cr, uid, ids, vals, context=context)

        return self.pool.get('crm.lead').redirect_opportunity_view(cr, uid, lead_ids[0], context=context)

    def _create_partner(self, cr, uid, lead_id, action, partner_id, context=None):
        """
        Create partner based on action.
        :return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
        """
        #TODO this method in only called by crm_lead2opportunity_partner
        #wizard and would probably diserve to be refactored or at least
        #moved to a better place
        if context is None:
            context = {}
        lead = self.pool.get('crm.lead')
        if action == 'each_exist_or_create':
            ctx = dict(context)
            ctx['active_id'] = lead_id
            partner_id = self._find_matching_partner(cr, uid, context=ctx)
            action = 'create'
        res = lead.handle_partner_assignation(cr, uid, [lead_id], action, partner_id, context=context)
        return res.get(lead_id)
class crm_lead2opportunity_mass_convert(osv.osv_memory):
    # Mass variant of the conversion wizard: converts several leads at
    # once, with optional per-partner deduplication.
    _name = 'crm.lead2opportunity.partner.mass'
    _description = 'Mass Lead To Opportunity Partner'
    _inherit = 'crm.lead2opportunity.partner'

    _columns = {
        'user_ids': fields.many2many('res.users', string='Salesmen'),
        'section_id': fields.many2one('crm.case.section', 'Sales Team', select=True),
        'deduplicate': fields.boolean('Apply deduplication', help='Merge with existing leads/opportunities of each partner'),
        'action': fields.selection([
            ('each_exist_or_create', 'Use existing partner or create'),
            ('nothing', 'Do not link to a customer')
        ], 'Related Customer', required=True),
        'force_assignation': fields.boolean('Force assignation', help='If unchecked, this will leave the salesman of duplicated opportunities'),
    }

    _defaults = {
        'deduplicate': True,
    }

    def default_get(self, cr, uid, fields, context=None):
        res = super(crm_lead2opportunity_mass_convert, self).default_get(cr, uid, fields, context)
        if 'partner_id' in fields:
            # avoid forcing the partner of the first lead as default
            res['partner_id'] = False
        if 'action' in fields:
            res['action'] = 'each_exist_or_create'
        if 'name' in fields:
            res['name'] = 'convert'
        if 'opportunity_ids' in fields:
            res['opportunity_ids'] = False
        return res

    def on_change_action(self, cr, uid, ids, action, context=None):
        vals = {}
        if action != 'exist':
            vals = {'value': {'partner_id': False}}
        return vals

    def on_change_deduplicate(self, cr, uid, ids, deduplicate, context=None):
        # Preview which of the selected leads have duplicates, so the view
        # can show them in opportunity_ids.
        if context is None:
            context = {}
        active_leads = self.pool['crm.lead'].browse(cr, uid, context['active_ids'], context=context)

        partner_ids = [(lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from) for lead in active_leads]

        partners_duplicated_leads = {}
        for partner_id, email in partner_ids:
            duplicated_leads = self._get_duplicated_leads(cr, uid, partner_id, email)
            if len(duplicated_leads) > 1:
                partners_duplicated_leads.setdefault((partner_id, email), []).extend(duplicated_leads)

        leads_with_duplicates = []
        for lead in active_leads:
            lead_tuple = (lead.partner_id.id, lead.partner_id.email if lead.partner_id else lead.email_from)
            if len(partners_duplicated_leads.get(lead_tuple, [])) > 1:
                leads_with_duplicates.append(lead.id)

        return {'value': {'opportunity_ids': leads_with_duplicates}}

    def _convert_opportunity(self, cr, uid, ids, vals, context=None):
        """
        When "massively" (more than one at a time) converting leads to
        opportunities, check the salesteam_id and salesmen_ids and update
        the values before calling super.
        """
        if context is None:
            context = {}
        data = self.browse(cr, uid, ids, context=context)[0]
        salesteam_id = data.section_id and data.section_id.id or False
        salesmen_ids = []
        if data.user_ids:
            salesmen_ids = [x.id for x in data.user_ids]
        vals.update({'user_ids': salesmen_ids, 'section_id': salesteam_id})
        return super(crm_lead2opportunity_mass_convert, self)._convert_opportunity(cr, uid, ids, vals, context=context)

    def mass_convert(self, cr, uid, ids, context=None):
        data = self.browse(cr, uid, ids, context=context)[0]
        ctx = dict(context)
        if data.name == 'convert' and data.deduplicate:
            # Merge each group of duplicates first, then convert the
            # merged survivors together with the non-duplicated leads.
            merged_lead_ids = []
            remaining_lead_ids = []
            lead_selected = context.get('active_ids', [])
            for lead_id in lead_selected:
                if lead_id not in merged_lead_ids:
                    lead = self.pool['crm.lead'].browse(cr, uid, lead_id, context=context)
                    duplicated_lead_ids = self._get_duplicated_leads(cr, uid, lead.partner_id.id, lead.partner_id and lead.partner_id.email or lead.email_from)
                    if len(duplicated_lead_ids) > 1:
                        lead_id = self.pool.get('crm.lead').merge_opportunity(cr, uid, duplicated_lead_ids, False, False, context=context)
                        merged_lead_ids.extend(duplicated_lead_ids)
                        remaining_lead_ids.append(lead_id)
            active_ids = set(context.get('active_ids', []))
            active_ids = active_ids.difference(merged_lead_ids)
            active_ids = active_ids.union(remaining_lead_ids)
            ctx['active_ids'] = list(active_ids)
        ctx['no_force_assignation'] = context.get('no_force_assignation', not data.force_assignation)
        return self.action_apply(cr, uid, ids, context=ctx)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nnaabbcc/exercise | python/Helloword.py | 1 | 1449 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This is demo for Hello word, not Hello World. just kidding
# 一些中文的注释也是可以用的
import os
def main():
    """Demo entry point: prints strings, calls foo(), and shows lists
    and for-loops (Python 2 syntax)."""
    print 'Hello word, not Hello world, just kiding'
    # This is single line comments
    print "This is in single quote, okay"
    print "This is also okay. in double quote"
    ''' This is multi line comments
    second line of the comments
    '''
    foo(5, 10)  # this is also a comment: call a function
    print "Hello " * 10
    # NOTE(review): the name says 10 but the string is repeated 20 times.
    hello10 = 'Hello ' * 20
    print hello10
    print '当前目录是: ' + os.getcwd()  # a utf-8 source file may contain Chinese text
    # variables must be assigned before use
    counter = 0
    counter += 1
    # demonstrate how loops are used
    foods = ['苹果', '李子', '杏子', '梨']  # a list
    for food in foods:  # for loop header must end with :
        print 'I love ' + food
    print 'Count to 10'
    for i in range(10):
        print i
def foo(param1, param2):
    """Print the sum of the two parameters, demonstrate if/elif/else,
    and return the sum."""
    res = param1 + param2
    print '%s + %s = %s' % (param1, param2, res)
    # Let's do some if
    if param1 < 5:
        print '%s smaller than 5' % param1
    elif param1 > 7 and (param2 < 10 or res < 15):
        print "I don't know something"
    else:
        print "Papa"
    return res
# Only run the demo when executed as a script, not when imported.
if __name__ == '__main__':
    print 'main() is not main, here is the main'
    main()
print 'This is python'
| mit |
parag2489/Image-Quality | referenceCNN_color_imageQuality_regressMOS.py | 1 | 18087 | import pdb
from keras.models import Sequential, Graph
from keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Lambda
from keras.layers.convolutional import Convolution1D, Convolution2D, MaxPooling2D
# from keras.layers.normalization import BatchNormalization
# from keras.layers.advanced_activations import LeakyReLU
from keras.optimizers import SGD, RMSprop, Adam
from keras.layers.core import Merge
from keras.regularizers import l2, activity_l2
import sys
import numpy as np
import scipy
import theano
from keras.layers.convolutional import ZeroPadding2D
# from scipy import io
from keras import backend as K
import h5py
from keras.utils import np_utils
import time
import cv2
from keras import callbacks
from keras.callbacks import ModelCheckpoint, EarlyStopping
from decimal import Decimal
# Experiment configuration (paths and hyper-parameters are hard-coded).
mySeed = sys.argv[1]  # random seed string passed on the command line
np.random.seed(int(float(mySeed)))
doWeightLoadSaveTest = True
patchHeight = 32  # input patch size in pixels
patchWidth = 32
channels = 3  # RGB input
learningRate = 0.005
regularizer = 0.0005  # L2 weight-decay coefficient
initialization = "he_normal"
# leak = 1./3. # for PReLU()
Numepochs = 75
batchSize = 50
validateAfterEpochs = 1
nb_output = 1  # single regressed quality (MOS) score
TrainFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_mulitPatchBackup_Apr23/imageQuality_HDF5Files_Apr20/hdf5Files_train/'
ValFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_mulitPatchBackup_Apr23/imageQuality_HDF5Files_Apr20/hdf5Files_val/'
TestFilesPath = '/media/ASUAD\pchandak/Seagate Expansion Drive/imageQuality_mulitPatchBackup_Apr23/imageQuality_HDF5Files_Apr20/hdf5Files_test/'
# logger = '/media/AccessParag/Code/DNN_imageQuality_regression_Apr20_corrlnLoss_lowKernels.txt'
weightSavePath = '/media/AccessParag/Code/weights_MOSRegress/'
class myCallback(callbacks.Callback):
    """Keras callback: after each epoch, report SROCC/PLCC on the
    validation and test sets, checkpoint the weights with the best
    validation correlation, and halve the learning rate every 10 epochs.

    NOTE(review): relies on module-level globals (model, modelIndex,
    valData/valLabels, testData/testLabels, batchSize, weightSavePath)
    being defined before training starts — confirm at call site.
    """

    def on_epoch_begin(self, epoch, logs={}):
        # pdb.set_trace()
        if epoch == 0:
            # Initialize tracking state on the first epoch.
            self.best_mean_corr = -np.inf  # best SROCC+PLCC seen so far
            self.metric = []  # per-epoch validation losses

    def on_epoch_end(self, epoch, logs={}):
        # Always save the latest weights.
        model.save_weights(weightSavePath + "bestWeights_referenceCNN_latestModel.h5",overwrite=True)
        # modelIndex == 1 is the Graph model: predict returns a dict.
        if modelIndex == 1:
            predictedScoresVal = np.ravel((model.predict({'input': valData},batch_size=batchSize)).get('output'))
        else:
            predictedScoresVal = np.ravel(model.predict(valData,batch_size=batchSize))
        sroccVal = scipy.stats.spearmanr(predictedScoresVal, valLabels)
        plccVal = scipy.stats.pearsonr(predictedScoresVal, valLabels)
        t_str_val = '\nSpearman corr for validation set is ' + str(sroccVal[0]) + '\nPearson corr for validation set is '+ str(plccVal[0]) + '\nMean absolute error for validation set is ' + str(np.mean(np.abs(predictedScoresVal-valLabels))) + '\n'
        print t_str_val
        # Checkpoint on the sum of the two validation correlations.
        mean_corr = sroccVal[0] + plccVal[0]
        if mean_corr > self.best_mean_corr:
            self.best_mean_corr = mean_corr
            model.save_weights(weightSavePath + "bestWeights_referenceCNN_bestCorr.h5",overwrite=True)
            print("Best correlation model saved at Epoch " + str(epoch) + '\n')
        # Report (but do not checkpoint on) test-set correlations.
        if modelIndex == 1:
            predictedScoresTest = np.ravel((model.predict({'input': testData},batch_size=batchSize)).get('output'))
        else:
            predictedScoresTest = np.ravel(model.predict(testData,batch_size=batchSize))
        sroccTest = scipy.stats.spearmanr(predictedScoresTest, testLabels)
        plccTest = scipy.stats.pearsonr(predictedScoresTest, testLabels)
        t_str_test = '\nSpearman corr for test set is ' + str(sroccTest[0]) + '\nPearson corr for test set is '+ str(plccTest[0]) + '\nMean absolute error for test set is ' + str(np.mean(np.abs(predictedScoresTest-testLabels))) + '\n'
        print t_str_test
        self.metric.append(logs.get("val_loss"))
        # Halve the learning rate every 10 epochs (skipping epoch 0).
        if epoch % 10 == 0 and epoch != 0:
            model.optimizer.lr.set_value(round(Decimal(0.5*model.optimizer.lr.get_value()),8))
            learningRate = model.optimizer.lr.get_value()
            # print("")
            print("The current learning rate is: " + str(learningRate) + '\n')
def min_pool_inp(x):
    """Negate the input tensor, so max-pooling the result is equivalent
    to min-pooling the original activations."""
    return x * -1
def linear_correlation_loss(y_true, y_pred):
    """Loss coupling the mean absolute error with the (clipped) Pearson
    correlation between predictions and targets: low MAE and high
    correlation both lower the loss."""
    eps = 1e-6  # guards against zero standard deviation
    n = K.shape(y_true)[0]
    # Standardize both series, then take the sample Pearson correlation.
    z_true = (y_true - K.mean(y_true)) / (K.std(y_true) + eps)
    z_pred = (y_pred - K.mean(y_pred)) / (K.std(y_pred) + eps)
    corr = K.clip(K.sum(z_true * z_pred) / (n - 1), -1., 1.)
    mae = K.mean(K.abs(y_true - y_pred))
    # Combine: 1/(0.1 + exp(-0.5*log(mae))) grows with MAE, and the
    # (2 - corr) factor grows as the correlation drops.
    return (1. / (0.1 + K.exp(-0.5 * K.log(mae)))) * (2 - corr)
def constructDNNModel(modelIndex):
model = []
if modelIndex == 1: # CVPR'14 CNN
model = Graph()
model.add_input(name='input', input_shape=(channels, patchHeight, patchWidth))
model.add_node(Convolution2D(50, 7, 7, init=initialization, activation='linear', border_mode='valid',
input_shape=(1, 32, 32)), name='conv1', input='input')
model.add_node(MaxPooling2D(pool_size=(26, 26)), name='max_pool', input='conv1')
model.add_node(Flatten(), name='flat_max', input='max_pool')
model.add_node(layer=Lambda(min_pool_inp, output_shape=(50, 26, 26)), name='invert_val', input='conv1')
model.add_node(MaxPooling2D(pool_size=(26, 26)), name='min_pool', input='invert_val')
model.add_node(Flatten(), name='flat_min', input='min_pool')
model.add_node(Dense(800, init=initialization, activation='relu'), name='dense1',
inputs=['flat_max', 'flat_min'], merge_mode='concat')
model.add_node(Dense(800, init=initialization, activation='relu'), name='dense2', input='dense1')
model.add_node(Dropout(0.5), name='dropout2', input='dense2')
model.add_node(Dense(1, activation='linear'), name='output', input='dropout2', create_output=True)
# print model.get_config()
print("Model params = " + str(model.count_params()))
sgd = SGD(lr=learningRate, momentum=0.9, decay=1e-6, Nesterov=True)
model.compile(loss={'output':'mae'},optimizer=sgd)
print 'Finsihed compiling the model. No error in model construction'
#
print '......Starting training .........\n\n'
elif modelIndex == 2: # train_imageQuality_regressMOS_loweKernels.py
model = Sequential()
model.add(Activation('linear',input_shape=(channels,patchHeight,patchWidth))) # 32
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 30
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 28
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 26
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 25
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 23
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 21
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 19
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 18
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 16
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 14
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 12
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 11
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 9
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 7
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 5
model.add(MaxPooling2D(pool_size=(2,2))) # 2
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Flatten())
# model.add(Dropout(0.25))
model.add(Dense(800, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(800, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(nb_output, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "linear"))
print("Built the model")
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
if doWeightLoadSaveTest:
# pdb.set_trace()
model.save_weights(weightSavePath + 'weightsLoadSaveTest.h5', overwrite=True)
model.load_weights(weightSavePath + 'weightsLoadSaveTest.h5')
print("Weight load/save test passed...")
# model.load_weights('/media/AccessParag/Code/weights/bestWeightsAtEpoch_000.h5')
# print("Weights at Epoch 0 loaded")
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
sgd = SGD(lr=learningRate, decay=1e-6, momentum=0.9, nesterov=True)
# adam = Adam(lr=learningRate, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
model.compile(loss=linear_correlation_loss, optimizer=sgd)
print("Compilation Finished")
elif modelIndex == 3: # train_imageQuality_regressMOS_loweKernels.py
model = Sequential()
model.add(Activation('linear',input_shape=(channels,patchHeight,patchWidth))) # 32
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 30
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 28
model.add(Convolution2D(48, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 26
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 25
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 23
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 21
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 19
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 18
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 16
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 14
model.add(Convolution2D(64, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 12
model.add(MaxPooling2D(pool_size=(2,2),strides=(1,1))) # 11
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 9
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 7
model.add(Convolution2D(128, 3, 3, border_mode='valid', trainable=True, init=initialization, W_regularizer=l2(regularizer), subsample=(1, 1), activation = "relu")) # 5
model.add(MaxPooling2D(pool_size=(2,2))) # 2
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
model.add(Flatten())
# model.add(Dropout(0.25))
model.add(Dense(800, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(800, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "relu"))
model.add(Dropout(0.5))
model.add(Dense(nb_output, trainable=True, init=initialization, W_regularizer=l2(regularizer), activation = "linear"))
print("Built the model")
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
if doWeightLoadSaveTest:
# pdb.set_trace()
model.save_weights(weightSavePath + 'weightsLoadSaveTest.h5', overwrite=True)
model.load_weights(weightSavePath + 'weightsLoadSaveTest.h5')
print("Weight load/save test passed...")
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
sgd = SGD(lr=learningRate, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss=linear_correlation_loss, optimizer=sgd)
print("Compilation Finished")
return model
# ---- echo the run configuration so it is captured in the job log ----
print('Parameters that will be used')
print("---------------------------------------------------------------------------------")
print("**Image Sizes**")
print("Image Height : "+str(patchHeight))
print("Image Width : "+str(patchWidth))
print("Image Channels: "+str(channels))
print("\n")
print("**Network Parameters**")
print("Learning Rate : "+str(learningRate))
print("Regularizer : "+str(regularizer))
print("Initialization : "+initialization)
print("\n")
print("**Run Variables**")
print("Total # of epochs : "+str(Numepochs))
print("# samples per batch : "+str(batchSize))
print("Validate After Epochs : "+str(validateAfterEpochs))
print("\n")
print("**Files Path**")
print("Trainig Files Path : "+TrainFilesPath)
print("Valid Files Path : "+ValFilesPath)
print("Weights Save Path : "+weightSavePath)
print("\n")
print("---------------------------------------------------------------------------------")
# ---- training callbacks: custom metrics/LR schedule, early stopping, best-val-loss checkpointing ----
cb = myCallback()
terminateTraining = EarlyStopping(monitor='val_loss', patience=20, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath = weightSavePath + 'bestWeights_referenceCNN_valLoss.h5', verbose=1, save_best_only=True)
# ------------------------------------------------------------------------------------------------------------------------------------------------ #
# ---- load pre-extracted train/val/test patches and MOS labels from HDF5 ----
hdfFileTrain = h5py.File(TrainFilesPath + "QualityRegressMOS_data_March31.h5","r")
trainData = hdfFileTrain["data"][:]
trainLabels = hdfFileTrain["labels"][:]
hdfFileVal = h5py.File(ValFilesPath + "QualityRegressMOS_data_March31.h5","r")
valData = hdfFileVal["data"][:]
valLabels = hdfFileVal["labels"][:]
hdfFileTest = h5py.File(TestFilesPath + "QualityRegressMOS_data_March31.h5","r")
testData = hdfFileTest["data"][:]
testLabels = hdfFileTest["labels"][:]
# modelIndex selects the architecture in constructDNNModel; taken from the command line.
modelIndex = int(float(sys.argv[2]))
model = constructDNNModel(modelIndex)
# Graph models (modelIndex == 1) take named input/output dicts; Sequential models take arrays.
if modelIndex == 1:
    model.fit({'input':trainData, 'output':trainLabels}, batch_size=batchSize, nb_epoch=Numepochs, verbose=0, validation_data={'input':valData, 'output': valLabels},shuffle=True,callbacks=[checkpointer,cb,terminateTraining])
else:
    model.fit(trainData,trainLabels,batch_size=batchSize,nb_epoch=Numepochs,verbose=1,callbacks=[cb,checkpointer,terminateTraining],validation_data=(valData,valLabels),shuffle=True,show_accuracy=False)
# pdb.set_trace() | mit |
opennode/nodeconductor-openstack | src/waldur_openstack/openstack_tenant/migrations/0034_immutable_default_json.py | 1 | 1296 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2018-07-03 17:09
from __future__ import unicode_literals
from django.db import migrations
import waldur_core.core.fields
class Migration(migrations.Migration):
    # Auto-generated schema migration: re-declares several JSON columns with
    # waldur_core's JSONField using callable defaults (dict/list) so that model
    # instances no longer share a single mutable default object.
    dependencies = [
        ('openstack_tenant', '0033_unique_instance_backend_id'),
    ]
    operations = [
        migrations.AlterField(
            model_name='instance',
            name='action_details',
            field=waldur_core.core.fields.JSONField(default=dict),
        ),
        migrations.AlterField(
            model_name='snapshot',
            name='action_details',
            field=waldur_core.core.fields.JSONField(default=dict),
        ),
        migrations.AlterField(
            model_name='subnet',
            name='allocation_pools',
            field=waldur_core.core.fields.JSONField(default=dict),
        ),
        migrations.AlterField(
            model_name='subnet',
            name='dns_nameservers',
            field=waldur_core.core.fields.JSONField(default=list, help_text='List of DNS name servers associated with the subnet.'),
        ),
        migrations.AlterField(
            model_name='volume',
            name='action_details',
            field=waldur_core.core.fields.JSONField(default=dict),
        ),
    ]
| mit |
areski/django | tests/template_tests/filter_tests/test_truncatewords.py | 235 | 1755 | from django.template.defaultfilters import truncatewords
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class TruncatewordsTests(SimpleTestCase):
    # Template-level tests: truncatewords must not mark its output safe, so
    # '&' is escaped unless autoescape is off or the input was already safe.
    @setup({
        'truncatewords01': '{% autoescape off %}{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}{% endautoescape %}'
    })
    def test_truncatewords01(self):
        # With autoescape off, both plain and mark_safe inputs render unescaped.
        output = self.engine.render_to_string(
            'truncatewords01', {'a': 'alpha & bravo', 'b': mark_safe('alpha &amp; bravo')}
        )
        self.assertEqual(output, 'alpha & ... alpha &amp; ...')

    @setup({'truncatewords02': '{{ a|truncatewords:"2" }} {{ b|truncatewords:"2"}}'})
    def test_truncatewords02(self):
        # With autoescape on (default), the plain input's '&' is escaped while
        # the mark_safe input is left untouched.
        output = self.engine.render_to_string(
            'truncatewords02', {'a': 'alpha & bravo', 'b': mark_safe('alpha &amp; bravo')}
        )
        self.assertEqual(output, 'alpha &amp; ... alpha &amp; ...')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the truncatewords filter function."""

    def test_truncate(self):
        result = truncatewords('A sentence with a few words in it', 1)
        self.assertEqual(result, 'A ...')

    def test_truncate2(self):
        result = truncatewords('A sentence with a few words in it', 5)
        self.assertEqual(result, 'A sentence with a few ...')

    def test_overtruncate(self):
        # A count larger than the word count leaves the text unchanged.
        result = truncatewords('A sentence with a few words in it', 100)
        self.assertEqual(result, 'A sentence with a few words in it')

    def test_invalid_number(self):
        # A non-numeric count is ignored and the text passes through.
        result = truncatewords('A sentence with a few words in it', 'not a number')
        self.assertEqual(result, 'A sentence with a few words in it')

    def test_non_string_input(self):
        # Non-string input is coerced to its string form.
        result = truncatewords(123, 2)
        self.assertEqual(result, '123')
| bsd-3-clause |
171121130/SWI | app/manager/forms.py | 1 | 1950 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, SelectField, RadioField
from wtforms.validators import Required, Length, Email, Regexp, EqualTo
from wtforms import ValidationError
from ..models import User
class UserForm(FlaskForm):
    """Admin form for creating a user; rejects duplicate usernames and emails."""
    # Label '用户名' = "username"; must start with a letter, then letters/digits/./_
    username = StringField('用户名', validators=[
        Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                          'Usernames must have only letters, '
                                          'numbers, dots or underscores')])
    # Label '邮箱' = "email"
    email = StringField('邮箱', validators=[Required(), Length(1, 64), Email()])
    # Label '提交' = "submit"
    submit = SubmitField('提交')

    def validate_email(self, field):
        # WTForms hook: reject an email already present in the User table.
        # Message: "email already registered".
        if User.query.filter_by(email=field.data).first():
            raise ValidationError('邮箱已被注册')

    def validate_username(self, field):
        # WTForms hook: reject a username already present in the User table.
        # Message: "username already taken".
        if User.query.filter_by(username=field.data).first():
            raise ValidationError('用户名已被使用')
class SearchuserForm(FlaskForm):
    """User-search form with both an add ('添加') and a search ('搜索') action."""
    # Label '用户名' = "username"; same character rules as UserForm.username.
    username = StringField('用户名', validators=[
        Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                          'Usernames must have only letters, '
                                          'numbers, dots or underscores')])
    add = SubmitField('添加')
    submit = SubmitField('搜索')
class SearchuserForm2(FlaskForm):
    """User-search form variant: search ('搜索') or download an Excel sheet ('下载excel表格')."""
    # Label '用户名' = "username"; same character rules as UserForm.username.
    username = StringField('用户名', validators=[
        Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
                                          'Usernames must have only letters, '
                                          'numbers, dots or underscores')])
    submit = SubmitField('搜索')
    submit2 = SubmitField('下载excel表格')
# class ListForm(FlaskForm):
# apply = BooleanField('Validation Apply')
# validation = BooleanField('Validation')
#
# submit = SubmitField('Search') | mit |
riveridea/gnuradio | grc/gui/Block.py | 6 | 8155 | """
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Element import Element
import Utils
import Colors
from .. base import odict
from Constants import BORDER_PROXIMITY_SENSITIVITY
from Constants import \
BLOCK_LABEL_PADDING, \
PORT_SEPARATION, LABEL_SEPARATION, \
PORT_BORDER_SEPARATION, POSSIBLE_ROTATIONS
import pygtk
pygtk.require('2.0')
import gtk
import pango
BLOCK_MARKUP_TMPL="""\
#set $foreground = $block.is_valid() and 'black' or 'red'
<span foreground="$foreground" font_desc="Sans 8"><b>$encode($block.get_name())</b></span>"""
class Block(Element):
    """The graphical signal block."""

    def __init__(self):
        """
        Block constructor.
        Add graphics related params (coordinate, rotation) to the block.
        """
        #add the position param
        self.get_params().append(self.get_parent().get_parent().Param(
            block=self,
            n=odict({
                'name': 'GUI Coordinate',
                'key': '_coordinate',
                'type': 'raw',
                'value': '(0, 0)',
                'hide': 'all',
            })
        ))
        self.get_params().append(self.get_parent().get_parent().Param(
            block=self,
            n=odict({
                'name': 'GUI Rotation',
                'key': '_rotation',
                'type': 'raw',
                'value': '0',
                'hide': 'all',
            })
        ))
        Element.__init__(self)

    def get_coordinate(self):
        """
        Get the coordinate from the position param.

        Returns:
            the coordinate tuple (x, y) or (0, 0) if failure
        """
        try: #should evaluate to tuple
            coor = eval(self.get_param('_coordinate').get_value())
            x, y = map(int, coor)
            # Clamp the position so the block stays inside the flow graph area.
            fgW,fgH = self.get_parent().get_size()
            if x <= 0:
                x = 0
            elif x >= fgW - BORDER_PROXIMITY_SENSITIVITY:
                x = fgW - BORDER_PROXIMITY_SENSITIVITY
            if y <= 0:
                y = 0
            elif y >= fgH - BORDER_PROXIMITY_SENSITIVITY:
                y = fgH - BORDER_PROXIMITY_SENSITIVITY
            return (x, y)
        # BUG FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; eval/parse failures are Exceptions.
        except Exception:
            self.set_coordinate((0, 0))
            return (0, 0)

    def set_coordinate(self, coor):
        """
        Set the coordinate into the position param.

        Args:
            coor: the coordinate tuple (x, y)
        """
        self.get_param('_coordinate').set_value(str(coor))

    def get_rotation(self):
        """
        Get the rotation from the position param.

        Returns:
            the rotation in degrees or 0 if failure
        """
        try: #should evaluate to an int (degrees)
            rotation = eval(self.get_param('_rotation').get_value())
            return int(rotation)
        # BUG FIX: was a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; eval/parse failures are Exceptions.
        except Exception:
            self.set_rotation(POSSIBLE_ROTATIONS[0])
            return POSSIBLE_ROTATIONS[0]

    def set_rotation(self, rot):
        """
        Set the rotation into the position param.

        Args:
            rot: the rotation in degrees
        """
        self.get_param('_rotation').set_value(str(rot))

    def create_shapes(self):
        """Update the block, parameters, and ports when a change occurs."""
        Element.create_shapes(self)
        if self.is_horizontal(): self.add_area((0, 0), (self.W, self.H))
        elif self.is_vertical(): self.add_area((0, 0), (self.H, self.W))

    def create_labels(self):
        """Create the labels for the signal block."""
        Element.create_labels(self)
        self._bg_color = self.get_enabled() and Colors.BLOCK_ENABLED_COLOR or Colors.BLOCK_DISABLED_COLOR
        layouts = list()
        #create the main layout
        layout = gtk.DrawingArea().create_pango_layout('')
        layouts.append(layout)
        layout.set_markup(Utils.parse_template(BLOCK_MARKUP_TMPL, block=self))
        self.label_width, self.label_height = layout.get_pixel_size()
        #display the params
        markups = [param.get_markup() for param in self.get_params() if param.get_hide() not in ('all', 'part')]
        if markups:
            layout = gtk.DrawingArea().create_pango_layout('')
            layout.set_spacing(LABEL_SEPARATION*pango.SCALE)
            layout.set_markup('\n'.join(markups))
            layouts.append(layout)
            w,h = layout.get_pixel_size()
            self.label_width = max(w, self.label_width)
            self.label_height += h + LABEL_SEPARATION
        width = self.label_width
        height = self.label_height
        #setup the pixmap
        pixmap = self.get_parent().new_pixmap(width, height)
        gc = pixmap.new_gc()
        gc.set_foreground(self._bg_color)
        pixmap.draw_rectangle(gc, True, 0, 0, width, height)
        #draw the layouts
        h_off = 0
        for i,layout in enumerate(layouts):
            w,h = layout.get_pixel_size()
            if i == 0: w_off = (width-w)/2
            else: w_off = 0
            pixmap.draw_layout(gc, w_off, h_off, layout)
            h_off = h + h_off + LABEL_SEPARATION
        #create vertical and horizontal pixmaps
        self.horizontal_label = pixmap
        if self.is_vertical():
            self.vertical_label = self.get_parent().new_pixmap(height, width)
            Utils.rotate_pixmap(gc, self.horizontal_label, self.vertical_label)
        #calculate width and height needed (tall enough for the label and for
        #whichever side has the most ports, with extra room for bus ports)
        self.W = self.label_width + 2*BLOCK_LABEL_PADDING
        self.H = max(*(
            [self.label_height+2*BLOCK_LABEL_PADDING] + [2*PORT_BORDER_SEPARATION + \
            sum([port.H + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
            for ports in (self.get_sources_gui(), self.get_sinks_gui())] +
            [4*PORT_BORDER_SEPARATION + \
            sum([(port.H) + PORT_SEPARATION for port in ports]) - PORT_SEPARATION
            for ports in ([i for i in self.get_sources_gui() if i.get_type() == 'bus'], [i for i in self.get_sinks_gui() if i.get_type() == 'bus'])]
        ))

    def draw(self, gc, window):
        """
        Draw the signal block with label and inputs/outputs.

        Args:
            gc: the graphics context
            window: the gtk window to draw on
        """
        x, y = self.get_coordinate()
        #draw main block
        Element.draw(
            self, gc, window, bg_color=self._bg_color,
            border_color=self.is_highlighted() and Colors.HIGHLIGHT_COLOR or Colors.BORDER_COLOR,
        )
        #draw label image
        if self.is_horizontal():
            window.draw_drawable(gc, self.horizontal_label, 0, 0, x+BLOCK_LABEL_PADDING, y+(self.H-self.label_height)/2, -1, -1)
        elif self.is_vertical():
            window.draw_drawable(gc, self.vertical_label, 0, 0, x+(self.H-self.label_height)/2, y+BLOCK_LABEL_PADDING, -1, -1)
        #draw ports
        for port in self.get_ports_gui():
            port.draw(gc, window)

    def what_is_selected(self, coor, coor_m=None):
        """
        Get the element that is selected.

        Args:
            coor: the (x,y) tuple
            coor_m: the (x_m, y_m) tuple

        Returns:
            this block, a port, or None
        """
        #ports take priority over the block body
        for port in self.get_ports_gui():
            port_selected = port.what_is_selected(coor, coor_m)
            if port_selected: return port_selected
        return Element.what_is_selected(self, coor, coor_m)
| gpl-3.0 |
sciCloud/OLiMS | lims/tools/bika_services_export.py | 2 | 2289 | from dependencies.dependency import ClassSecurityInfo
from dependencies.dependency import InitializeClass
from dependencies.dependency import SimpleItem
from dependencies.dependency import permissions
from dependencies.dependency import UniqueObject, getToolByName
from lims.tools import ToolFolder
from dependencies.dependency import StringIO
import csv
from lims.interfaces.tools import Ibika_services_export
from dependencies.dependency import implements
class bika_services_export(UniqueObject, SimpleItem):
    """ ServicesExportTool: streams a CSV listing of analysis services. """
    implements(Ibika_services_export)

    security = ClassSecurityInfo()
    id = 'bika_services_export'
    title = 'Services Export Tool'
    description = 'Exports Analysis Service Data.'
    meta_type = 'Services Export Tool'

    security.declareProtected(permissions.View, 'export_file')
    def export_file(self):
        """Build Services.csv from the portal catalog and stream it to the browser."""
        plone_view = self.restrictedTraverse('@@plone')
        delimiter = ','
        filename = 'Services.csv'
        # Header row followed by one row per analysis service, catalog order.
        header = ['Analysis Category', 'Analysis Service', 'KeyWord', 'InstrumentKey', 'Price', 'bulk discount']
        rows = [header]
        brains = self.portal_catalog(portal_type = 'AnalysisService',
                                     sort_on = 'sortable_title')
        for brain in brains:
            service = brain.getObject()
            rows.append([
                service.getCategoryTitle(),
                service.Title(),
                service.getKeyword(),
                service.getInstrumentKeyword(),
                service.getPrice(),
                service.getBulkPrice(),
            ])
        # Serialize to CSV in memory.
        buf = StringIO()
        writer = csv.writer(buf, delimiter = delimiter, \
                            quoting = csv.QUOTE_NONNUMERIC)
        assert(writer)
        writer.writerows(rows)
        result = buf.getvalue()
        buf.close()
        # Stream the file to the browser with download headers.
        setheader = self.REQUEST.RESPONSE.setHeader
        setheader('Content-Length', len(result))
        setheader('Content-Type',
                  'text/comma-separated-values')
        setheader('Content-Disposition', 'inline; filename=%s' % filename)
        self.REQUEST.RESPONSE.write(result)
        return

InitializeClass(bika_services_export)
| agpl-3.0 |
blueskycoco/rtt | bsp/stm32l476-nucleo/rtconfig.py | 12 | 3840 | import os
# toolchains options
# Target architecture/core for the STM32L476 Nucleo BSP build.
ARCH='arm'
CPU='cortex-m4'
CROSS_TOOL='gcc'
# Environment overrides used by the RT-Thread build system.
if os.getenv('RTT_CC'):
    CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
    RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
# NOTE(review): if CROSS_TOOL is none of gcc/keil/iar, PLATFORM and EXEC_PATH
# stay undefined and the branches below raise NameError — confirm intended.
if CROSS_TOOL == 'gcc':
    PLATFORM = 'gcc'
    EXEC_PATH = r'C:\Users\james\MentorGraphics\Sourcery_CodeBench_Lite_for_ARM_EABI\bin'
elif CROSS_TOOL == 'keil':
    PLATFORM = 'armcc'
    EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
    print '================ERROR============================'
    print 'Not support iar yet!'
    print '================================================='
    exit(0)
if os.getenv('RTT_EXEC_PATH'):
    EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
STM32_TYPE = 'STM32F429xx'
if PLATFORM == 'gcc':
    # toolchains
    PREFIX = 'arm-none-eabi-'
    CC = PREFIX + 'gcc'
    AS = PREFIX + 'gcc'
    AR = PREFIX + 'ar'
    CXX = PREFIX + 'g++'
    LINK = PREFIX + 'gcc'
    TARGET_EXT = 'axf'
    SIZE = PREFIX + 'size'
    OBJDUMP = PREFIX + 'objdump'
    OBJCPY = PREFIX + 'objcopy'
    DEVICE = ' -mcpu=cortex-m4 -mthumb -mfpu=fpv4-sp-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
    CFLAGS = DEVICE + ' -g -Wall -DSTM32F429ZI -DSTM32F429_439xx -D__ASSEMBLY__'
    AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
    LFLAGS = DEVICE + ' -lm -lgcc -lc' + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread-stm32.map,-cref,-u,Reset_Handler -T stm32_rom.ld'
    CPATH = ''
    LPATH = ''
    if BUILD == 'debug':
        CFLAGS += ' -O0 -gdwarf-2'
        AFLAGS += ' -gdwarf-2'
    else:
        CFLAGS += ' -O2'
    # Convert the ELF to a raw binary and print section sizes after linking.
    POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
    M_CFLAGS = CFLAGS + ' -mlong-calls -Dsourcerygxx -O0 -fPIC '
    M_LFLAGS = DEVICE + ' -Wl,-z,max-page-size=0x4 -shared -fPIC -e main -nostdlib'
elif PLATFORM == 'armcc':
    # toolchains
    CC = 'armcc'
    AS = 'armasm'
    AR = 'armar'
    LINK = 'armlink'
    TARGET_EXT = 'axf'
    DEVICE = ' --cpu=cortex-m4.fp'
    CFLAGS = DEVICE + ' --apcs=interwork -DSTM32F429_439xx'
    AFLAGS = DEVICE
    LFLAGS = DEVICE + ' --info sizes --info totals --info unused --info veneers --list rtthread-stm32.map --scatter stm32_rom.sct'
    CFLAGS += ' -I' + EXEC_PATH + '/ARM/RV31/INC'
    LFLAGS += ' --libpath ' + EXEC_PATH + '/ARM/RV31/LIB'
    EXEC_PATH += '/arm/bin40/'
    if BUILD == 'debug':
        CFLAGS += ' -g -O0'
        AFLAGS += ' -g'
    else:
        CFLAGS += ' -O2'
    POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
    # toolchains
    # NOTE(review): this branch is unreachable via CROSS_TOOL ('iar' exits
    # above), and IAR_PATH is never defined in this file, so reaching it
    # would raise NameError — confirm before relying on IAR support.
    CC = 'iccarm'
    AS = 'iasmarm'
    AR = 'iarchive'
    LINK = 'ilinkarm'
    TARGET_EXT = 'out'
    DEVICE = ' -D USE_STDPERIPH_DRIVER' + ' -D STM32F10X_HD'
    CFLAGS = DEVICE
    CFLAGS += ' --diag_suppress Pa050'
    CFLAGS += ' --no_cse'
    CFLAGS += ' --no_unroll'
    CFLAGS += ' --no_inline'
    CFLAGS += ' --no_code_motion'
    CFLAGS += ' --no_tbaa'
    CFLAGS += ' --no_clustering'
    CFLAGS += ' --no_scheduling'
    CFLAGS += ' --debug'
    CFLAGS += ' --endian=little'
    CFLAGS += ' --cpu=Cortex-M4'
    CFLAGS += ' -e'
    CFLAGS += ' --fpu=None'
    CFLAGS += ' --dlib_config "' + IAR_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
    CFLAGS += ' -Ol'
    CFLAGS += ' --use_c++_inline'
    AFLAGS = ''
    AFLAGS += ' -s+'
    AFLAGS += ' -w+'
    AFLAGS += ' -r'
    AFLAGS += ' --cpu Cortex-M4'
    AFLAGS += ' --fpu None'
    LFLAGS = ' --config stm32f10x_flash.icf'
    LFLAGS += ' --redirect _Printf=_PrintfTiny'
    LFLAGS += ' --redirect _Scanf=_ScanfSmall'
    LFLAGS += ' --entry __iar_program_start'
    EXEC_PATH = IAR_PATH + '/arm/bin/'
    POST_ACTION = ''
| gpl-2.0 |
nvoron23/socialite | jython/Lib/test/test_tcl.py | 75 | 4645 | #!/usr/bin/env python
import unittest
import os
from test import test_support
from Tkinter import Tcl
from _tkinter import TclError
class TclTest(unittest.TestCase):
    """Exercises the Tcl interpreter wrapper exposed via Tkinter.Tcl()."""
    def setUp(self):
        self.interp = Tcl()
    def testEval(self):
        tcl = self.interp
        tcl.eval('set a 1')
        self.assertEqual(tcl.eval('set a'),'1')
    def testEvalException(self):
        # 'set a' with no value reads an unset variable -> TclError.
        tcl = self.interp
        self.assertRaises(TclError,tcl.eval,'set a')
    def testEvalException2(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.eval,'this is wrong')
    def testCall(self):
        tcl = self.interp
        tcl.call('set','a','1')
        self.assertEqual(tcl.call('set','a'),'1')
    def testCallException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.call,'set','a')
    def testCallException2(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.call,'this','is','wrong')
    def testSetVar(self):
        tcl = self.interp
        tcl.setvar('a','1')
        self.assertEqual(tcl.eval('set a'),'1')
    def testSetVarArray(self):
        # 'a(1)' addresses an element of a Tcl array variable.
        tcl = self.interp
        tcl.setvar('a(1)','1')
        self.assertEqual(tcl.eval('set a(1)'),'1')
    def testGetVar(self):
        tcl = self.interp
        tcl.eval('set a 1')
        self.assertEqual(tcl.getvar('a'),'1')
    def testGetVarArray(self):
        tcl = self.interp
        tcl.eval('set a(1) 1')
        self.assertEqual(tcl.getvar('a(1)'),'1')
    def testGetVarException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.getvar,'a')
    def testGetVarArrayException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.getvar,'a(1)')
    def testUnsetVar(self):
        tcl = self.interp
        tcl.setvar('a',1)
        self.assertEqual(tcl.eval('info exists a'),'1')
        tcl.unsetvar('a')
        self.assertEqual(tcl.eval('info exists a'),'0')
    def testUnsetVarArray(self):
        # Unsetting one array element must leave the others intact.
        tcl = self.interp
        tcl.setvar('a(1)',1)
        tcl.setvar('a(2)',2)
        self.assertEqual(tcl.eval('info exists a(1)'),'1')
        self.assertEqual(tcl.eval('info exists a(2)'),'1')
        tcl.unsetvar('a(1)')
        self.assertEqual(tcl.eval('info exists a(1)'),'0')
        self.assertEqual(tcl.eval('info exists a(2)'),'1')
    def testUnsetVarException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.unsetvar,'a')
    def testEvalFile(self):
        # Writes a small Tcl script to disk, sources it, then cleans up.
        tcl = self.interp
        filename = "testEvalFile.tcl"
        fd = open(filename,'w')
        script = """set a 1
        set b 2
        set c [ expr $a + $b ]
        """
        fd.write(script)
        fd.close()
        tcl.evalfile(filename)
        os.remove(filename)
        self.assertEqual(tcl.eval('set a'),'1')
        self.assertEqual(tcl.eval('set b'),'2')
        self.assertEqual(tcl.eval('set c'),'3')
    def testEvalFileException(self):
        tcl = self.interp
        filename = "doesnotexists"
        try:
            os.remove(filename)
        except Exception,e:
            pass
        self.assertRaises(TclError,tcl.evalfile,filename)
    def testPackageRequireException(self):
        tcl = self.interp
        self.assertRaises(TclError,tcl.eval,'package require DNE')
    def testLoadTk(self):
        import os
        if 'DISPLAY' not in os.environ:
            # skipping test of clean upgradeability
            return
        tcl = Tcl()
        # Tk commands must be unavailable until loadtk() is called.
        self.assertRaises(TclError,tcl.winfo_geometry)
        tcl.loadtk()
        self.assertEqual('1x1+0+0', tcl.winfo_geometry())
        tcl.destroy()
    def testLoadTkFailure(self):
        # Verifies loadtk() fails cleanly when no X display is available;
        # temporarily removes DISPLAY from the environment.
        import os
        old_display = None
        import sys
        if sys.platform.startswith(('win', 'darwin', 'cygwin')):
            return # no failure possible on windows?
        if 'DISPLAY' in os.environ:
            old_display = os.environ['DISPLAY']
            del os.environ['DISPLAY']
            # on some platforms, deleting environment variables
            # doesn't actually carry through to the process level
            # because they don't support unsetenv
            # If that's the case, abort.
            display = os.popen('echo $DISPLAY').read().strip()
            if display:
                return
        try:
            tcl = Tcl()
            self.assertRaises(TclError, tcl.winfo_geometry)
            self.assertRaises(TclError, tcl.loadtk)
        finally:
            if old_display is not None:
                os.environ['DISPLAY'] = old_display
def test_main():
    """Run the TclTest suite under Python's regression-test driver."""
    test_support.run_unittest(TclTest)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
kustodian/ansible | lib/ansible/plugins/connection/lxd.py | 74 | 4629 | # (c) 2016 Matt Clay <matt@mystile.com>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
author: Matt Clay <matt@mystile.com>
connection: lxd
short_description: Run tasks in lxc containers via lxc CLI
description:
- Run commands or put/fetch files to an existing lxc container using lxc CLI
version_added: "2.0"
options:
remote_addr:
description:
- Container identifier
default: inventory_hostname
vars:
- name: ansible_host
- name: ansible_lxd_host
executable:
description:
- shell to use for execution inside container
default: /bin/sh
vars:
- name: ansible_executable
- name: ansible_lxd_executable
"""
import os
from distutils.spawn import find_executable
from subprocess import Popen, PIPE
from ansible.errors import AnsibleError, AnsibleConnectionFailure, AnsibleFileNotFound
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.connection import ConnectionBase
class Connection(ConnectionBase):
    """ lxd based connections """

    # Plugin/transport identifier used by Ansible's connection loader.
    transport = "lxd"
    # Commands run through the container's shell, so module pipelining works.
    has_pipelining = True
    # lxd always executes as root inside the container (see __init__ warning).
    default_user = 'root'

    def __init__(self, play_context, new_stdin, *args, **kwargs):
        super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)

        # The "host" is really the LXD container name/identifier.
        self._host = self._play_context.remote_addr
        self._lxc_cmd = find_executable("lxc")

        if not self._lxc_cmd:
            raise AnsibleError("lxc command not found in PATH")

        if self._play_context.remote_user is not None and self._play_context.remote_user != 'root':
            self._display.warning('lxd does not support remote_user, using container default: root')

    def _connect(self):
        """connect to lxd (nothing to do here) """
        super(Connection, self)._connect()

        if not self._connected:
            self._display.vvv(u"ESTABLISH LXD CONNECTION FOR USER: root", host=self._host)
            self._connected = True

    def exec_command(self, cmd, in_data=None, sudoable=True):
        """ execute a command on the lxd host """
        super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)

        self._display.vvv(u"EXEC {0}".format(cmd), host=self._host)

        # Run the command via the configured shell inside the container so
        # quoting behaves like a normal remote shell invocation.
        local_cmd = [self._lxc_cmd, "exec", self._host, "--", self._play_context.executable, "-c", cmd]

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
        in_data = to_bytes(in_data, errors='surrogate_or_strict', nonstring='passthru')

        process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = process.communicate(in_data)

        stdout = to_text(stdout)
        stderr = to_text(stderr)

        # lxc reports these conditions only as stderr text, so the exact
        # messages are matched to turn them into connection failures.
        if stderr == "error: Container is not running.\n":
            raise AnsibleConnectionFailure("container not running: %s" % self._host)

        if stderr == "error: not found\n":
            raise AnsibleConnectionFailure("container not found: %s" % self._host)

        return process.returncode, stdout, stderr

    def put_file(self, in_path, out_path):
        """ put a file from local to lxd """
        super(Connection, self).put_file(in_path, out_path)

        self._display.vvv(u"PUT {0} TO {1}".format(in_path, out_path), host=self._host)

        if not os.path.isfile(to_bytes(in_path, errors='surrogate_or_strict')):
            raise AnsibleFileNotFound("input path is not a file: %s" % in_path)

        local_cmd = [self._lxc_cmd, "file", "push", in_path, self._host + "/" + out_path]

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        # NOTE(review): the exit status of "lxc file push" is not checked,
        # so a failed transfer is silently ignored -- confirm if intentional.
        process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        process.communicate()

    def fetch_file(self, in_path, out_path):
        """ fetch a file from lxd to local """
        super(Connection, self).fetch_file(in_path, out_path)

        self._display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._host)

        local_cmd = [self._lxc_cmd, "file", "pull", self._host + "/" + in_path, out_path]

        local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]

        # NOTE(review): exit status is not checked here either.
        process = Popen(local_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        process.communicate()

    def close(self):
        """ close the connection (nothing to do here) """
        super(Connection, self).close()

        self._connected = False
| gpl-3.0 |
KimNorgaard/ansible-modules-extras | cloud/vmware/vmware_cluster.py | 71 | 9862 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_cluster
short_description: Create VMware vSphere Cluster
description:
- Create VMware vSphere Cluster
version_added: 2.0
author: Joseph Callen (@jcpowermac)
notes:
requirements:
- Tested on ESXi 5.5
- PyVmomi installed
options:
datacenter_name:
description:
- The name of the datacenter the cluster will be created in.
required: True
cluster_name:
description:
- The name of the cluster that will be created
required: True
enable_ha:
description:
- If set to True will enable HA when the cluster is created.
required: False
default: False
enable_drs:
description:
- If set to True will enable DRS when the cluster is created.
required: False
default: False
enable_vsan:
description:
- If set to True will enable vSAN when the cluster is created.
required: False
default: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_cluster command from Ansible Playbooks
- name: Create Cluster
local_action: >
vmware_cluster
hostname="{{ ansible_ssh_host }}" username=root password=vmware
datacenter_name="datacenter"
cluster_name="cluster"
enable_ha=True
enable_drs=True
enable_vsan=True
'''
# pyVmomi is an optional dependency; record availability so main() can
# fail with a clear message instead of an ImportError traceback.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
class VMwareCluster(object):
    """Idempotently create, update or delete a vSphere cluster.

    The desired configuration (HA/DRS/vSAN flags, cluster and datacenter
    names) is taken from the AnsibleModule params; results are reported
    through module.exit_json()/fail_json().
    """

    def __init__(self, module):
        self.module = module
        self.enable_ha = module.params['enable_ha']
        self.enable_drs = module.params['enable_drs']
        self.enable_vsan = module.params['enable_vsan']
        self.cluster_name = module.params['cluster_name']
        self.desired_state = module.params['state']
        self.datacenter = None
        self.cluster = None
        # Service content of the vCenter/ESXi API connection.
        self.content = connect_to_api(module)
        self.datacenter_name = module.params['datacenter_name']

    def process_state(self):
        """Dispatch to the handler matching (desired state, current state)."""
        cluster_states = {
            'absent': {
                'present': self.state_destroy_cluster,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update_cluster,
                'present': self.state_exit_unchanged,
                'absent': self.state_create_cluster,
            }
        }

        current_state = self.check_cluster_configuration()
        # Based on the desired_state and the current_state call
        # the appropriate method from the dictionary
        cluster_states[self.desired_state][current_state]()

    def configure_ha(self):
        """Build the HA (DAS) part of the cluster config spec."""
        das_config = vim.cluster.DasConfigInfo()
        das_config.enabled = self.enable_ha
        das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
        das_config.admissionControlPolicy.failoverLevel = 2
        return das_config

    def configure_drs(self):
        """Build the DRS part of the cluster config spec."""
        drs_config = vim.cluster.DrsConfigInfo()
        drs_config.enabled = self.enable_drs
        # Set to partially automated
        drs_config.vmotionRate = 3
        return drs_config

    def configure_vsan(self):
        """Build the vSAN part of the cluster config spec."""
        vsan_config = vim.vsan.cluster.ConfigInfo()
        vsan_config.enabled = self.enable_vsan
        vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
        vsan_config.defaultConfig.autoClaimStorage = False
        return vsan_config

    def state_create_cluster(self):
        """Create the cluster in the datacenter's host folder."""
        try:
            cluster_config_spec = vim.cluster.ConfigSpecEx()
            cluster_config_spec.dasConfig = self.configure_ha()
            cluster_config_spec.drsConfig = self.configure_drs()
            if self.enable_vsan:
                cluster_config_spec.vsanConfig = self.configure_vsan()
            if not self.module.check_mode:
                self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
            self.module.exit_json(changed=True)
        except vim.fault.DuplicateName:
            self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name)
        except vmodl.fault.InvalidArgument:
            self.module.fail_json(msg="Cluster configuration specification parameter is invalid")
        except vim.fault.InvalidName:
            self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name)
        except vmodl.fault.NotSupported:
            # This should never happen
            self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object")
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            # This should never happen either
            self.module.fail_json(msg=method_fault.msg)

    def state_destroy_cluster(self):
        """Destroy the existing cluster and wait for the task to finish."""
        changed = True
        result = None

        try:
            if not self.module.check_mode:
                task = self.cluster.Destroy_Task()
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vim.fault.VimFault as vim_fault:
            self.module.fail_json(msg=vim_fault.msg)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_exit_unchanged(self):
        """Report that no change was necessary."""
        self.module.exit_json(changed=False)

    def state_update_cluster(self):
        """Reconfigure the cluster so HA/DRS/vSAN match the requested flags."""
        cluster_config_spec = vim.cluster.ConfigSpecEx()
        changed = True
        result = None

        # Only include the sub-specs that actually differ from the request.
        if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha:
            cluster_config_spec.dasConfig = self.configure_ha()
        if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs:
            cluster_config_spec.drsConfig = self.configure_drs()
        if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan:
            cluster_config_spec.vsanConfig = self.configure_vsan()

        try:
            if not self.module.check_mode:
                task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except TaskError as task_e:
            self.module.fail_json(msg=str(task_e))

    def check_cluster_configuration(self):
        """Return 'absent', 'present' or 'update' for the live cluster state."""
        try:
            self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter %s does not exist, "
                                          "please create first with Ansible Module vmware_datacenter or manually."
                                          % self.datacenter_name)
            self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name)

            if self.cluster is None:
                return 'absent'
            else:
                desired_state = (self.enable_ha,
                                 self.enable_drs,
                                 self.enable_vsan)

                current_state = (self.cluster.configurationEx.dasConfig.enabled,
                                 self.cluster.configurationEx.drsConfig.enabled,
                                 self.cluster.configurationEx.vsanConfigInfo.enabled)

                # Direct tuple comparison replaces the Python-2-only cmp()
                # builtin; cmp(a, b) != 0 is exactly a != b.
                if desired_state != current_state:
                    return 'update'
                else:
                    return 'present'

        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
def main():
    """Entry point: build the argument spec and hand off to VMwareCluster."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(datacenter_name=dict(required=True, type='str'),
                              cluster_name=dict(required=True, type='str'),
                              enable_ha=dict(default=False, required=False, type='bool'),
                              enable_drs=dict(default=False, required=False, type='bool'),
                              enable_vsan=dict(default=False, required=False, type='bool'),
                              state=dict(default='present', choices=['present', 'absent'], type='str')))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Fail early with a readable message rather than an ImportError.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_cluster = VMwareCluster(module)
    vmware_cluster.process_state()
# Ansible 1.x/2.x convention: shared utility imports live at the bottom
# of a module, after the code that uses them.
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
jvenezia/heroku-buildpack-python | vendor/pip-pop/pip/_vendor/distlib/_backport/tarfile.py | 1005 | 92627 | #-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
    # WindowsError (1314) will be raised if the caller does not hold the
    # SeCreateSymbolicLinkPrivilege privilege
    symlink_exception += (WindowsError,)
except NameError:
    # Non-Windows platforms have no WindowsError builtin.
    pass

# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]

if sys.version_info[0] < 3:
    import __builtin__ as builtins
else:
    import builtins

_open = builtins.open   # Since 'open' is TarFile.open

#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0"                     # the null character
BLOCKSIZE = 512                 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20     # length of records
GNU_MAGIC = b"ustar \0"         # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000"    # magic posix tar string

LENGTH_NAME = 100               # maximum length of a filename
LENGTH_LINK = 100               # maximum length of a linkname
LENGTH_PREFIX = 155             # maximum length of the prefix field

# One-byte type flags stored in the header's "typeflag" field.
REGTYPE = b"0"                  # regular file
AREGTYPE = b"\0"                # regular file
LNKTYPE = b"1"                  # link (inside tarfile)
SYMTYPE = b"2"                  # symbolic link
CHRTYPE = b"3"                  # character special device
BLKTYPE = b"4"                  # block special device
DIRTYPE = b"5"                  # directory
FIFOTYPE = b"6"                 # fifo special device
CONTTYPE = b"7"                 # contiguous file

GNUTYPE_LONGNAME = b"L"         # GNU tar longname
GNUTYPE_LONGLINK = b"K"         # GNU tar longlink
GNUTYPE_SPARSE = b"S"           # GNU tar sparse file

XHDTYPE = b"x"                  # POSIX.1-2001 extended header
XGLTYPE = b"g"                  # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X"          # Solaris extended header

USTAR_FORMAT = 0                # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1                  # GNU tar format
PAX_FORMAT = 2                  # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT

#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
                   SYMTYPE, DIRTYPE, FIFOTYPE,
                   CONTTYPE, CHRTYPE, BLKTYPE,
                   GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
                   GNUTYPE_SPARSE)

# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
                 CONTTYPE, GNUTYPE_SPARSE)

# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
             GNUTYPE_SPARSE)

# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
              "uid", "gid", "uname", "gname")

# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))

# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
    "atime": float,
    "ctime": float,
    "mtime": float,
    "uid": int,
    "gid": int,
    "size": int
}

#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000        # symbolic link
S_IFREG = 0o100000        # regular file
S_IFBLK = 0o060000        # block device
S_IFDIR = 0o040000        # directory
S_IFCHR = 0o020000        # character device
S_IFIFO = 0o010000        # fifo

TSUID = 0o4000          # set UID on execution
TSGID = 0o2000          # set GID on execution
TSVTX = 0o1000          # reserved

TUREAD = 0o400          # read by owner
TUWRITE = 0o200         # write by owner
TUEXEC = 0o100          # execute/search by owner
TGREAD = 0o040          # read by group
TGWRITE = 0o020         # write by group
TGEXEC = 0o010          # execute/search by group
TOREAD = 0o004          # read by other
TOWRITE = 0o002         # write by other
TOEXEC = 0o001          # execute/search by other

#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
    ENCODING = "utf-8"
else:
    ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
    """Encode string *s* and fit it into a NUL-padded field of *length* bytes.

    Oversized values are truncated, undersized values are padded with NULs.
    """
    encoded = s.encode(encoding, errors)
    if len(encoded) >= length:
        return encoded[:length]
    return encoded + b"\0" * (length - len(encoded))
def nts(s, encoding, errors):
    """Decode a NUL-terminated bytes object *s* to a string.

    Everything from the first NUL byte onwards is discarded.
    """
    end = s.find(b"\0")
    if end == -1:
        return s.decode(encoding, errors)
    return s[:end].decode(encoding, errors)
def nti(s):
    """Convert a number field to a python number.

    Tar number fields are either NUL/space-terminated octal ASCII or, as
    a GNU extension, a base-256 big-endian value flagged by a leading
    0o200 byte -- see itn() below.

    Raises InvalidHeaderError if the octal text cannot be parsed.
    """
    # Indexing bytes yields a 1-char str on Python 2 but an int on
    # Python 3; normalize so the 0o200 flag check works on both.
    # (The old `s[0] != chr(0o200)` comparison was always true on
    # Python 3, which made the base-256 branch unreachable.)
    first = s[0] if isinstance(s[0], int) else ord(s[0])
    if first != 0o200:
        try:
            n = int(nts(s, "ascii", "strict") or "0", 8)
        except ValueError:
            raise InvalidHeaderError("invalid header")
    else:
        # Base-256: big-endian over the remaining digits-1 bytes.
        n = 0
        for ch in s[1:]:
            n <<= 8
            n += ch if isinstance(ch, int) else ord(ch)
    return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
    """Convert a python number to a number field.

    Returns *digits* bytes: octal ASCII when the value fits, otherwise
    (GNU format only) a base-256 big-endian encoding flagged by a
    leading 0o200 byte.  Raises ValueError when the value cannot be
    represented in *digits* bytes for the requested format.
    """
    # POSIX 1003.1-1988 requires numbers to be encoded as a string of
    # octal digits followed by a null-byte, this allows values up to
    # (8**(digits-1))-1. GNU tar allows storing numbers greater than
    # that if necessary. A leading 0o200 byte indicates this particular
    # encoding, the following digits-1 bytes are a big-endian
    # representation. This allows values up to (256**(digits-1))-1.
    if 0 <= n < 8 ** (digits - 1):
        s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
    else:
        if format != GNU_FORMAT or n >= 256 ** (digits - 1):
            raise ValueError("overflow in number field")

        if n < 0:
            # XXX We mimic GNU tar's behaviour with negative numbers,
            # this could raise OverflowError.
            n = struct.unpack("L", struct.pack("l", n))[0]

        s = bytearray()
        for i in range(digits - 1):
            s.insert(0, n & 0o377)
            n >>= 8
        s.insert(0, 0o200)
    return s
def calc_chksums(buf):
    """Calculate the checksum for a member's header by summing up all
       characters except for the chksum field which is treated as if
       it was filled with spaces.  According to the GNU tar sources,
       some tars (Sun and NeXT) calculate chksum with signed char,
       which will be different if there are chars in the buffer with
       the high bit set.  So we calculate two checksums, unsigned and
       signed.
    """
    # 256 == 8 spaces (0x20), the value of an all-blank chksum field.
    # Bytes 0..148 and 156..512 are summed; 148..156 is the field itself.
    unsigned_chksum = 256
    unsigned_chksum += sum(struct.unpack_from("148B", buf))
    unsigned_chksum += sum(struct.unpack_from("356B", buf, 156))
    signed_chksum = 256
    signed_chksum += sum(struct.unpack_from("148b", buf))
    signed_chksum += sum(struct.unpack_from("356b", buf, 156))
    return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
    """Copy length bytes from fileobj src to fileobj dst.
       If length is None, copy the entire content.

       Raises IOError if src is exhausted before *length* bytes were read.
    """
    BUFSIZE = 16 * 1024
    if length == 0:
        return
    if length is None:
        # Unbounded copy: stream chunks until EOF.
        while True:
            chunk = src.read(BUFSIZE)
            if not chunk:
                return
            dst.write(chunk)

    # Bounded copy: full blocks followed by an optional short tail.
    full_blocks, tail = divmod(length, BUFSIZE)
    sizes = [BUFSIZE] * full_blocks
    if tail:
        sizes.append(tail)
    for want in sizes:
        chunk = src.read(want)
        if len(chunk) < want:
            raise IOError("end of file reached")
        dst.write(chunk)
    return
# Lookup table used by filemode(): one inner tuple per output character.
# Within each group the first (bit, char) pair whose bits are fully set
# in the mode wins; if none match, "-" is emitted.
filemode_table = (
    ((S_IFLNK, "l"),
     (S_IFREG, "-"),
     (S_IFBLK, "b"),
     (S_IFDIR, "d"),
     (S_IFCHR, "c"),
     (S_IFIFO, "p")),

    ((TUREAD, "r"),),
    ((TUWRITE, "w"),),
    ((TUEXEC|TSUID, "s"),
     (TSUID, "S"),
     (TUEXEC, "x")),

    ((TGREAD, "r"),),
    ((TGWRITE, "w"),),
    ((TGEXEC|TSGID, "s"),
     (TSGID, "S"),
     (TGEXEC, "x")),

    ((TOREAD, "r"),),
    ((TOWRITE, "w"),),
    ((TOEXEC|TSVTX, "t"),
     (TSVTX, "T"),
     (TOEXEC, "x"))
)
def filemode(mode):
    """Convert a file's mode to a string of the form
       -rwxrwxrwx.
       Used by TarFile.list()
    """
    # Same bit values as the module-level filemode_table, spelled with
    # the stat module's standard constants.  One group per output
    # character; the first fully-set (bit, char) pair wins, else "-".
    groups = (
        ((stat.S_IFLNK, "l"), (stat.S_IFREG, "-"), (stat.S_IFBLK, "b"),
         (stat.S_IFDIR, "d"), (stat.S_IFCHR, "c"), (stat.S_IFIFO, "p")),
        ((stat.S_IRUSR, "r"),),
        ((stat.S_IWUSR, "w"),),
        ((stat.S_IXUSR | stat.S_ISUID, "s"), (stat.S_ISUID, "S"),
         (stat.S_IXUSR, "x")),
        ((stat.S_IRGRP, "r"),),
        ((stat.S_IWGRP, "w"),),
        ((stat.S_IXGRP | stat.S_ISGID, "s"), (stat.S_ISGID, "S"),
         (stat.S_IXGRP, "x")),
        ((stat.S_IROTH, "r"),),
        ((stat.S_IWOTH, "w"),),
        ((stat.S_IXOTH | stat.S_ISVTX, "t"), (stat.S_ISVTX, "T"),
         (stat.S_IXOTH, "x")),
    )
    chars = []
    for group in groups:
        for bit, char in group:
            if mode & bit == bit:
                chars.append(char)
                break
        else:
            chars.append("-")
    return "".join(chars)
# Exception hierarchy: all errors derive from TarError; header-parsing
# problems additionally derive from HeaderError.
class TarError(Exception):
    """Base exception."""
    pass

class ExtractError(TarError):
    """General exception for extract errors."""
    pass

class ReadError(TarError):
    """Exception for unreadable tar archives."""
    pass

class CompressionError(TarError):
    """Exception for unavailable compression methods."""
    pass

class StreamError(TarError):
    """Exception for unsupported operations on stream-like TarFiles."""
    pass

class HeaderError(TarError):
    """Base exception for header errors."""
    pass

class EmptyHeaderError(HeaderError):
    """Exception for empty headers."""
    pass

class TruncatedHeaderError(HeaderError):
    """Exception for truncated headers."""
    pass

class EOFHeaderError(HeaderError):
    """Exception for end of file headers."""
    pass

class InvalidHeaderError(HeaderError):
    """Exception for invalid headers."""
    pass

class SubsequentHeaderError(HeaderError):
    """Exception for missing and invalid extended headers."""
    pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
    """Small proxy class that enables external file object
       support for "r:bz2" and "w:bz2" modes. This is actually
       a workaround for a limitation in bz2 module's BZ2File
       class which (unlike gzip.GzipFile) has no support for
       a file object argument.
    """

    # Chunk size used when pulling raw data from the wrapped file object.
    blocksize = 16 * 1024

    def __init__(self, fileobj, mode):
        self.fileobj = fileobj
        self.mode = mode
        self.name = getattr(self.fileobj, "name", None)
        self.init()

    def init(self):
        """(Re)create the (de)compressor and rewind to position 0."""
        import bz2
        self.pos = 0
        if self.mode == "r":
            self.bz2obj = bz2.BZ2Decompressor()
            self.fileobj.seek(0)
            self.buf = b""
        else:
            self.bz2obj = bz2.BZ2Compressor()

    def read(self, size):
        """Return up to *size* decompressed bytes."""
        x = len(self.buf)
        while x < size:
            raw = self.fileobj.read(self.blocksize)
            if not raw:
                break
            data = self.bz2obj.decompress(raw)
            self.buf += data
            x += len(data)

        buf = self.buf[:size]
        self.buf = self.buf[size:]
        self.pos += len(buf)
        return buf

    def seek(self, pos):
        """Seek to *pos*; a backwards seek restarts decompression from
           the beginning, a forward seek reads and discards."""
        if pos < self.pos:
            self.init()
        self.read(pos - self.pos)

    def tell(self):
        return self.pos

    def write(self, data):
        """Compress *data* and write it to the wrapped file object."""
        self.pos += len(data)
        raw = self.bz2obj.compress(data)
        self.fileobj.write(raw)

    def close(self):
        # Flush pending compressed output; does NOT close the wrapped
        # file object, which stays owned by the caller.
        if self.mode == "w":
            raw = self.bz2obj.flush()
            self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
    """A thin wrapper around an existing file object that
       provides a part of its data as an individual file
       object.
    """

    def __init__(self, fileobj, offset, size, blockinfo=None):
        self.fileobj = fileobj
        self.offset = offset
        self.size = size
        self.position = 0

        if blockinfo is None:
            # Non-sparse member: one data block spanning the whole file.
            blockinfo = [(0, size)]

        # Construct a map with data and zero blocks.
        # Each map entry is (isdata, start, stop, realpos): logical range
        # [start, stop) is backed by fileobj at realpos when isdata is
        # True, otherwise it is a sparse hole that reads as NULs.
        self.map_index = 0
        self.map = []
        lastpos = 0
        realpos = self.offset
        for offset, size in blockinfo:
            if offset > lastpos:
                self.map.append((False, lastpos, offset, None))
            self.map.append((True, offset, offset + size, realpos))
            realpos += size
            lastpos = offset + size
        if lastpos < self.size:
            self.map.append((False, lastpos, self.size, None))

    def seekable(self):
        if not hasattr(self.fileobj, "seekable"):
            # XXX gzip.GzipFile and bz2.BZ2File
            return True
        return self.fileobj.seekable()

    def tell(self):
        """Return the current file position.
        """
        return self.position

    def seek(self, position):
        """Seek to a position in the file.
        """
        self.position = position

    def read(self, size=None):
        """Read data from the file.
        """
        if size is None:
            size = self.size - self.position
        else:
            size = min(size, self.size - self.position)

        buf = b""
        while size > 0:
            # Find the map entry covering the current position; the scan
            # wraps around because reads may seek backwards.
            while True:
                data, start, stop, offset = self.map[self.map_index]
                if start <= self.position < stop:
                    break
                else:
                    self.map_index += 1
                    if self.map_index == len(self.map):
                        self.map_index = 0
            length = min(size, stop - self.position)
            if data:
                self.fileobj.seek(offset + (self.position - start))
                buf += self.fileobj.read(length)
            else:
                # Sparse hole: synthesize NUL bytes.
                buf += NUL * length
            size -= length
            self.position += length
        return buf
#class _FileInFile
class ExFileObject(object):
    """File-like object for reading an archive member.
       Is returned by TarFile.extractfile().
    """
    # Number of bytes fetched from the underlying file per buffered
    # read in readline().
    blocksize = 1024
    def __init__(self, tarfile, tarinfo):
        # _FileInFile confines all reads to the member's data region
        # inside the archive file (honouring sparse maps, if any).
        self.fileobj = _FileInFile(tarfile.fileobj,
                                   tarinfo.offset_data,
                                   tarinfo.size,
                                   tarinfo.sparse)
        self.name = tarinfo.name
        self.mode = "r"
        self.closed = False
        self.size = tarinfo.size
        self.position = 0   # logical read position within the member
        self.buffer = b""   # bytes read ahead by readline()
    def readable(self):
        return True
    def writable(self):
        return False
    def seekable(self):
        return self.fileobj.seekable()
    def read(self, size=None):
        """Read at most size bytes from the file. If size is not
           present or None, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        buf = b""
        if self.buffer:
            # Serve (part of) the readline() look-ahead buffer first.
            if size is None:
                buf = self.buffer
                self.buffer = b""
            else:
                buf = self.buffer[:size]
                self.buffer = self.buffer[size:]
        if size is None:
            buf += self.fileobj.read()
        else:
            buf += self.fileobj.read(size - len(buf))
        self.position += len(buf)
        return buf
    # XXX TextIOWrapper uses the read1() method.
    read1 = read
    def readline(self, size=-1):
        """Read one entire line from the file. If size is present
           and non-negative, return a string with at most that
           size, which may be an incomplete line.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        # pos is the index one past the newline, or 0 if none buffered.
        pos = self.buffer.find(b"\n") + 1
        if pos == 0:
            # no newline found yet: keep filling the look-ahead buffer
            # until a newline or EOF appears.
            while True:
                buf = self.fileobj.read(self.blocksize)
                self.buffer += buf
                if not buf or b"\n" in buf:
                    pos = self.buffer.find(b"\n") + 1
                    if pos == 0:
                        # no newline found: return everything buffered
                        # (this is the final, unterminated line).
                        pos = len(self.buffer)
                    break
        if size != -1:
            pos = min(size, pos)
        buf = self.buffer[:pos]
        self.buffer = self.buffer[pos:]
        self.position += len(buf)
        return buf
    def readlines(self):
        """Return a list with all remaining lines.
        """
        result = []
        while True:
            line = self.readline()
            if not line: break
            result.append(line)
        return result
    def tell(self):
        """Return the current file position.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        return self.position
    def seek(self, pos, whence=os.SEEK_SET):
        """Seek to a position in the file.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        # The resulting position is always clamped to [0, self.size].
        if whence == os.SEEK_SET:
            self.position = min(max(pos, 0), self.size)
        elif whence == os.SEEK_CUR:
            if pos < 0:
                self.position = max(self.position + pos, 0)
            else:
                self.position = min(self.position + pos, self.size)
        elif whence == os.SEEK_END:
            self.position = max(min(self.size + pos, self.size), 0)
        else:
            raise ValueError("Invalid argument")
        # Any look-ahead data is stale after an explicit seek.
        self.buffer = b""
        self.fileobj.seek(self.position)
    def close(self):
        """Close the file object.
        """
        # Nothing to release: the underlying archive file is owned by
        # the TarFile object.  Closing only blocks further reads.
        self.closed = True
    def __iter__(self):
        """Get an iterator over the file's lines.
        """
        while True:
            line = self.readline()
            if not line:
                break
            yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
    """Informational class which holds the details about an
       archive member given by a tar header block.
       TarInfo objects are returned by TarFile.getmember(),
       TarFile.getmembers() and TarFile.gettarinfo() and are
       usually created internally.
    """
    # __slots__ keeps per-instance memory small; an archive may
    # contain a very large number of members.
    __slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
                 "chksum", "type", "linkname", "uname", "gname",
                 "devmajor", "devminor",
                 "offset", "offset_data", "pax_headers", "sparse",
                 "tarfile", "_sparse_structs", "_link_target")
    def __init__(self, name=""):
        """Construct a TarInfo object. name is the optional name
           of the member.
        """
        self.name = name        # member name
        self.mode = 0o644       # file permissions
        self.uid = 0            # user id
        self.gid = 0            # group id
        self.size = 0           # file size
        self.mtime = 0          # modification time
        self.chksum = 0         # header checksum
        self.type = REGTYPE     # member type
        self.linkname = ""      # link name
        self.uname = ""         # user name
        self.gname = ""         # group name
        self.devmajor = 0       # device major number
        self.devminor = 0       # device minor number
        self.offset = 0         # the tar header starts here
        self.offset_data = 0    # the file's data starts here
        self.sparse = None      # sparse member information
        self.pax_headers = {}   # pax header information
    # In pax headers the "name" and "linkname" field are called
    # "path" and "linkpath".  These properties expose them under
    # the pax names.
    def _getpath(self):
        return self.name
    def _setpath(self, name):
        self.name = name
    path = property(_getpath, _setpath)
    def _getlinkpath(self):
        return self.linkname
    def _setlinkpath(self, linkname):
        self.linkname = linkname
    linkpath = property(_getlinkpath, _setlinkpath)
    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
    def get_info(self):
        """Return the TarInfo's attributes as a dictionary.
        """
        info = {
            "name": self.name,
            "mode": self.mode & 0o7777,
            "uid": self.uid,
            "gid": self.gid,
            "size": self.size,
            "mtime": self.mtime,
            "chksum": self.chksum,
            "type": self.type,
            "linkname": self.linkname,
            "uname": self.uname,
            "gname": self.gname,
            "devmajor": self.devmajor,
            "devminor": self.devminor
        }
        # Directory members are stored with a trailing slash.
        if info["type"] == DIRTYPE and not info["name"].endswith("/"):
            info["name"] += "/"
        return info
    def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
        """Return a tar header as a string of 512 byte blocks.
        """
        info = self.get_info()
        if format == USTAR_FORMAT:
            return self.create_ustar_header(info, encoding, errors)
        elif format == GNU_FORMAT:
            return self.create_gnu_header(info, encoding, errors)
        elif format == PAX_FORMAT:
            return self.create_pax_header(info, encoding)
        else:
            raise ValueError("invalid format")
    def create_ustar_header(self, info, encoding, errors):
        """Return the object as a ustar header block.
        """
        info["magic"] = POSIX_MAGIC
        if len(info["linkname"]) > LENGTH_LINK:
            raise ValueError("linkname is too long")
        if len(info["name"]) > LENGTH_NAME:
            # ustar can store long names split into a prefix and a
            # name field of limited size each.
            info["prefix"], info["name"] = self._posix_split_name(info["name"])
        return self._create_header(info, USTAR_FORMAT, encoding, errors)
    def create_gnu_header(self, info, encoding, errors):
        """Return the object as a GNU header block sequence.
        """
        info["magic"] = GNU_MAGIC
        buf = b""
        # Overlong names/linknames are emitted as separate pseudo
        # members preceding the real header.
        if len(info["linkname"]) > LENGTH_LINK:
            buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
        if len(info["name"]) > LENGTH_NAME:
            buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
        return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
    def create_pax_header(self, info, encoding):
        """Return the object as a ustar header block. If it cannot be
           represented this way, prepend a pax extended header sequence
           with supplement information.
        """
        info["magic"] = POSIX_MAGIC
        pax_headers = self.pax_headers.copy()
        # Test string fields for values that exceed the field length or cannot
        # be represented in ASCII encoding.
        for name, hname, length in (
                ("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
                ("uname", "uname", 32), ("gname", "gname", 32)):
            if hname in pax_headers:
                # The pax header has priority.
                continue
            # Try to encode the string as ASCII.
            try:
                info[name].encode("ascii", "strict")
            except UnicodeEncodeError:
                pax_headers[hname] = info[name]
                continue
            if len(info[name]) > length:
                pax_headers[hname] = info[name]
        # Test number fields for values that exceed the field limit or values
        # that like to be stored as float.
        for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
            if name in pax_headers:
                # The pax header has priority. Avoid overflow.
                info[name] = 0
                continue
            val = info[name]
            if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
                pax_headers[name] = str(val)
                info[name] = 0
        # Create a pax extended header if necessary.
        if pax_headers:
            buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
        else:
            buf = b""
        return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
    @classmethod
    def create_pax_global_header(cls, pax_headers):
        """Return the object as a pax global header block sequence.
        """
        return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
    def _posix_split_name(self, name):
        """Split a name longer than 100 chars into a prefix
           and a name part.
        """
        # The split must happen on a "/" so that both pieces remain
        # valid path components.
        prefix = name[:LENGTH_PREFIX + 1]
        while prefix and prefix[-1] != "/":
            prefix = prefix[:-1]
        name = name[len(prefix):]
        prefix = prefix[:-1]
        if not prefix or len(name) > LENGTH_NAME:
            raise ValueError("name is too long")
        return prefix, name
    @staticmethod
    def _create_header(info, format, encoding, errors):
        """Return a header block. info is a dictionary with file
           information, format must be one of the *_FORMAT constants.
        """
        parts = [
            stn(info.get("name", ""), 100, encoding, errors),
            itn(info.get("mode", 0) & 0o7777, 8, format),
            itn(info.get("uid", 0), 8, format),
            itn(info.get("gid", 0), 8, format),
            itn(info.get("size", 0), 12, format),
            itn(info.get("mtime", 0), 12, format),
            b"        ", # checksum field
            info.get("type", REGTYPE),
            stn(info.get("linkname", ""), 100, encoding, errors),
            info.get("magic", POSIX_MAGIC),
            stn(info.get("uname", ""), 32, encoding, errors),
            stn(info.get("gname", ""), 32, encoding, errors),
            itn(info.get("devmajor", 0), 8, format),
            itn(info.get("devminor", 0), 8, format),
            stn(info.get("prefix", ""), 155, encoding, errors)
        ]
        buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
        # Compute the checksum over the block (checksum field blanked)
        # and patch it into bytes 148..155 of the header.
        chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
        buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
        return buf
    @staticmethod
    def _create_payload(payload):
        """Return the string payload filled with zero bytes
           up to the next 512 byte border.
        """
        blocks, remainder = divmod(len(payload), BLOCKSIZE)
        if remainder > 0:
            payload += (BLOCKSIZE - remainder) * NUL
        return payload
    @classmethod
    def _create_gnu_long_header(cls, name, type, encoding, errors):
        """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
           for name.
        """
        name = name.encode(encoding, errors) + NUL
        info = {}
        info["name"] = "././@LongLink"
        info["type"] = type
        info["size"] = len(name)
        info["magic"] = GNU_MAGIC
        # create extended header + name blocks.
        return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
                cls._create_payload(name)
    @classmethod
    def _create_pax_generic_header(cls, pax_headers, type, encoding):
        """Return a POSIX.1-2008 extended or global header sequence
           that contains a list of keyword, value pairs. The values
           must be strings.
        """
        # Check if one of the fields contains surrogate characters and thereby
        # forces hdrcharset=BINARY, see _proc_pax() for more information.
        binary = False
        for keyword, value in pax_headers.items():
            try:
                value.encode("utf8", "strict")
            except UnicodeEncodeError:
                binary = True
                break
        records = b""
        if binary:
            # Put the hdrcharset field at the beginning of the header.
            records += b"21 hdrcharset=BINARY\n"
        for keyword, value in pax_headers.items():
            keyword = keyword.encode("utf8")
            if binary:
                # Try to restore the original byte representation of `value'.
                # Needless to say, that the encoding must match the string.
                value = value.encode(encoding, "surrogateescape")
            else:
                value = value.encode("utf8")
            l = len(keyword) + len(value) + 3   # ' ' + '=' + '\n'
            n = p = 0
            # The length field is part of the record it describes, so
            # iterate until the total record size reaches a fixed point.
            while True:
                n = l + len(str(p))
                if n == p:
                    break
                p = n
            records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
        # We use a hardcoded "././@PaxHeader" name like star does
        # instead of the one that POSIX recommends.
        info = {}
        info["name"] = "././@PaxHeader"
        info["type"] = type
        info["size"] = len(records)
        info["magic"] = POSIX_MAGIC
        # Create pax header + record blocks.
        return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
                cls._create_payload(records)
    @classmethod
    def frombuf(cls, buf, encoding, errors):
        """Construct a TarInfo object from a 512 byte bytes object.
        """
        if len(buf) == 0:
            raise EmptyHeaderError("empty header")
        if len(buf) != BLOCKSIZE:
            raise TruncatedHeaderError("truncated header")
        if buf.count(NUL) == BLOCKSIZE:
            raise EOFHeaderError("end of file header")
        # Stored checksum must match one of the two conventional ways
        # of summing the header bytes (signed or unsigned).
        chksum = nti(buf[148:156])
        if chksum not in calc_chksums(buf):
            raise InvalidHeaderError("bad checksum")
        # Slice offsets below follow the ustar header layout.
        obj = cls()
        obj.name = nts(buf[0:100], encoding, errors)
        obj.mode = nti(buf[100:108])
        obj.uid = nti(buf[108:116])
        obj.gid = nti(buf[116:124])
        obj.size = nti(buf[124:136])
        obj.mtime = nti(buf[136:148])
        obj.chksum = chksum
        obj.type = buf[156:157]
        obj.linkname = nts(buf[157:257], encoding, errors)
        obj.uname = nts(buf[265:297], encoding, errors)
        obj.gname = nts(buf[297:329], encoding, errors)
        obj.devmajor = nti(buf[329:337])
        obj.devminor = nti(buf[337:345])
        prefix = nts(buf[345:500], encoding, errors)
        # Old V7 tar format represents a directory as a regular
        # file with a trailing slash.
        if obj.type == AREGTYPE and obj.name.endswith("/"):
            obj.type = DIRTYPE
        # The old GNU sparse format occupies some of the unused
        # space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
        if obj.type == GNUTYPE_SPARSE:
            pos = 386
            structs = []
            for i in range(4):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[482])
            origsize = nti(buf[483:495])
            obj._sparse_structs = (structs, isextended, origsize)
        # Remove redundant slashes from directories.
        if obj.isdir():
            obj.name = obj.name.rstrip("/")
        # Reconstruct a ustar longname.
        if prefix and obj.type not in GNU_TYPES:
            obj.name = prefix + "/" + obj.name
        return obj
    @classmethod
    def fromtarfile(cls, tarfile):
        """Return the next TarInfo object from TarFile object
           tarfile.
        """
        buf = tarfile.fileobj.read(BLOCKSIZE)
        obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
        obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
        return obj._proc_member(tarfile)
    #--------------------------------------------------------------------------
    # The following are methods that are called depending on the type of a
    # member. The entry point is _proc_member() which can be overridden in a
    # subclass to add custom _proc_*() methods. A _proc_*() method MUST
    # implement the following
    # operations:
    # 1. Set self.offset_data to the position where the data blocks begin,
    #    if there is data that follows.
    # 2. Set tarfile.offset to the position where the next member's header will
    #    begin.
    # 3. Return self or another valid TarInfo object.
    def _proc_member(self, tarfile):
        """Choose the right processing method depending on
           the type and call it.
        """
        if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
            return self._proc_gnulong(tarfile)
        elif self.type == GNUTYPE_SPARSE:
            return self._proc_sparse(tarfile)
        elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
            return self._proc_pax(tarfile)
        else:
            return self._proc_builtin(tarfile)
    def _proc_builtin(self, tarfile):
        """Process a builtin type or an unknown type which
           will be treated as a regular file.
        """
        self.offset_data = tarfile.fileobj.tell()
        offset = self.offset_data
        if self.isreg() or self.type not in SUPPORTED_TYPES:
            # Skip the following data blocks.
            offset += self._block(self.size)
        tarfile.offset = offset
        # Patch the TarInfo object with saved global
        # header information.
        self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
        return self
    def _proc_gnulong(self, tarfile):
        """Process the blocks that hold a GNU longname
           or longlink member.
        """
        buf = tarfile.fileobj.read(self._block(self.size))
        # Fetch the next header and process it.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")
        # Patch the TarInfo object from the next header with
        # the longname information.
        next.offset = self.offset
        if self.type == GNUTYPE_LONGNAME:
            next.name = nts(buf, tarfile.encoding, tarfile.errors)
        elif self.type == GNUTYPE_LONGLINK:
            next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
        return next
    def _proc_sparse(self, tarfile):
        """Process a GNU sparse header plus extra headers.
        """
        # We already collected some sparse structures in frombuf().
        structs, isextended, origsize = self._sparse_structs
        del self._sparse_structs
        # Collect sparse structures from extended header blocks.
        while isextended:
            buf = tarfile.fileobj.read(BLOCKSIZE)
            pos = 0
            # Each extension block holds up to 21 (offset, numbytes)
            # pairs of 12 octal digits each.
            for i in range(21):
                try:
                    offset = nti(buf[pos:pos + 12])
                    numbytes = nti(buf[pos + 12:pos + 24])
                except ValueError:
                    break
                if offset and numbytes:
                    structs.append((offset, numbytes))
                pos += 24
            isextended = bool(buf[504])
        self.sparse = structs
        self.offset_data = tarfile.fileobj.tell()
        tarfile.offset = self.offset_data + self._block(self.size)
        self.size = origsize
        return self
    def _proc_pax(self, tarfile):
        """Process an extended or global header as described in
           POSIX.1-2008.
        """
        # Read the header information.
        buf = tarfile.fileobj.read(self._block(self.size))
        # A pax header stores supplemental information for either
        # the following file (extended) or all following files
        # (global).
        if self.type == XGLTYPE:
            pax_headers = tarfile.pax_headers
        else:
            pax_headers = tarfile.pax_headers.copy()
        # Check if the pax header contains a hdrcharset field. This tells us
        # the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded but since POSIX.1-2008 tar
        # implementations are allowed to store them as raw binary strings if
        # the translation to UTF-8 fails.
        match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
        if match is not None:
            pax_headers["hdrcharset"] = match.group(1).decode("utf8")
        # For the time being, we don't care about anything other than "BINARY".
        # The only other value that is currently allowed by the standard is
        # "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
        hdrcharset = pax_headers.get("hdrcharset")
        if hdrcharset == "BINARY":
            encoding = tarfile.encoding
        else:
            encoding = "utf8"
        # Parse pax header information. A record looks like that:
        # "%d %s=%s\n" % (length, keyword, value). length is the size
        # of the complete record including the length field itself and
        # the newline. keyword and value are both UTF-8 encoded strings.
        regex = re.compile(br"(\d+) ([^=]+)=")
        pos = 0
        while True:
            match = regex.match(buf, pos)
            if not match:
                break
            length, keyword = match.groups()
            length = int(length)
            value = buf[match.end(2) + 1:match.start(1) + length - 1]
            # Normally, we could just use "utf8" as the encoding and "strict"
            # as the error handler, but we better not take the risk. For
            # example, GNU tar <= 1.23 is known to store filenames it cannot
            # translate to UTF-8 as raw strings (unfortunately without a
            # hdrcharset=BINARY header).
            # We first try the strict standard encoding, and if that fails we
            # fall back on the user's encoding and error handler.
            keyword = self._decode_pax_field(keyword, "utf8", "utf8",
                                             tarfile.errors)
            if keyword in PAX_NAME_FIELDS:
                value = self._decode_pax_field(value, encoding, tarfile.encoding,
                                               tarfile.errors)
            else:
                value = self._decode_pax_field(value, "utf8", "utf8",
                                               tarfile.errors)
            pax_headers[keyword] = value
            pos += length
        # Fetch the next header.
        try:
            next = self.fromtarfile(tarfile)
        except HeaderError:
            raise SubsequentHeaderError("missing or bad subsequent header")
        # Process GNU sparse information.
        if "GNU.sparse.map" in pax_headers:
            # GNU extended sparse format version 0.1.
            self._proc_gnusparse_01(next, pax_headers)
        elif "GNU.sparse.size" in pax_headers:
            # GNU extended sparse format version 0.0.
            self._proc_gnusparse_00(next, pax_headers, buf)
        elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
            # GNU extended sparse format version 1.0.
            self._proc_gnusparse_10(next, pax_headers, tarfile)
        if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
            # Patch the TarInfo object with the extended header info.
            next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
            next.offset = self.offset
            if "size" in pax_headers:
                # If the extended header replaces the size field,
                # we need to recalculate the offset where the next
                # header starts.
                offset = next.offset_data
                if next.isreg() or next.type not in SUPPORTED_TYPES:
                    offset += next._block(next.size)
                tarfile.offset = offset
        return next
    def _proc_gnusparse_00(self, next, pax_headers, buf):
        """Process a GNU tar extended sparse header, version 0.0.
        """
        offsets = []
        for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
            offsets.append(int(match.group(1)))
        numbytes = []
        for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
            numbytes.append(int(match.group(1)))
        next.sparse = list(zip(offsets, numbytes))
    def _proc_gnusparse_01(self, next, pax_headers):
        """Process a GNU tar extended sparse header, version 0.1.
        """
        sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
        next.sparse = list(zip(sparse[::2], sparse[1::2]))
    def _proc_gnusparse_10(self, next, pax_headers, tarfile):
        """Process a GNU tar extended sparse header, version 1.0.
        """
        # The sparse map is stored as newline-separated decimal numbers
        # in the member's data area: a count followed by that many
        # (offset, numbytes) pairs.
        fields = None
        sparse = []
        buf = tarfile.fileobj.read(BLOCKSIZE)
        fields, buf = buf.split(b"\n", 1)
        fields = int(fields)
        while len(sparse) < fields * 2:
            if b"\n" not in buf:
                buf += tarfile.fileobj.read(BLOCKSIZE)
            number, buf = buf.split(b"\n", 1)
            sparse.append(int(number))
        next.offset_data = tarfile.fileobj.tell()
        next.sparse = list(zip(sparse[::2], sparse[1::2]))
    def _apply_pax_info(self, pax_headers, encoding, errors):
        """Replace fields with supplemental information from a previous
           pax extended or global header.
        """
        for keyword, value in pax_headers.items():
            if keyword == "GNU.sparse.name":
                setattr(self, "path", value)
            elif keyword == "GNU.sparse.size":
                setattr(self, "size", int(value))
            elif keyword == "GNU.sparse.realsize":
                setattr(self, "size", int(value))
            elif keyword in PAX_FIELDS:
                if keyword in PAX_NUMBER_FIELDS:
                    try:
                        value = PAX_NUMBER_FIELDS[keyword](value)
                    except ValueError:
                        value = 0
                if keyword == "path":
                    value = value.rstrip("/")
                setattr(self, keyword, value)
        self.pax_headers = pax_headers.copy()
    def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
        """Decode a single field from a pax record.
        """
        try:
            return value.decode(encoding, "strict")
        except UnicodeDecodeError:
            return value.decode(fallback_encoding, fallback_errors)
    def _block(self, count):
        """Round up a byte count by BLOCKSIZE and return it,
           e.g. _block(834) => 1024.
        """
        blocks, remainder = divmod(count, BLOCKSIZE)
        if remainder:
            blocks += 1
        return blocks * BLOCKSIZE
    # Convenience type predicates used by extraction code.
    def isreg(self):
        return self.type in REGULAR_TYPES
    def isfile(self):
        return self.isreg()
    def isdir(self):
        return self.type == DIRTYPE
    def issym(self):
        return self.type == SYMTYPE
    def islnk(self):
        return self.type == LNKTYPE
    def ischr(self):
        return self.type == CHRTYPE
    def isblk(self):
        return self.type == BLKTYPE
    def isfifo(self):
        return self.type == FIFOTYPE
    def issparse(self):
        return self.sparse is not None
    def isdev(self):
        return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
    def __init__(self, name=None, mode="r", fileobj=None, format=None,
            tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
            errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
        """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
           read from an existing archive, 'a' to append data to an existing
           file or 'w' to create a new file overwriting an existing one. `mode'
           defaults to 'r'.
           If `fileobj' is given, it is used for reading or writing data. If it
           can be determined, `mode' is overridden by `fileobj's mode.
           `fileobj' is not closed, when TarFile is closed.
        """
        if len(mode) > 1 or mode not in "raw":
            raise ValueError("mode must be 'r', 'a' or 'w'")
        self.mode = mode
        # _mode is the low-level binary open mode for the archive file.
        self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
        if not fileobj:
            if self.mode == "a" and not os.path.exists(name):
                # Create nonexistent files in append mode.
                self.mode = "w"
                self._mode = "wb"
            fileobj = bltn_open(name, self._mode)
            # We opened the file, so close() must close it again.
            self._extfileobj = False
        else:
            if name is None and hasattr(fileobj, "name"):
                name = fileobj.name
            if hasattr(fileobj, "mode"):
                self._mode = fileobj.mode
            # The caller owns fileobj; close() must leave it open.
            self._extfileobj = True
        self.name = os.path.abspath(name) if name else None
        self.fileobj = fileobj
        # Init attributes.  Keyword arguments override the class-level
        # defaults only when explicitly given.
        if format is not None:
            self.format = format
        if tarinfo is not None:
            self.tarinfo = tarinfo
        if dereference is not None:
            self.dereference = dereference
        if ignore_zeros is not None:
            self.ignore_zeros = ignore_zeros
        if encoding is not None:
            self.encoding = encoding
        self.errors = errors
        if pax_headers is not None and self.format == PAX_FORMAT:
            self.pax_headers = pax_headers
        else:
            self.pax_headers = {}
        if debug is not None:
            self.debug = debug
        if errorlevel is not None:
            self.errorlevel = errorlevel
        # Init datastructures.
        self.closed = False
        self.members = []       # list of members as TarInfo objects
        self._loaded = False    # flag if all members have been read
        self.offset = self.fileobj.tell()
                                # current position in the archive file
        self.inodes = {}        # dictionary caching the inodes of
                                # archive members already added
        try:
            if self.mode == "r":
                # Eagerly read the first member to validate the archive.
                self.firstmember = None
                self.firstmember = self.next()
            if self.mode == "a":
                # Move to the end of the archive,
                # before the first empty block.
                while True:
                    self.fileobj.seek(self.offset)
                    try:
                        tarinfo = self.tarinfo.fromtarfile(self)
                        self.members.append(tarinfo)
                    except EOFHeaderError:
                        self.fileobj.seek(self.offset)
                        break
                    except HeaderError as e:
                        raise ReadError(str(e))
            if self.mode in "aw":
                self._loaded = True
                if self.pax_headers:
                    # Emit a pax global header that applies to all
                    # following members.
                    buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
                    self.fileobj.write(buf)
                    self.offset += len(buf)
        except:
            # On any failure, release the file we opened ourselves and
            # mark the object closed before propagating the error.
            if not self._extfileobj:
                self.fileobj.close()
            self.closed = True
            raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
    @classmethod
    def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
        """Open a tar archive for reading, writing or appending. Return
           an appropriate TarFile class.
           mode:
           'r' or 'r:*' open for reading with transparent compression
           'r:'         open for reading exclusively uncompressed
           'r:gz'       open for reading with gzip compression
           'r:bz2'      open for reading with bzip2 compression
           'a' or 'a:'  open for appending, creating the file if necessary
           'w' or 'w:'  open for writing without compression
           'w:gz'       open for writing with gzip compression
           'w:bz2'      open for writing with bzip2 compression
           'r|*'        open a stream of tar blocks with transparent compression
           'r|'         open an uncompressed stream of tar blocks for reading
           'r|gz'       open a gzip compressed stream of tar blocks
           'r|bz2'      open a bzip2 compressed stream of tar blocks
           'w|'         open an uncompressed stream for writing
           'w|gz'       open a gzip compressed stream for writing
           'w|bz2'      open a bzip2 compressed stream for writing
        """
        if not name and not fileobj:
            raise ValueError("nothing to open")
        if mode in ("r", "r:*"):
            # Find out which *open() is appropriate for opening the file.
            for comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
                if fileobj is not None:
                    # Remember the position so a failed attempt can be
                    # rewound before trying the next compression type.
                    saved_pos = fileobj.tell()
                try:
                    return func(name, "r", fileobj, **kwargs)
                except (ReadError, CompressionError) as e:
                    if fileobj is not None:
                        fileobj.seek(saved_pos)
                    continue
            raise ReadError("file could not be opened successfully")
        elif ":" in mode:
            filemode, comptype = mode.split(":", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"
            # Select the *open() function according to
            # given compression.
            if comptype in cls.OPEN_METH:
                func = getattr(cls, cls.OPEN_METH[comptype])
            else:
                raise CompressionError("unknown compression type %r" % comptype)
            return func(name, filemode, fileobj, **kwargs)
        elif "|" in mode:
            # Stream mode: non-seekable, block-by-block access.
            filemode, comptype = mode.split("|", 1)
            filemode = filemode or "r"
            comptype = comptype or "tar"
            if filemode not in "rw":
                raise ValueError("mode must be 'r' or 'w'")
            stream = _Stream(name, filemode, comptype, fileobj, bufsize)
            try:
                t = cls(name, filemode, stream, **kwargs)
            except:
                stream.close()
                raise
            # The TarFile owns the _Stream wrapper and must close it.
            t._extfileobj = False
            return t
        elif mode in "aw":
            return cls.taropen(name, mode, fileobj, **kwargs)
        raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
    @classmethod
    def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
        """Open gzip compressed tar archive name for reading or writing.
           Appending is not allowed.
        """
        if len(mode) > 1 or mode not in "rw":
            raise ValueError("mode must be 'r' or 'w'")
        try:
            import gzip
            gzip.GzipFile
        except (ImportError, AttributeError):
            raise CompressionError("gzip module is not available")
        # Track whether the caller supplied fileobj: only a file we
        # created here (the GzipFile wrapper) may be closed on error.
        extfileobj = fileobj is not None
        try:
            fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
            t = cls.taropen(name, mode, fileobj, **kwargs)
        except IOError:
            if not extfileobj and fileobj is not None:
                fileobj.close()
            if fileobj is None:
                # GzipFile() itself failed; propagate the original error.
                raise
            raise ReadError("not a gzip file")
        except:
            if not extfileobj and fileobj is not None:
                fileobj.close()
            raise
        t._extfileobj = extfileobj
        return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
    def gettarinfo(self, name=None, arcname=None, fileobj=None):
        """Create a TarInfo object for either the file `name' or the file
           object `fileobj' (using os.fstat on its file descriptor). You can
           modify some of the TarInfo's attributes before you add it using
           addfile(). If given, `arcname' specifies an alternative name for the
           file in the archive.

           Returns None for file types that cannot be represented in a tar
           archive (e.g. sockets).
        """
        self._check("aw")
        # When fileobj is given, replace name by
        # fileobj's real name.
        if fileobj is not None:
            name = fileobj.name
        # Building the name of the member in the archive.
        # Backward slashes are converted to forward slashes,
        # Absolute paths are turned to relative paths.
        if arcname is None:
            arcname = name
        drv, arcname = os.path.splitdrive(arcname)
        arcname = arcname.replace(os.sep, "/")
        arcname = arcname.lstrip("/")
        # Now, fill the TarInfo object with
        # information specific for the file.
        tarinfo = self.tarinfo()
        tarinfo.tarfile = self
        # Use os.stat or os.lstat, depending on platform
        # and if symlinks shall be resolved.
        if fileobj is None:
            if hasattr(os, "lstat") and not self.dereference:
                statres = os.lstat(name)
            else:
                statres = os.stat(name)
        else:
            statres = os.fstat(fileobj.fileno())
        linkname = ""
        stmd = statres.st_mode
        if stat.S_ISREG(stmd):
            inode = (statres.st_ino, statres.st_dev)
            if not self.dereference and statres.st_nlink > 1 and \
                    inode in self.inodes and arcname != self.inodes[inode]:
                # Is it a hardlink to an already
                # archived file?
                type = LNKTYPE
                linkname = self.inodes[inode]
            else:
                # The inode is added only if its valid.
                # For win32 it is always 0.
                type = REGTYPE
                if inode[0]:
                    self.inodes[inode] = arcname
        elif stat.S_ISDIR(stmd):
            type = DIRTYPE
        elif stat.S_ISFIFO(stmd):
            type = FIFOTYPE
        elif stat.S_ISLNK(stmd):
            type = SYMTYPE
            linkname = os.readlink(name)
        elif stat.S_ISCHR(stmd):
            type = CHRTYPE
        elif stat.S_ISBLK(stmd):
            type = BLKTYPE
        else:
            # Sockets and other exotic types have no tar representation.
            return None
        # Fill the TarInfo object with all
        # information we can get.
        tarinfo.name = arcname
        tarinfo.mode = stmd
        tarinfo.uid = statres.st_uid
        tarinfo.gid = statres.st_gid
        if type == REGTYPE:
            tarinfo.size = statres.st_size
        else:
            # Only regular files carry data; everything else has size 0.
            tarinfo.size = 0
        tarinfo.mtime = statres.st_mtime
        tarinfo.type = type
        tarinfo.linkname = linkname
        # Translate numeric ids to names where the platform supports it;
        # unknown ids simply keep their numeric form.
        if pwd:
            try:
                tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
            except KeyError:
                pass
        if grp:
            try:
                tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
            except KeyError:
                pass
        if type in (CHRTYPE, BLKTYPE):
            # Record device numbers for character/block devices.
            if hasattr(os, "major") and hasattr(os, "minor"):
                tarinfo.devmajor = os.major(statres.st_rdev)
                tarinfo.devminor = os.minor(statres.st_rdev)
        return tarinfo
    def list(self, verbose=True):
        """Print a table of contents to sys.stdout. If `verbose' is False, only
           the names of the members are printed. If it is True, an `ls -l'-like
           output is produced.
        """
        self._check()
        for tarinfo in self:
            if verbose:
                # Permission bits and owner/group (names preferred, numeric
                # ids as fallback), mirroring `ls -l`.
                print(filemode(tarinfo.mode), end=' ')
                print("%s/%s" % (tarinfo.uname or tarinfo.uid,
                                 tarinfo.gname or tarinfo.gid), end=' ')
                if tarinfo.ischr() or tarinfo.isblk():
                    # Device files show "major,minor" instead of a size.
                    print("%10s" % ("%d,%d" \
                                    % (tarinfo.devmajor, tarinfo.devminor)), end=' ')
                else:
                    print("%10d" % tarinfo.size, end=' ')
                print("%d-%02d-%02d %02d:%02d:%02d" \
                      % time.localtime(tarinfo.mtime)[:6], end=' ')
            # The member name is printed in both modes; directories get a
            # trailing slash.
            print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
            if verbose:
                if tarinfo.issym():
                    print("->", tarinfo.linkname, end=' ')
                if tarinfo.islnk():
                    print("link to", tarinfo.linkname, end=' ')
            print()
    def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
        """Add the file `name' to the archive. `name' may be any type of file
           (directory, fifo, symbolic link, etc.). If given, `arcname'
           specifies an alternative name for the file in the archive.
           Directories are added recursively by default. This can be avoided by
           setting `recursive' to False. `exclude' is a function that should
           return True for each filename to be excluded. `filter' is a function
           that expects a TarInfo object argument and returns the changed
           TarInfo object, if it returns None the TarInfo object will be
           excluded from the archive.
        """
        self._check("aw")
        if arcname is None:
            arcname = name
        # Exclude pathnames.
        if exclude is not None:
            # `exclude' is deprecated in favor of `filter'.
            import warnings
            warnings.warn("use the filter argument instead",
                          DeprecationWarning, 2)
            if exclude(name):
                self._dbg(2, "tarfile: Excluded %r" % name)
                return
        # Skip if somebody tries to archive the archive...
        if self.name is not None and os.path.abspath(name) == self.name:
            self._dbg(2, "tarfile: Skipped %r" % name)
            return
        self._dbg(1, name)
        # Create a TarInfo object from the file.
        tarinfo = self.gettarinfo(name, arcname)
        if tarinfo is None:
            # gettarinfo() returns None for unsupported types (e.g. sockets).
            self._dbg(1, "tarfile: Unsupported type %r" % name)
            return
        # Change or exclude the TarInfo object.
        if filter is not None:
            tarinfo = filter(tarinfo)
            if tarinfo is None:
                self._dbg(2, "tarfile: Excluded %r" % name)
                return
        # Append the tar header and data to the archive.
        if tarinfo.isreg():
            # Regular file: header plus file content.
            f = bltn_open(name, "rb")
            self.addfile(tarinfo, f)
            f.close()
        elif tarinfo.isdir():
            self.addfile(tarinfo)
            if recursive:
                # Recurse into the directory, forwarding all options.
                for f in os.listdir(name):
                    self.add(os.path.join(name, f), os.path.join(arcname, f),
                             recursive, exclude, filter=filter)
        else:
            # Links, fifos, devices: header only, no data.
            self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj' should always be opened with mode
'rb' to avoid irritation about the file size.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
    def extractall(self, path=".", members=None):
        """Extract all members from the archive to the current working
           directory and set owner, modification time and permissions on
           directories afterwards. `path' specifies a different directory
           to extract to. `members' is optional and must be a subset of the
           list returned by getmembers().
        """
        directories = []
        if members is None:
            members = self
        for tarinfo in members:
            if tarinfo.isdir():
                # Extract directories with a safe mode.
                directories.append(tarinfo)
                tarinfo = copy.copy(tarinfo)
                tarinfo.mode = 0o700
            # Do not set_attrs directories, as we will do that further down
            self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
        # Reverse sort directories.
        # Deepest paths come first so a parent's times/permissions are only
        # fixed up after all of its children have been written.
        directories.sort(key=lambda a: a.name)
        directories.reverse()
        # Set correct owner, mtime and filemode on directories.
        for tarinfo in directories:
            dirpath = os.path.join(path, tarinfo.name)
            try:
                self.chown(tarinfo, dirpath)
                self.utime(tarinfo, dirpath)
                self.chmod(tarinfo, dirpath)
            except ExtractError as e:
                # errorlevel <= 1 demotes attribute failures to debug output.
                if self.errorlevel > 1:
                    raise
                else:
                    self._dbg(1, "tarfile: %s" % e)
    def extract(self, member, path="", set_attrs=True):
        """Extract a member from the archive to the current working directory,
           using its full name. Its file information is extracted as accurately
           as possible. `member' may be a filename or a TarInfo object. You can
           specify a different directory using `path'. File attributes (owner,
           mtime, mode) are set unless `set_attrs' is False.
        """
        self._check("r")
        if isinstance(member, str):
            tarinfo = self.getmember(member)
        else:
            tarinfo = member
        # Prepare the link target for makelink().
        if tarinfo.islnk():
            tarinfo._link_target = os.path.join(path, tarinfo.linkname)
        try:
            self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
                                 set_attrs=set_attrs)
        except EnvironmentError as e:
            # errorlevel 0 only logs OS errors; higher levels re-raise.
            if self.errorlevel > 0:
                raise
            else:
                if e.filename is None:
                    self._dbg(1, "tarfile: %s" % e.strerror)
                else:
                    self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
        except ExtractError as e:
            # ExtractError is non-fatal unless errorlevel is 2 or more.
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
    def _extract_member(self, tarinfo, targetpath, set_attrs=True):
        """Extract the TarInfo object tarinfo to a physical
           file called targetpath.
        """
        # Fetch the TarInfo object for the given name
        # and build the destination pathname, replacing
        # forward slashes to platform specific separators.
        targetpath = targetpath.rstrip("/")
        targetpath = targetpath.replace("/", os.sep)
        # Create all upper directories.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            # Create directories that are not part of the archive with
            # default permissions.
            os.makedirs(upperdirs)
        if tarinfo.islnk() or tarinfo.issym():
            self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
        else:
            self._dbg(1, tarinfo.name)
        # Dispatch on the member type; each make*() method can be overridden
        # in a subclass to customize extraction.
        if tarinfo.isreg():
            self.makefile(tarinfo, targetpath)
        elif tarinfo.isdir():
            self.makedir(tarinfo, targetpath)
        elif tarinfo.isfifo():
            self.makefifo(tarinfo, targetpath)
        elif tarinfo.ischr() or tarinfo.isblk():
            self.makedev(tarinfo, targetpath)
        elif tarinfo.islnk() or tarinfo.issym():
            self.makelink(tarinfo, targetpath)
        elif tarinfo.type not in SUPPORTED_TYPES:
            self.makeunknown(tarinfo, targetpath)
        else:
            self.makefile(tarinfo, targetpath)
        if set_attrs:
            # Apply ownership, permissions and mtime recorded in the archive
            # (mode/mtime are skipped for symlinks, which don't carry them).
            self.chown(tarinfo, targetpath)
            if not tarinfo.issym():
                self.chmod(tarinfo, targetpath)
                self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
    def makefile(self, tarinfo, targetpath):
        """Make a file called targetpath.
        """
        source = self.fileobj
        source.seek(tarinfo.offset_data)
        target = bltn_open(targetpath, "wb")
        if tarinfo.sparse is not None:
            # Sparse member: write each data segment at its recorded offset;
            # the holes in between are left unwritten.
            for offset, size in tarinfo.sparse:
                target.seek(offset)
                copyfileobj(source, target, size)
        else:
            copyfileobj(source, target, tarinfo.size)
        # Ensure the file is exactly tarinfo.size bytes long, even when the
        # sparse map did not extend to the end of the file.
        target.seek(tarinfo.size)
        target.truncate()
        target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except symlink_exception:
if tarinfo.issym():
linkpath = os.path.join(os.path.dirname(tarinfo.name),
tarinfo.linkname)
else:
linkpath = tarinfo.linkname
else:
try:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
except KeyError:
raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
    def next(self):
        """Return the next member of the archive as a TarInfo object, when
           TarFile is opened for reading. Return None if there is no more
           available.
        """
        self._check("ra")
        if self.firstmember is not None:
            # A member was already read ahead (e.g. while detecting the
            # archive format); hand it out first.
            m = self.firstmember
            self.firstmember = None
            return m
        # Read the next block.
        self.fileobj.seek(self.offset)
        tarinfo = None
        while True:
            try:
                tarinfo = self.tarinfo.fromtarfile(self)
            except EOFHeaderError as e:
                # With ignore_zeros, skip over stray zero blocks and retry.
                if self.ignore_zeros:
                    self._dbg(2, "0x%X: %s" % (self.offset, e))
                    self.offset += BLOCKSIZE
                    continue
            except InvalidHeaderError as e:
                if self.ignore_zeros:
                    self._dbg(2, "0x%X: %s" % (self.offset, e))
                    self.offset += BLOCKSIZE
                    continue
                elif self.offset == 0:
                    # A broken header at the very start means this is not
                    # a tar file at all.
                    raise ReadError(str(e))
            except EmptyHeaderError:
                if self.offset == 0:
                    raise ReadError("empty file")
            except TruncatedHeaderError as e:
                if self.offset == 0:
                    raise ReadError(str(e))
            except SubsequentHeaderError as e:
                raise ReadError(str(e))
            break
        if tarinfo is not None:
            self.members.append(tarinfo)
        else:
            # End of archive reached: all members are known now.
            self._loaded = True
        return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter(object):
    """Iterator Class.
       for tarinfo in TarFile(...):
           suite...
    """
    def __init__(self, tarfile):
        """Remember the TarFile to iterate and start at the first member."""
        self.tarfile = tarfile
        self.index = 0

    def __iter__(self):
        """An iterator is its own iterator."""
        return self

    def __next__(self):
        """Return the next item using TarFile's next() method.
           When all members have been read, set TarFile as _loaded.
        """
        # Fix for SF #1100429: Under rare circumstances it can
        # happen that getmembers() is called during iteration,
        # which will cause TarIter to stop prematurely.
        if self.tarfile._loaded:
            # Everything is cached; serve members straight from the list.
            try:
                tarinfo = self.tarfile.members[self.index]
            except IndexError:
                raise StopIteration
        else:
            tarinfo = self.tarfile.next()
            if not tarinfo:
                self.tarfile._loaded = True
                raise StopIteration
        self.index += 1
        return tarinfo

    next = __next__  # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
    """Return True if name points to a tar archive that we
       are able to handle, else return False.
    """
    # `open` here is the module-level name, i.e. TarFile.open.
    try:
        archive = open(name)
        archive.close()
    except TarError:
        return False
    return True
# Keep a reference to the builtin open(), then shadow the module-level name
# `open` with TarFile.open so that `tarfile.open(...)` works as documented.
bltn_open = open
open = TarFile.open
| mit |
llaera/namebench | libnamebench/geoip.py | 171 | 4650 | # Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class used for determining GeoIP location."""
import csv
import re
import tempfile
# external dependencies (from nb_third_party)
import httplib2
import math
import simplejson
import util
def GetFromGoogleLocAPI():
  """Use the Google Loc JSON API from Google Gears.

  Returns:
    A dictionary of geolocation fields (region/country/city names and codes,
    latitude, longitude) tagged with {'source': 'gloc'}, or {} on failure.

  NOTE: This is in violation of the Gears Terms of Service. See:
  http://code.google.com/p/gears/wiki/GeolocationAPI
  """
  # Cache HTTP responses in the temp directory; 10 second network timeout.
  h = httplib2.Http(tempfile.gettempdir(), timeout=10)
  url = 'http://www.google.com/loc/json'
  post_data = {'request_address': 'true', 'version': '1.1.0', 'source': 'namebench'}
  unused_resp, content = h.request(url, 'POST', simplejson.dumps(post_data))
  try:
    data = simplejson.loads(content)['location']
    return {
        'region_name': data['address'].get('region'),
        'country_name': data['address'].get('country'),
        'country_code': data['address'].get('country_code'),
        'city': data['address'].get('city'),
        'latitude': data['latitude'],
        'longitude': data['longitude'],
        'source': 'gloc'
    }
  except:
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # consider narrowing to Exception.
    print '* Failed to use GoogleLocAPI: %s (content: %s)' % (util.GetLastExceptionString(), content)
    return {}
def GetFromMaxmindJSAPI():
  """Fetch geolocation data from the MaxMind geoip.js endpoint.

  Returns:
    A dictionary of scraped geolocation fields tagged with
    {'source': 'mmind'}, or {} if the response yielded no usable fields.
  """
  h = httplib2.Http(tempfile.gettempdir(), timeout=10)
  unused_resp, content = h.request('http://j.maxmind.com/app/geoip.js', 'GET')
  # The response is JavaScript like geoip_city('Oslo'); scrape the quoted
  # values for the keys we care about.
  keep = ['region_name', 'country_name', 'city', 'latitude', 'longitude', 'country_code']
  results = dict([x for x in re.findall("geoip_(.*?)\(.*?\'(.*?)\'", content) if x[0] in keep])
  # Bug fix: the 'source' tag used to be added *before* the emptiness check,
  # which made the `return {}` branch unreachable. Tag only real results.
  if not results:
    return {}
  results.update({'source': 'mmind'})
  return results
def GetGeoData():
  """Get geodata from any means necessary. Sanitize as necessary.

  Returns:
    A geolocation dictionary with latitude/longitude truncated to three
    decimal places, or {} if no provider returned usable data.
  """
  try:
    json_data = GetFromGoogleLocAPI()
    if not json_data:
      # Fall back to MaxMind when the Google API yields nothing.
      json_data = GetFromMaxmindJSAPI()
    # Make our data less accurate. We don't need any more than that.
    json_data['latitude'] = '%.3f' % float(json_data['latitude'])
    json_data['longitude'] = '%.3f' % float(json_data['longitude'])
    return json_data
  except:
    # NOTE(review): bare except hides all failures (including a missing
    # 'latitude' key when both providers fail); consider `except Exception`.
    print 'Failed to get Geodata: %s' % util.GetLastExceptionString()
    return {}
def GetInfoForCountry(country_name_or_code):
  """Get code, name, lat and lon for a given country name or code.

  Args:
    country_name_or_code: a two-letter ISO code, or a (possibly partial)
      country name.

  Returns:
    A (code, name, lat, lon) tuple, or (None, None, None, None) when
    nothing matches. NOTE(review): falsy input returns bare None instead of
    a 4-tuple -- callers must handle both shapes.
  """
  match = False
  partial_match = False
  if not country_name_or_code:
    return None
  # Two characters are treated as an ISO code; anything else as a name.
  if len(country_name_or_code) == 2:
    country_code = country_name_or_code.upper()
    country_name = False
  else:
    country_name = country_name_or_code
    country_code = False
  for row in ReadCountryData():
    lat, lon = row['coords'].split(',')
    if country_code:
      if row['code'] == country_code:
        return row['code'], row['name'], lat, lon
    elif country_name:
      # Matches are tried from strictest to loosest: exact name,
      # "Name (...)", "(Name)", prefix, then substring. Only the strict
      # forms return immediately; the loose ones are remembered.
      if re.match("^%s$" % country_name, row['name'], re.I):
        return row['code'], row['name'], lat, lon
      elif re.search('^%s \(' % country_name, row['name'], re.I):
        return row['code'], row['name'], lat, lon
      elif re.search('\(%s\)' % country_name, row['name'], re.I):
        return row['code'], row['name'], lat, lon
      elif re.match("^%s" % country_name, row['name'], re.I):
        match = (row['code'], row['name'], lat, lon)
      elif re.search(country_name, row['name'], re.I):
        partial_match = (row['code'], row['name'], lat, lon)
  if match:
    print "Could not find explicit entry for '%s', good match: %s" % (country_name_or_code, match)
    return match
  elif partial_match:
    print "Could not find explicit entry for '%s', partial match: %s" % (country_name_or_code, partial_match)
    return partial_match
  else:
    print "'%s' does not match any countries in our list." % country_name_or_code
    return (None, None, None, None)
def ReadCountryData(filename='data/countries.csv'):
  """Read country data file, yielding rows of information.

  Args:
    filename: relative path to a CSV file of name,code,"lat,lon" rows.

  Yields:
    Dicts with 'name', 'code' and 'coords' keys.
  """
  country_file = util.FindDataFile(filename)
  # NOTE(review): the file handle is never closed explicitly; fine for a
  # short-lived CLI tool, but a with-statement would be cleaner.
  for row in csv.DictReader(open(country_file), fieldnames=['name', 'code', 'coords']):
    yield row
| apache-2.0 |
Goggin/djeasyroute | djeasyroute/__init__.py | 1 | 2026 | from django.conf.urls import url
from functools import wraps
import re, inspect
def route(path, name=None):
    """Decorator that records a URL route on the decorated view function.

    Each application of @route prepends a {'path': ..., 'name': ...} entry
    to the function's __routes__ list, so stacked decorators end up listed
    in top-to-bottom source order.
    """
    def decorator(view):
        @wraps(view)
        def wrapper(*args, **kwargs):
            return view(*args, **kwargs)
        if not hasattr(wrapper, '__routes__'):
            wrapper.__routes__ = []
        wrapper.__routes__.insert(0, {'path': path, 'name': name})
        return wrapper
    return decorator
class EasyRoute(object):
    """Mixin that turns @route-decorated methods into Django url() patterns."""
    # Regex templates mapping a <name:type> placeholder type to a named
    # capture group.
    _repl = {
        'int': r'(?P<{paramname}>\d+)',
        'str': r'(?P<{paramname}>[^/]+)',
        'all': r'(?P<{paramname}>.*)',
        'float': r'(?P<{paramname}>\d+(\.\d+)?)',
        'bool': r'(?P<{paramname}>[01tfTF]|[Tt][Rr][Uu][Ee]|[Ff][Aa][Ll][Ss][Ee])',
    }
    # Matches one path segment of the form <name> or <name:type>.
    _syntax = re.compile(r'\<(?P<paramname>[A-Za-z0-9_]+)(:(?P<type>[A-Za-z0-9]+))?\>')
    @property
    def urls(self):
        # Lazily build and cache the url() pattern list on first access.
        if not hasattr(self, '_urls'):
            self._build_urls()
        return self._urls
    def _build_urls(self):
        # Collect every bound method that carries a __routes__ attribute
        # (attached by the @route decorator) and register its routes.
        self._urls = []
        members = inspect.getmembers(self, predicate=inspect.ismethod)
        for n, m in members:
            if hasattr(m, '__routes__'):
                self.__register(getattr(self, n))
    def __register(self, fn):
        # Translate each recorded route path into a Django url() pattern,
        # converting <name:type> placeholders into named regex groups.
        routes = fn.__routes__
        for route in routes:
            path = route.get('path', None)
            name = route.get('name', None)
            items = path.split('/')
            r = []
            for i in items:
                m = EasyRoute._syntax.match(i)
                if m is not None:
                    paramname = m.group("paramname")
                    # Type defaults to 'str' when the :type part is omitted.
                    typ = m.group('type') or "str"
                    typ = typ.lower()
                    if not typ in EasyRoute._repl:
                        raise TypeError("{} is not a supported type for EasyRoute".format(typ))
                    r.append(EasyRoute._repl[typ].format(paramname=paramname))
                else:
                    # Literal path segment, used verbatim in the pattern.
                    r.append(i)
            # Anchor the joined segments as a complete path regex.
            self._urls.append(url(r'^' + r'/'.join(r) + r'$', fn, name=name))
| mit |
mxOBS/deb-pkg_trusty_chromium-browser | tools/perf/measurements/thread_times.py | 11 | 2012 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import timeline_controller
from metrics import timeline
from telemetry.core.platform import tracing_category_filter
from telemetry.page import page_test
from telemetry.web_perf.metrics import layout
class ThreadTimes(page_test.PageTest):
  """Page test that records per-thread timing metrics for page interactions.

  A trace is captured around the page's interactions; thread-time and layout
  metrics are then computed over the tab's renderer thread. When
  report_silk_details is set, full trace categories are collected so that
  per-detail Silk data can be reported.
  """
  def __init__(self, report_silk_details=False,
               action_name_to_run='RunPageInteractions'):
    super(ThreadTimes, self).__init__(action_name_to_run=action_name_to_run)
    self._timeline_controller = None
    self._report_silk_details = report_silk_details
  def WillNavigateToPage(self, page, tab):
    # Set up tracing before navigation so the whole interaction is captured.
    self._timeline_controller = timeline_controller.TimelineController()
    if self._report_silk_details:
      # We need the other traces in order to have any details to report.
      self._timeline_controller.trace_categories = None
    else:
      self._timeline_controller.trace_categories = \
          tracing_category_filter.CreateNoOverheadFilter().filter_string
    self._timeline_controller.SetUp(page, tab)
  def WillRunActions(self, page, tab):
    # Start trace collection just before the page's actions run.
    self._timeline_controller.Start(tab)
  def ValidateAndMeasurePage(self, page, tab, results):
    # Stop tracing, then compute thread-time and layout metrics over the
    # renderer thread belonging to this tab.
    self._timeline_controller.Stop(tab, results)
    metric = timeline.ThreadTimesTimelineMetric()
    renderer_thread = \
        self._timeline_controller.model.GetRendererThreadFromTabId(tab.id)
    if self._report_silk_details:
      metric.details_to_report = timeline.ReportSilkDetails
    metric.AddResults(self._timeline_controller.model, renderer_thread,
                      self._timeline_controller.smooth_records, results)
    layout_metric = layout.LayoutMetric()
    layout_metric.AddResults(self._timeline_controller.model, renderer_thread,
                             self._timeline_controller.smooth_records, results)
  def CleanUpAfterPage(self, _, tab):
    # Always tear tracing down, even when measurement failed.
    self._timeline_controller.CleanUp(tab)
| bsd-3-clause |
shsingh/ansible | lib/ansible/modules/network/f5/bigip_snmp_community.py | 38 | 29233 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_snmp_community
short_description: Manages SNMP communities on a BIG-IP.
description:
- Assists in managing SNMP communities on a BIG-IP. Different SNMP versions are supported
by this module. Take note of the different parameters offered by this module, as different
parameters work for different versions of SNMP. Typically this becomes an interest if you
are mixing versions C(v2c) and C(3).
version_added: 2.6
options:
state:
description:
- When C(present), ensures that the address list and entries exists.
- When C(absent), ensures the address list is removed.
type: str
choices:
- present
- absent
default: present
version:
description:
- Specifies to which Simple Network Management Protocol (SNMP) version the trap destination applies.
type: str
choices:
- v1
- v2c
- v3
default: v2c
name:
description:
- Name that identifies the SNMP community.
- When C(version) is C(v1) or C(v2c), this parameter is required.
- The name C(public) is a reserved name on the BIG-IP. This module handles that name differently
than others. Functionally, you should not see a difference however.
type: str
community:
description:
- Specifies the community string (password) for access to the MIB.
- This parameter is only relevant when C(version) is C(v1), or C(v2c). If C(version) is
something else, this parameter is ignored.
type: str
source:
description:
- Specifies the source address for access to the MIB.
- This parameter can accept a value of C(all).
- If this parameter is not specified, the value C(all) is used.
- This parameter is only relevant when C(version) is C(v1), or C(v2c). If C(version) is
something else, this parameter is ignored.
- If C(source) is set to C(all), then it is not possible to specify an C(oid). This will
raise an error.
- This parameter should be provided when C(state) is C(absent), so that the correct community
is removed. To remove the C(public) SNMP community that comes with a BIG-IP, this parameter
should be set to C(default).
type: str
port:
description:
- Specifies the port for the trap destination.
- This parameter is only relevant when C(version) is C(v1), or C(v2c). If C(version) is
something else, this parameter is ignored.
type: int
oid:
description:
- Specifies the object identifier (OID) for the record.
- When C(version) is C(v3), this parameter is required.
- When C(version) is either C(v1) or C(v2c), if this value is specified, then C(source)
must not be set to C(all).
type: str
access:
description:
- Specifies the user's access level to the MIB.
- When creating a new community, if this parameter is not specified, the default is C(ro).
- When C(ro), specifies that the user can view the MIB, but cannot modify the MIB.
- When C(rw), specifies that the user can view and modify the MIB.
type: str
choices:
- ro
- rw
- read-only
- read-write
ip_version:
description:
- Specifies whether the record applies to IPv4 or IPv6 addresses.
- When creating a new community, if this value is not specified, the default of C(4) will
be used.
- This parameter is only relevant when C(version) is C(v1), or C(v2c). If C(version) is
something else, this parameter is ignored.
type: str
choices:
- '4'
- '6'
snmp_username:
description:
- Specifies the name of the user for whom you want to grant access to the SNMP v3 MIB.
- This parameter is only relevant when C(version) is C(v3). If C(version) is something
else, this parameter is ignored.
- When creating a new SNMP C(v3) community, this parameter is required.
- This parameter cannot be changed once it has been set.
type: str
snmp_auth_protocol:
description:
- Specifies the authentication method for the user.
- When C(md5), specifies that the system uses the MD5 algorithm to authenticate the user.
- When C(sha), specifies that the secure hash algorithm (SHA) to authenticate the user.
- When C(none), specifies that user does not require authentication.
- When creating a new SNMP C(v3) community, if this parameter is not specified, the default
of C(sha) will be used.
type: str
choices:
- md5
- sha
- none
snmp_auth_password:
description:
- Specifies the password for the user.
- When creating a new SNMP C(v3) community, this parameter is required.
- This value must be at least 8 characters long.
type: str
snmp_privacy_protocol:
description:
- Specifies the encryption protocol.
- When C(aes), specifies that the system encrypts the user information using AES
(Advanced Encryption Standard).
- When C(des), specifies that the system encrypts the user information using DES
(Data Encryption Standard).
- When C(none), specifies that the system does not encrypt the user information.
- When creating a new SNMP C(v3) community, if this parameter is not specified, the
default of C(aes) will be used.
type: str
choices:
- aes
- des
- none
snmp_privacy_password:
description:
- Specifies the password for the user.
- When creating a new SNMP C(v3) community, this parameter is required.
- This value must be at least 8 characters long.
type: str
update_password:
description:
- C(always) will allow to update passwords if the user chooses to do so.
C(on_create) will only set the password for newly created resources.
type: str
choices:
- always
- on_create
default: always
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create an SMNP v2c read-only community
bigip_snmp_community:
name: foo
version: v2c
source: all
oid: .1
access: ro
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create an SMNP v3 read-write community
bigip_snmp_community:
name: foo
version: v3
snmp_username: foo
snmp_auth_protocol: sha
snmp_auth_password: secret
snmp_privacy_protocol: aes
snmp_privacy_password: secret
oid: .1
access: rw
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove the default 'public' SNMP community
bigip_snmp_community:
name: public
source: default
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
community:
description: The new community value.
returned: changed
type: str
sample: community1
oid:
description: The new OID value.
returned: changed
type: str
sample: .1
ip_version:
description: The new IP version value.
returned: changed
type: str
sample: .1
snmp_auth_protocol:
description: The new SNMP auth protocol.
returned: changed
type: str
sample: sha
snmp_privacy_protocol:
description: The new SNMP privacy protocol.
returned: changed
type: str
sample: aes
access:
description: The new access level for the MIB.
returned: changed
type: str
sample: ro
source:
description: The new source address to access the MIB.
returned: changed
type: str
sample: 1.1.1.1
snmp_username:
description: The new SNMP username.
returned: changed
type: str
sample: user1
snmp_auth_password:
description: The new password of the given snmp_username.
returned: changed
type: str
sample: secret1
snmp_privacy_password:
description: The new password of the given snmp_username.
returned: changed
type: str
sample: secret2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Shared parameter handling for SNMP v1/v2c and v3 resources.

    Declares the mapping between BIG-IP REST attribute names and this
    module's argument names, plus which keys are sent to, diffed against,
    and reported from the device.
    """

    # REST API attribute name -> module parameter name.
    api_map = {
        'communityName': 'community',
        'oidSubset': 'oid',
        'ipv6': 'ip_version',
        'authProtocol': 'snmp_auth_protocol',
        'privacyProtocol': 'snmp_privacy_protocol',
        'username': 'snmp_username',
        'securityLevel': 'security_level',
        'authPassword': 'snmp_auth_password',
        'privacyPassword': 'snmp_privacy_password',
    }

    # API-side attribute names included in payloads sent to the device.
    api_attributes = [
        'source',
        'oidSubset',
        'ipv6',
        'communityName',
        'access',
        'authPassword',
        'authProtocol',
        'username',
        'securityLevel',
        'privacyProtocol',
        'privacyPassword',
    ]

    # Module-side keys echoed back to the user when they change.
    returnables = [
        'community',
        'oid',
        'ip_version',
        'snmp_auth_protocol',
        'snmp_privacy_protocol',
        'access',
        'source',
        'snmp_username',
        'snmp_auth_password',
        'snmp_privacy_password',
    ]

    # Module-side keys compared between desired and current state.
    updatables = [
        'community',
        'oid',
        'ip_version',
        'snmp_auth_protocol',
        'snmp_privacy_protocol',
        'access',
        'source',
        'snmp_auth_password',
        'snmp_privacy_password',
        'security_level',
        'snmp_username',
    ]

    @property
    def port(self):
        # Coerce the 'port' value to int; None passes through untouched.
        # NOTE(review): 'port' does not appear in api_map/api_attributes
        # here — presumably it comes from the common connection arguments;
        # confirm it is actually consumed anywhere.
        if self._values['port'] is None:
            return None
        return int(self._values['port'])
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def ip_version(self):
        """Translate the API's 'enabled'/'disabled' ipv6 flag into 6 or 4."""
        raw = self._values['ip_version']
        if raw is None:
            return None
        return 6 if raw == 'enabled' else 4

    @property
    def source(self):
        """The API omits the attribute entirely when the source is 'all'."""
        raw = self._values['source']
        return 'all' if raw is None else raw
class ModuleParameters(Parameters):
    """Parameters as supplied by the user, normalized for comparison."""

    @property
    def ip_version(self):
        """Coerce the user-supplied version string ('4'/'6') to an int."""
        raw = self._values['ip_version']
        return None if raw is None else int(raw)

    @property
    def source(self):
        """An explicit empty string means 'all'; None stays None."""
        raw = self._values['source']
        if raw is None:
            return None
        return 'all' if raw == '' else raw

    @property
    def access(self):
        """Normalize long-form access names to the API's 'ro'/'rw'."""
        raw = self._values['access']
        if raw is None:
            return None
        if raw in ('ro', 'read-only'):
            return 'ro'
        if raw in ('rw', 'read-write'):
            return 'rw'
        raise F5ModuleError(
            "Unknown access format specified: '{0}'.".format(raw)
        )

    @property
    def snmp_auth_password(self):
        """Enforce the device's 8-character minimum for auth passwords."""
        password = self._values['snmp_auth_password']
        if password is None:
            return None
        if len(password) < 8:
            raise F5ModuleError(
                "snmp_auth_password must be at least 8 characters long."
            )
        return password

    @property
    def snmp_privacy_password(self):
        """Enforce the device's 8-character minimum for privacy passwords."""
        password = self._values['snmp_privacy_password']
        if password is None:
            return None
        if len(password) < 8:
            raise F5ModuleError(
                "snmp_privacy_password must be at least 8 characters long."
            )
        return password

    @property
    def name(self):
        """The built-in 'public' community is stored as 'comm-public'."""
        if self._values['name'] == 'public':
            return 'comm-public'
        return self._values['name']
class Changes(Parameters):
    """Base class for computed change sets."""

    def to_return(self):
        """Collect all returnable values into a filtered dict.

        Errors while collecting are deliberately swallowed so that a
        partially-populated change set still reports what it can.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Changes translated into the representation the API expects."""

    @property
    def ip_version(self):
        """Map the integer IP version back to the API's ipv6 flag."""
        version = self._values['ip_version']
        if version is None:
            return None
        return 'disabled' if version == 4 else 'enabled'

    @property
    def source(self):
        """The API expects an empty string where the module says 'all'."""
        src = self._values['source']
        if src is None:
            return None
        return '' if src == 'all' else src
class ReportableChanges(Changes):
    # No post-processing needed; changed values are reported to the user
    # exactly as computed.
    pass
class Difference(object):
    """Computes which attributes differ between desired and current state."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None if unchanged.

        Delegates to a same-named property on this class when one exists,
        otherwise falls back to a plain inequality check.
        """
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Report want's value when it differs from have's, or when have
        # does not carry the attribute at all.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            return attr1

    def _check_source_and_oid(self):
        # NOTE(review): both branches below raise on the identical condition
        # (source == 'all' and oid != ''), and the second one fires
        # regardless of whether the device already has an OID. The second
        # check looks like it was meant to test a different condition
        # (e.g. want.oid is not None when have.oid is None) — confirm
        # against upstream intent before changing.
        if self.have.oid is not None:
            if self.want.source == 'all' and self.want.oid != '':
                raise F5ModuleError(
                    "When specifying an 'all' source for a resource with an existing OID, you must specify a new, empty, OID."
                )
        if self.want.source == 'all' and self.want.oid != '':
            raise F5ModuleError(
                "When specifying an 'all' source for a resource, you may not specify an OID."
            )

    @property
    def source(self):
        # source and oid are interdependent on the device, so validate the
        # combination before diffing either one.
        self._check_source_and_oid()
        if self.want.source != self.have.source:
            return self.want.source

    @property
    def oid(self):
        self._check_source_and_oid()
        if self.want.oid != self.have.oid:
            return self.want.oid

    @property
    def snmp_privacy_password(self):
        # Passwords cannot be read back from the device, so only report a
        # change when the user asked to always update them.
        if self.want.update_password == 'always' and self.want.snmp_privacy_password is not None:
            return self.want.snmp_privacy_password

    @property
    def snmp_auth_password(self):
        if self.want.update_password == 'always' and self.want.snmp_auth_password is not None:
            return self.want.snmp_auth_password
class ModuleManager(object):
    """Dispatches execution to the v1/v2c or v3 manager implementation."""

    def __init__(self, *args, **kwargs):
        self.kwargs = kwargs
        self.client = kwargs.get('client', None)
        self.module = kwargs.get('module', None)

    def exec_module(self):
        """Pick the manager matching the requested SNMP version and run it."""
        flavor = 'v1' if self.version_is_less_than_3() else 'v2'
        return self.get_manager(flavor).exec_module()

    def get_manager(self, type):
        """Instantiate the manager class for *type* ('v1' or 'v2')."""
        managers = {
            'v1': V1Manager,
            'v2': V2Manager,
        }
        if type in managers:
            return managers[type](**self.kwargs)

    def version_is_less_than_3(self):
        """True for SNMP v1/v2c; only 'v3' uses the user-based manager."""
        return self.module.params.get('version') != 'v3'
class BaseManager(object):
    """Common create/update/delete state machine shared by V1/V2 managers.

    Subclasses supply the device-specific REST operations (exists,
    create_on_device, update_on_device, remove_from_device,
    read_current_from_device).
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create: every user-supplied returnable counts as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # On update: diff desired vs. current state and collect differences.
        # Returns True when anything changed.
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict result carries multiple related keys at once.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        """Return True when the diff against the device found changes."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Drive the present/absent state machine and build the result."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward any deprecation warnings collected in the result to
        # Ansible's deprecation machinery.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the resource exists, creating or updating as needed."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def update(self):
        """Update the existing resource; no-op when nothing changed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # In check mode, report the change without touching the device.
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the resource and verify it is really gone."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def absent(self):
        """Ensure the resource does not exist."""
        if self.exists():
            return self.remove()
        return False
class V1Manager(BaseManager):
    """Handles SNMP v1 and v2c community resources.

    v1/v2c communities live under /mgmt/tm/sys/snmp/communities and are
    keyed by the community name.
    """

    def create(self):
        """Create the community, defaulting ip_version=4 and access=ro."""
        if self.want.ip_version is None:
            self.want.update({'ip_version': 4})
        if self.want.access is None:
            self.want.update({'access': 'ro'})
        self._set_changed_options()
        if self.want.oid is not None and self.want.source == 'all':
            # Fixed the grammar of this error message ("When specify" ->
            # "When specifying"); the condition itself is unchanged.
            raise F5ModuleError(
                "When specifying an oid, source may not be set to 'all'."
            )
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def exists(self):
        """Return True when the community already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/communities/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as absent rather than failing hard.
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        """POST the new community resource to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/communities/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH only the changed attributes onto the existing resource."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/communities/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the resource; returns True on HTTP 200."""
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/communities/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True

    def read_current_from_device(self):
        """GET the resource's current state wrapped in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/communities/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class V2Manager(BaseManager):
    """Handles SNMP v3.

    SNMP v3 has (almost) a completely separate set of variables than v2c
    or v1, and its resources live under /mgmt/tm/sys/snmp/users keyed by
    the SNMP username rather than the community name. The functionality
    is placed in this separate class to handle these differences.
    """

    def create(self):
        """Create the v3 user, enforcing the v3-specific required fields."""
        if self.want.access is None:
            self.want.update({'access': 'ro'})
        if self.want.snmp_auth_protocol is None:
            self.want.update({'snmp_auth_protocol': 'sha'})
        if self.want.snmp_privacy_protocol is None:
            self.want.update({'snmp_privacy_protocol': 'aes'})
        self._set_changed_options()
        if self.want.snmp_username is None:
            raise F5ModuleError(
                "snmp_username must be specified when creating a new v3 community."
            )
        if self.want.snmp_auth_password is None:
            raise F5ModuleError(
                "snmp_auth_password must be specified when creating a new v3 community."
            )
        if self.want.snmp_privacy_password is None:
            raise F5ModuleError(
                "snmp_privacy_password must be specified when creating a new v3 community."
            )
        if self.want.oid is None:
            raise F5ModuleError(
                "oid must be specified when creating a new v3 community."
            )
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def exists(self):
        """Return True when the SNMP v3 user already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/users/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.snmp_username)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as absent rather than failing hard.
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        """POST the new SNMP v3 user to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.snmp_username
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/users/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH only the changed attributes onto the existing user."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/users/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.snmp_username)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the user; returns True on HTTP 200."""
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/users/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.snmp_username)
        )
        resp = self.client.api.delete(uri)
        if resp.status == 200:
            return True

    def read_current_from_device(self):
        """GET the user's current state wrapped in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/sys/snmp/users/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.snmp_username)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification for this module."""

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            version=dict(
                default='v2c',
                choices=['v1', 'v2c', 'v3']
            ),
            name=dict(),
            community=dict(),
            source=dict(),
            port=dict(type='int'),
            oid=dict(),
            access=dict(
                choices=['ro', 'rw', 'read-only', 'read-write']
            ),
            ip_version=dict(
                choices=['4', '6']
            ),
            snmp_username=dict(),
            snmp_auth_protocol=dict(
                choices=['md5', 'sha', 'none']
            ),
            snmp_auth_password=dict(no_log=True),
            snmp_privacy_protocol=dict(
                choices=['aes', 'des', 'none']
            ),
            snmp_privacy_password=dict(no_log=True),
            update_password=dict(
                default='always',
                choices=['always', 'on_create']
            ),
            state=dict(default='present', choices=['absent', 'present']),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
        )
        # Merge the shared F5 connection arguments with the module-specific
        # ones (module-specific entries win).
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
        # 'name' is required for v1/v2c communities; v3 keys off the user.
        # BUG FIX: the second rule previously matched on 'v2', which is not
        # a valid 'version' choice (choices are v1/v2c/v3), so the
        # name-required rule for v2c could never fire.
        self.required_if = [
            ['version', 'v1', ['name']],
            ['version', 'v2c', ['name']],
            ['version', 'v3', ['snmp_username']]
        ]
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        required_if=spec.required_if
    )
    try:
        mm = ModuleManager(module=module)
        results = mm.exec_module()
        module.exit_json(**results)
    except F5ModuleError as ex:
        # Surface module-level failures as a clean Ansible failure message.
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
    main()
| gpl-3.0 |
lycantropos/cetus | cetus/queries/utils.py | 1 | 2170 | from datetime import datetime
from typing import (Optional,
List,
Dict)
from cetus.types import (ColumnValueType,
OrderingType,
UpdatesType)
from cetus.utils import join_str
# Wildcard used when a query should select every column.
ALL_COLUMNS_ALIAS = '*'
# SQL literal emitted for Python ``None`` values.
NULL_VALUE = 'NULL'
# Maps human-readable ordering names to SQL keywords.
ORDERS_ALIASES = dict(ascending='ASC',
                      descending='DESC')
def add_orderings(query: str, *,
                  orderings: List[OrderingType]
                  ) -> str:
    """Append an ORDER BY clause to *query* when orderings are given."""
    if not orderings:
        return query
    rendered = join_str(map(' '.join, orderings))
    return query + f'ORDER BY {rendered} '
def add_groupings(query: str, *,
                  groupings: Optional[List[str]] = None
                  ) -> str:
    """Append a GROUP BY clause to *query* when groupings are given."""
    if not groupings:
        return query
    rendered = join_str(groupings)
    return query + f'GROUP BY {rendered} '
def add_pagination(query: str, *,
                   limit: Optional[int],
                   offset: Optional[int]
                   ) -> str:
    """Append LIMIT/OFFSET clauses for any bounds that are not None."""
    clauses = []
    if limit is not None:
        clauses.append(f'LIMIT {limit} ')
    if offset is not None:
        clauses.append(f'OFFSET {offset} ')
    return query + ''.join(clauses)
def add_updates(query: str, *,
                updates: UpdatesType
                ) -> str:
    """Append a SET clause built from *updates* column/value pairs."""
    assignments = (f'{column} = {normalize_value(value)}'
                   for column, value in updates.items())
    return query + f'SET {join_str(assignments)} '
def normalize_value(value: ColumnValueType
                    ) -> str:
    """Render *value* as a SQL literal.

    Strings and datetimes are single-quoted. Embedded single quotes in
    strings are doubled (SQL-standard escaping) so the produced literal
    cannot break out of its quotes — previously a value like "O'Brien"
    yielded malformed SQL and was an injection vector. ``None`` becomes
    NULL; everything else uses its ``str()`` form unquoted.
    """
    if isinstance(value, str):
        # Double single quotes per the SQL standard.
        return "'{}'".format(value.replace("'", "''"))
    if isinstance(value, datetime):
        # str(datetime) never contains quotes, so wrapping is safe as-is.
        return f"'{value}'"
    if value is None:
        return NULL_VALUE
    return str(value)
def check_query_parameters(**query_parameters: List[str]
                           ) -> None:
    """Validate that every keyword query parameter is a non-empty list.

    Note: a ``**kwargs`` annotation describes each value, so the correct
    annotation is ``List[str]`` (the previous ``Dict[str, List[str]]``
    described the whole mapping, not the values).

    Raises:
        ValueError: if any parameter value is empty or otherwise falsy.
    """
    for parameter_key, parameter_value in query_parameters.items():
        if not parameter_value:
            err_msg = ('Invalid query parameter: '
                       f'"{parameter_key}" should be '
                       'non-empty list of strings '
                       f'but found: "{parameter_value}".')
            raise ValueError(err_msg)
| mit |
MasterJuan/warply | drupal/sites/all/themes/warply/node_modules/node-gyp/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # Accumulates rule elements; the leading string is the XML tag name,
    # as expected by easy_xml's nested-list document format.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies,
                         outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      description: Description of the rule.
      cmd: Command line of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    # List-valued fields are serialized as semicolon-separated strings,
    # matching the .vsprops/.rules file format.
    rule = ['CustomBuildRule',
            {'Name': name,
             'ExecutionDescription': description,
             'CommandLine': cmd,
             'Outputs': ';'.join(outputs),
             'FileExtensions': ';'.join(extensions),
             'AdditionalDependencies':
                 ';'.join(additional_dependencies)
            }]
    self.rules_section.append(rule)

  def WriteIfChanged(self):
    """Writes the tool file, skipping the write when content is unchanged."""
    content = ['VisualStudioToolFile',
               {'Version': '8.00',
                'Name': self.name
               },
               self.rules_section
              ]
    # Windows-1252 is the encoding Visual Studio expects for these files.
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
| gpl-3.0 |
xe1gyq/eekmex | sandbox/ppobserver.py | 1 | 2519 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""http://code.activestate.com/recipes/131499-observer-pattern/"""
class Subject(object):
    """Observable base class for a simple observer pattern."""

    def __init__(self):
        self._observers = []

    def attach(self, observer):
        """Register *observer* for notifications (at most once)."""
        if observer in self._observers:
            return
        self._observers.append(observer)

    def detach(self, observer):
        """Unregister *observer*; unknown observers are silently ignored."""
        try:
            self._observers.remove(observer)
        except ValueError:
            pass

    def notify(self, modifier=None):
        """Invoke update() on every observer other than *modifier*."""
        for watcher in self._observers:
            if modifier != watcher:
                watcher.update(self)
# Example usage
class Data(Subject):
    """Concrete subject: notifies observers whenever ``data`` is set."""

    def __init__(self, name=''):
        Subject.__init__(self)
        self.name = name
        self._data = 0

    def _get_data(self):
        return self._data

    def _set_data(self, value):
        # Store first, then broadcast, so observers see the new value.
        self._data = value
        self.notify()

    data = property(_get_data, _set_data)
class HexViewer:
    """Observer that prints the subject's data in hexadecimal."""

    def update(self, subject):
        message = ('HexViewer: Subject %s has data 0x%x' %
                   (subject.name, subject.data))
        print(message)
class DecimalViewer:
    """Observer that prints the subject's data in decimal."""

    def update(self, subject):
        message = ('DecimalViewer: Subject %s has data %d' %
                   (subject.name, subject.data))
        print(message)
# Example usage...
def main():
    """Demonstrate attach/detach and notification fan-out (see OUTPUT below)."""
    data1 = Data('Data 1')
    data2 = Data('Data 2')
    view1 = DecimalViewer()
    view2 = HexViewer()
    # Both viewers observe both subjects.
    data1.attach(view1)
    data1.attach(view2)
    data2.attach(view2)
    data2.attach(view1)
    print("Setting Data 1 = 10")
    data1.data = 10
    print("Setting Data 2 = 15")
    data2.data = 15
    print("Setting Data 1 = 3")
    data1.data = 3
    print("Setting Data 2 = 5")
    data2.data = 5
    # After detaching, only the DecimalViewer keeps receiving updates.
    print("Detach HexViewer from data1 and data2.")
    data1.detach(view2)
    data2.detach(view2)
    print("Setting Data 1 = 10")
    data1.data = 10
    print("Setting Data 2 = 15")
    data2.data = 15
if __name__ == '__main__':
    main()
### OUTPUT ###
# Setting Data 1 = 10
# DecimalViewer: Subject Data 1 has data 10
# HexViewer: Subject Data 1 has data 0xa
# Setting Data 2 = 15
# HexViewer: Subject Data 2 has data 0xf
# DecimalViewer: Subject Data 2 has data 15
# Setting Data 1 = 3
# DecimalViewer: Subject Data 1 has data 3
# HexViewer: Subject Data 1 has data 0x3
# Setting Data 2 = 5
# HexViewer: Subject Data 2 has data 0x5
# DecimalViewer: Subject Data 2 has data 5
# Detach HexViewer from data1 and data2.
# Setting Data 1 = 10
# DecimalViewer: Subject Data 1 has data 10
# Setting Data 2 = 15
# DecimalViewer: Subject Data 2 has data 15
| apache-2.0 |
j5shi/Thruster | pylibs/lib2to3/fixes/fix_types.py | 57 | 1868 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for removing uses of the types module.
These work for only the known names in the types module. The forms above
can include types. or not. ie, It is assumed the module is imported either as:
import types
from types import ... # either * or specific types
The import statements are not modified.
There should be another fixer that handles at least the following constants:
type([]) -> list
type(()) -> tuple
type('') -> str
"""
# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name
# types-module name -> builtin replacement expression. Entries whose
# replacement is an expression (e.g. 'type(None)') rather than a bare name
# still substitute cleanly because they are emitted as plain source text.
_TYPE_MAPPING = {
        'BooleanType' : 'bool',
        'BufferType' : 'memoryview',
        'ClassType' : 'type',
        'ComplexType' : 'complex',
        'DictType': 'dict',
        'DictionaryType' : 'dict',
        'EllipsisType' : 'type(Ellipsis)',
        #'FileType' : 'io.IOBase',
        'FloatType': 'float',
        'IntType': 'int',
        'ListType': 'list',
        'LongType': 'int',
        'ObjectType' : 'object',
        'NoneType': 'type(None)',
        'NotImplementedType' : 'type(NotImplemented)',
        'SliceType' : 'slice',
        'StringType': 'bytes', # XXX ?
        'StringTypes' : 'str', # XXX ?
        'TupleType': 'tuple',
        'TypeType' : 'type',
        'UnicodeType': 'str',
        'XRangeType' : 'range',
    }

# One pattern per known name; only 'types.<Name>' attribute accesses match,
# so the fixer can never see an unknown name.
_pats = ["power< 'types' trailer< '.' name='%s' > >" % t for t in _TYPE_MAPPING]
class FixTypes(fixer_base.BaseFix):
    """Rewrites ``types.XxxType`` references to their builtin equivalents."""

    BM_compatible = True
    # Alternation of all per-name patterns built above.
    PATTERN = '|'.join(_pats)

    def transform(self, node, results):
        # Runs under Python 2 (this lib2to3 vintage), hence ``unicode``.
        # NOTE(review): unicode(None) would yield the truthy string u'None';
        # this is only safe because PATTERN restricts matches to keys that
        # exist in _TYPE_MAPPING — confirm before widening the pattern.
        new_value = unicode(_TYPE_MAPPING.get(results["name"].value))
        if new_value:
            return Name(new_value, prefix=node.prefix)
        return None
| gpl-2.0 |
cmouse/buildbot | master/buildbot/test/unit/util/test_private_tempdir.py | 6 | 1893 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import shutil
import tempfile
from twisted.trial import unittest
from buildbot.test.util.decorators import skipUnlessPlatformIs
from buildbot.util.private_tempdir import PrivateTemporaryDirectory
class TestTemporaryDirectory(unittest.TestCase):

    # In this test we want to also check potential platform differences, so
    # we don't mock the filesystem access

    def setUp(self):
        # Parent directory for all PrivateTemporaryDirectory instances.
        self.tempdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.tempdir)

    def test_simple(self):
        """The directory exists inside the context and is gone after it."""
        with PrivateTemporaryDirectory(dir=self.tempdir) as dir:
            self.assertTrue(os.path.isdir(dir))
        self.assertFalse(os.path.isdir(dir))

    @skipUnlessPlatformIs('posix')
    def test_mode(self):
        """The requested mode is applied (0o40700 = S_IFDIR | 0o700)."""
        with PrivateTemporaryDirectory(dir=self.tempdir, mode=0o700) as dir:
            self.assertEqual(0o40700, os.stat(dir).st_mode)

    def test_cleanup(self):
        """Explicit cleanup removes the directory and is idempotent."""
        ctx = PrivateTemporaryDirectory(dir=self.tempdir)
        self.assertTrue(os.path.isdir(ctx.name))
        ctx.cleanup()
        self.assertFalse(os.path.isdir(ctx.name))
        ctx.cleanup()  # also check whether multiple calls don't throw
        ctx.cleanup()
| gpl-2.0 |
xadahiya/django | django/utils/deconstruct.py | 502 | 2047 | from importlib import import_module
from django.utils.version import get_docs_version
def deconstructible(*args, **kwargs):
    """
    Class decorator that allow the decorated class to be serialized
    by the migrations subsystem.

    Accepts an optional kwarg `path` to specify the import path.

    Usable both bare (``@deconstructible``) and with arguments
    (``@deconstructible(path='pkg.Class')``).
    """
    path = kwargs.pop('path', None)

    def decorator(klass):
        def __new__(cls, *args, **kwargs):
            # We capture the arguments to make returning them trivial
            obj = super(klass, cls).__new__(cls)
            obj._constructor_args = (args, kwargs)
            return obj

        def deconstruct(obj):
            """
            Returns a 3-tuple of class import path, positional arguments,
            and keyword arguments.
            """
            # Python 2/fallback version
            if path:
                module_name, _, name = path.rpartition('.')
            else:
                module_name = obj.__module__
                name = obj.__class__.__name__
            # Make sure it's actually there and not an inner class
            module = import_module(module_name)
            if not hasattr(module, name):
                raise ValueError(
                    "Could not find object %s in %s.\n"
                    "Please note that you cannot serialize things like inner "
                    "classes. Please move the object into the main module "
                    "body to use migrations.\n"
                    "For more information, see "
                    "https://docs.djangoproject.com/en/%s/topics/migrations/#serializing-values"
                    % (name, module_name, get_docs_version()))
            return (
                path or '%s.%s' % (obj.__class__.__module__, name),
                obj._constructor_args[0],
                obj._constructor_args[1],
            )

        # Install the capture hook and the deconstruct API on the class.
        klass.__new__ = staticmethod(__new__)
        klass.deconstruct = deconstruct

        return klass

    # Bare decorator usage (no args) returns the decorator itself;
    # called-with-arguments usage applies it immediately.
    if not args:
        return decorator
    return decorator(*args, **kwargs)
| bsd-3-clause |
marc-sensenich/ansible | lib/ansible/plugins/action/net_banner.py | 756 | 1058 | # (c) 2017, Ansible Inc,
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.action.net_base import ActionModule as _ActionModule
class ActionModule(_ActionModule):
    """Action plugin for net_banner; all work happens in the shared base."""

    def run(self, tmp=None, task_vars=None):
        """Run the action; *tmp* is accepted for API compatibility only."""
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp # tmp no longer has any effect
        return result
| gpl-3.0 |
JioEducation/edx-platform | cms/djangoapps/contentstore/management/commands/tests/test_migrate_to_split.py | 11 | 4012 | """
Unittests for migrating a course to split mongo
"""
import unittest
from django.core.management import CommandError, call_command
from contentstore.management.commands.migrate_to_split import Command
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.django import modulestore
class TestArgParsing(unittest.TestCase):
    """
    Tests for parsing arguments for the `migrate_to_split` management command
    """
    def setUp(self):
        super(TestArgParsing, self).setUp()
        self.command = Command()

    def test_no_args(self):
        """
        Test the arg length error
        """
        errstring = "migrate_to_split requires at least two arguments"
        # assertRaisesRegexp is the pre-3.2 spelling of assertRaisesRegex.
        with self.assertRaisesRegexp(CommandError, errstring):
            self.command.handle()

    def test_invalid_location(self):
        """
        Test passing an unparsable course id
        """
        errstring = "Invalid location string"
        with self.assertRaisesRegexp(CommandError, errstring):
            self.command.handle("foo", "bar")

    def test_nonexistent_user_id(self):
        """
        Test error for using an unknown user primary key
        """
        errstring = "No user found identified by 99"
        with self.assertRaisesRegexp(CommandError, errstring):
            self.command.handle("org/course/name", "99")

    def test_nonexistent_user_email(self):
        """
        Test error for using an unknown user email
        """
        errstring = "No user found identified by fake@example.com"
        with self.assertRaisesRegexp(CommandError, errstring):
            self.command.handle("org/course/name", "fake@example.com")
# pylint: disable=no-member, protected-access
class TestMigrateToSplit(ModuleStoreTestCase):
    """
    Unit tests for migrating a course from old mongo to split mongo
    """

    def setUp(self):
        super(TestMigrateToSplit, self).setUp()
        # Source course lives in the old (draft) mongo store.
        self.course = CourseFactory(default_store=ModuleStoreEnum.Type.mongo)

    def test_user_email(self):
        """
        Test migration for real as well as testing using an email addr to id the user
        """
        call_command(
            "migrate_to_split",
            str(self.course.id),
            str(self.user.email),
        )
        split_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split)
        new_key = split_store.make_course_key(self.course.id.org, self.course.id.course, self.course.id.run)
        self.assertTrue(
            split_store.has_course(new_key),
            "Could not find course"
        )
        # I put this in but realized that the migrator doesn't make the new course the
        # default mapping in mixed modulestore. I left the test here so we can debate what it ought to do.
        # self.assertEqual(
        #     ModuleStoreEnum.Type.split,
        #     modulestore()._get_modulestore_for_courselike(new_key).get_modulestore_type(),
        #     "Split is not the new default for the course"
        # )

    def test_user_id(self):
        """
        Test that the command accepts the user's primary key
        """
        # lack of error implies success
        call_command(
            "migrate_to_split",
            str(self.course.id),
            str(self.user.id),
        )

    def test_locator_string(self):
        """
        Test importing to a different course id
        """
        call_command(
            "migrate_to_split",
            str(self.course.id),
            str(self.user.id),
            "org.dept", "name", "run",
        )
        split_store = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.split)
        # NOTE(review): the locator is built from the ORIGINAL course id,
        # not the "org.dept"/"name"/"run" arguments passed above — confirm
        # whether this lookup was meant to use the new identifiers.
        locator = split_store.make_course_key(self.course.id.org, self.course.id.course, self.course.id.run)
        course_from_split = modulestore().get_course(locator)
        self.assertIsNotNone(course_from_split)
| agpl-3.0 |
jdelight/django | django/middleware/common.py | 55 | 6385 | import hashlib
import logging
import re
from django import http
from django.conf import settings
from django.core import urlresolvers
from django.core.exceptions import PermissionDenied
from django.core.mail import mail_managers
from django.utils.encoding import force_text
logger = logging.getLogger('django.request')
class CommonMiddleware(object):
    """
    "Common" middleware for taking care of some basic operations:

        - Forbids access to User-Agents in settings.DISALLOWED_USER_AGENTS

        - URL rewriting: Based on the APPEND_SLASH and PREPEND_WWW settings,
          this middleware appends missing slashes and/or prepends missing
          "www."s.

            - If APPEND_SLASH is set and the initial URL doesn't end with a
              slash, and it is not found in urlpatterns, a new URL is formed by
              appending a slash at the end. If this new URL is found in
              urlpatterns, then an HTTP-redirect is returned to this new URL;
              otherwise the initial URL is processed as usual.

          This behavior can be customized by subclassing CommonMiddleware and
          overriding the response_redirect_class attribute.

        - ETags: If the USE_ETAGS setting is set, ETags will be calculated from
          the entire page content and Not Modified responses will be returned
          appropriately.
    """
    # Subclasses may override this (e.g. with HttpResponseRedirect) to change
    # the redirect type issued by process_request.
    response_redirect_class = http.HttpResponsePermanentRedirect
    def process_request(self, request):
        """
        Check for denied User-Agents and rewrite the URL based on
        settings.APPEND_SLASH and settings.PREPEND_WWW
        """
        # Check for denied User-Agents
        if 'HTTP_USER_AGENT' in request.META:
            for user_agent_regex in settings.DISALLOWED_USER_AGENTS:
                if user_agent_regex.search(request.META['HTTP_USER_AGENT']):
                    raise PermissionDenied('Forbidden user agent')
        # Check for a redirect based on settings.APPEND_SLASH
        # and settings.PREPEND_WWW
        host = request.get_host()
        # old_url / new_url are [host, path] pairs; new_url starts as a copy
        # and is mutated below if any rewrite applies.
        old_url = [host, request.get_full_path()]
        new_url = old_url[:]
        if (settings.PREPEND_WWW and old_url[0] and
                not old_url[0].startswith('www.')):
            new_url[0] = 'www.' + old_url[0]
        # Append a slash if APPEND_SLASH is set and the URL doesn't have a
        # trailing slash and there is no pattern for the current path
        if settings.APPEND_SLASH and (not old_url[1].endswith('/')):
            urlconf = getattr(request, 'urlconf', None)
            # Only rewrite when the slash-less path does NOT resolve but the
            # slashed variant does; otherwise leave the URL untouched.
            if (not urlresolvers.is_valid_path(request.path_info, urlconf) and
                    urlresolvers.is_valid_path("%s/" % request.path_info, urlconf)):
                new_url[1] = request.get_full_path(force_append_slash=True)
                # Redirecting would silently drop the request body, so fail
                # loudly in DEBUG for methods that carry data.
                if settings.DEBUG and request.method in ('POST', 'PUT', 'PATCH'):
                    raise RuntimeError((""
                        "You called this URL via %(method)s, but the URL doesn't end "
                        "in a slash and you have APPEND_SLASH set. Django can't "
                        "redirect to the slash URL while maintaining %(method)s data. "
                        "Change your form to point to %(url)s (note the trailing "
                        "slash), or set APPEND_SLASH=False in your Django "
                        "settings.") % {'method': request.method, 'url': ''.join(new_url)})
        if new_url == old_url:
            # No redirects required.
            return
        if new_url[0] != old_url[0]:
            # Host changed (www. prepended): an absolute URL is required.
            newurl = "%s://%s%s" % (
                request.scheme,
                new_url[0], new_url[1])
        else:
            newurl = new_url[1]
        return self.response_redirect_class(newurl)
    def process_response(self, request, response):
        """
        Calculate the ETag, if needed.
        """
        if settings.USE_ETAGS:
            if response.has_header('ETag'):
                # Respect an ETag the view already set.
                etag = response['ETag']
            elif response.streaming:
                # Streaming content cannot be hashed without consuming it.
                etag = None
            else:
                etag = '"%s"' % hashlib.md5(response.content).hexdigest()
            if etag is not None:
                if (200 <= response.status_code < 300
                        and request.META.get('HTTP_IF_NONE_MATCH') == etag):
                    # Client already has this version: answer 304, but keep
                    # any cookies the original response carried.
                    cookies = response.cookies
                    response = http.HttpResponseNotModified()
                    response.cookies = cookies
                else:
                    response['ETag'] = etag
        return response
class BrokenLinkEmailsMiddleware(object):
    # Emails site managers (settings.MANAGERS) about 404s that look like
    # genuinely broken links rather than bot/scanner noise.
    def process_response(self, request, response):
        """
        Send broken link emails for relevant 404 NOT FOUND responses.
        """
        # Only report in production; DEBUG surfaces 404s to the developer.
        if response.status_code == 404 and not settings.DEBUG:
            domain = request.get_host()
            path = request.get_full_path()
            # Headers are client-supplied bytes; decode defensively.
            referer = force_text(request.META.get('HTTP_REFERER', ''), errors='replace')
            if not self.is_ignorable_request(request, path, domain, referer):
                ua = force_text(request.META.get('HTTP_USER_AGENT', '<none>'), errors='replace')
                ip = request.META.get('REMOTE_ADDR', '<none>')
                mail_managers(
                    "Broken %slink on %s" % (
                        ('INTERNAL ' if self.is_internal_request(domain, referer) else ''),
                        domain
                    ),
                    "Referrer: %s\nRequested URL: %s\nUser agent: %s\n"
                    "IP address: %s\n" % (referer, path, ua, ip),
                    fail_silently=True)
        return response
    def is_internal_request(self, domain, referer):
        """
        Returns True if the referring URL is the same domain as the current request.
        """
        # Different subdomains are treated as different domains.
        return bool(re.match("^https?://%s/" % re.escape(domain), referer))
    def is_ignorable_request(self, request, uri, domain, referer):
        """
        Returns True if the given request *shouldn't* notify the site managers.
        """
        # '?' in referer is identified as search engine source
        if (not referer or
                (not self.is_internal_request(domain, referer) and '?' in referer)):
            return True
        # Otherwise consult the project's explicit ignore patterns.
        return any(pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
| bsd-3-clause |
mobstac/django-guardian | guardian/shortcuts.py | 1 | 21571 | """
Convenient shortcuts to manage or check object permissions.
"""
from __future__ import unicode_literals
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.shortcuts import _get_queryset
from itertools import groupby
from guardian.compat import get_user_model
from guardian.compat import basestring
from guardian.core import ObjectPermissionChecker
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import WrongAppError
from guardian.utils import get_identity
from guardian.utils import get_user_obj_perms_model
from guardian.utils import get_group_obj_perms_model
import warnings
def assign_perm(perm, user_or_group, obj=None):
    """
    Assigns permission to user/group and object pair.

    :param perm: proper permission for given ``obj``, as string (in format:
      ``app_label.codename`` or ``codename``). If ``obj`` is not given, must
      be in format ``app_label.codename``.
    :param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
      passing any other object would raise
      ``guardian.exceptions.NotUserNorGroup`` exception
    :param obj: persisted Django's ``Model`` instance or ``None`` if assigning
      global permission. Default is ``None``.

    We can assign permission for ``Model`` instance for specific user:

    >>> from django.contrib.sites.models import Site
    >>> from guardian.models import User
    >>> from guardian.shortcuts import assign_perm
    >>> site = Site.objects.get_current()
    >>> user = User.objects.create(username='joe')
    >>> assign_perm("change_site", user, site)
    <UserObjectPermission: example.com | joe | change_site>
    >>> user.has_perm("change_site", site)
    True

    ... or we can assign permission for group:

    >>> group = Group.objects.create(name='joe-group')
    >>> user.groups.add(group)
    >>> assign_perm("delete_site", group, site)
    <GroupObjectPermission: example.com | joe-group | delete_site>
    >>> user.has_perm("delete_site", site)
    True

    **Global permissions**

    This function may also be used to assign standard, *global* permissions if
    ``obj`` parameter is omitted. Added Permission would be returned in that
    case:

    >>> assign_perm("sites.change_site", user)
    <Permission: sites | site | Can change site>
    """
    # Exactly one of user/group is non-None after this call.
    user, group = get_identity(user_or_group)
    # If obj is None we try to operate on global permissions
    if obj is None:
        try:
            app_label, codename = perm.split('.', 1)
        except ValueError:
            raise ValueError("For global permissions, first argument must be in"
                " format: 'app_label.codename' (is %r)" % perm)
        perm = Permission.objects.get(content_type__app_label=app_label,
            codename=codename)
        if user:
            user.user_permissions.add(perm)
            return perm
        if group:
            group.permissions.add(perm)
            return perm
    # Object-level permission: only the codename part is used from here on.
    perm = perm.split('.')[-1]
    if user:
        # Resolve the (possibly custom) per-object permission model for obj.
        model = get_user_obj_perms_model(obj)
        return model.objects.assign_perm(perm, user, obj)
    if group:
        model = get_group_obj_perms_model(obj)
        return model.objects.assign_perm(perm, group, obj)
def assign(perm, user_or_group, obj=None):
    """Deprecated alias for :func:`assign_perm`, kept for compatibility."""
    # Keep the old name working but warn callers to migrate.
    # Fixed: the message previously misspelled "deprecated" as "depreciated".
    warnings.warn("Shortcut function 'assign' is being renamed to "
        "'assign_perm'. Update your code accordingly as old name will be "
        "deprecated in 1.0.5 version.", DeprecationWarning)
    return assign_perm(perm, user_or_group, obj)
def remove_perm(perm, user_or_group=None, obj=None):
    """
    Removes permission from user/group and object pair.

    :param perm: proper permission for given ``obj``, as string (in format:
      ``app_label.codename`` or ``codename``). If ``obj`` is not given, must
      be in format ``app_label.codename``.
    :param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
      passing any other object would raise
      ``guardian.exceptions.NotUserNorGroup`` exception
    :param obj: persisted Django's ``Model`` instance or ``None`` if assigning
      global permission. Default is ``None``.
    """
    user, group = get_identity(user_or_group)
    if obj is None:
        # Global (model-level) permission: operate on the auth tables.
        try:
            app_label, codename = perm.split('.', 1)
        except ValueError:
            raise ValueError("For global permissions, first argument must be in"
                " format: 'app_label.codename' (is %r)" % perm)
        perm = Permission.objects.get(content_type__app_label=app_label,
            codename=codename)
        if user:
            user.user_permissions.remove(perm)
            return
        elif group:
            group.permissions.remove(perm)
            return
    # Object-level permission: only the codename part matters here.
    perm = perm.split('.')[-1]
    if user:
        model = get_user_obj_perms_model(obj)
        model.objects.remove_perm(perm, user, obj)
    if group:
        model = get_group_obj_perms_model(obj)
        model.objects.remove_perm(perm, group, obj)
def get_perms(user_or_group, obj):
    """
    Returns permissions for given user/group and object pair, as list of
    strings.
    """
    # Delegate the lookup to the permission checker for this identity.
    return ObjectPermissionChecker(user_or_group).get_perms(obj)
def get_perms_for_model(cls):
    """
    Returns queryset of all Permission objects for the given class. It is
    possible to pass Model as class or instance.
    """
    if isinstance(cls, basestring):
        # A dotted "app_label.ModelName" string was passed; resolve it.
        app_label, model_name = cls.split('.')
        model = models.get_model(app_label, model_name)
    else:
        model = cls
    content_type = ContentType.objects.get_for_model(model)
    return Permission.objects.filter(content_type=content_type)
def get_users_with_perms(obj, attach_perms=False, with_superusers=False,
        with_group_users=True):
    """
    Returns queryset of all ``User`` objects with *any* object permissions for
    the given ``obj``.

    :param obj: persisted Django's ``Model`` instance
    :param attach_perms: Default: ``False``. If set to ``True`` result would be
      dictionary of ``User`` instances with permissions' codenames list as
      values. This would fetch users eagerly!
    :param with_superusers: Default: ``False``. If set to ``True`` result would
      contain all superusers.
    :param with_group_users: Default: ``True``. If set to ``False`` result would
      **not** contain those users who have only group permissions for given
      ``obj``.

    Example::

        >>> from django.contrib.flatpages.models import FlatPage
        >>> from django.contrib.auth.models import User
        >>> from guardian.shortcuts import assign_perm, get_users_with_perms
        >>>
        >>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
        >>> joe = User.objects.create_user('joe', 'joe@example.com', 'joesecret')
        >>> assign_perm('change_flatpage', joe, page)
        >>>
        >>> get_users_with_perms(page)
        [<User: joe>]
        >>>
        >>> get_users_with_perms(page, attach_perms=True)
        {<User: joe>: [u'change_flatpage']}
    """
    ctype = ContentType.objects.get_for_model(obj)
    if not attach_perms:
        # It's much easier without attached perms so we do it first if that is
        # the case
        user_model = get_user_obj_perms_model(obj)
        related_name = user_model.user.field.related_query_name()
        if user_model.objects.is_generic():
            # Generic FK storage: match on content type + object primary key.
            user_filters = {
                '%s__content_type' % related_name: ctype,
                '%s__object_pk' % related_name: obj.pk,
            }
        else:
            # Direct FK storage: match on the object itself.
            user_filters = {'%s__content_object' % related_name: obj}
        qset = Q(**user_filters)
        if with_group_users:
            # Also include users whose groups hold permissions on obj.
            group_model = get_group_obj_perms_model(obj)
            group_rel_name = group_model.group.field.related_query_name()
            if group_model.objects.is_generic():
                group_filters = {
                    'groups__%s__content_type' % group_rel_name: ctype,
                    'groups__%s__object_pk' % group_rel_name: obj.pk,
                }
            else:
                group_filters = {
                    'groups__%s__content_object' % group_rel_name: obj,
                }
            qset = qset | Q(**group_filters)
        if with_superusers:
            qset = qset | Q(is_superuser=True)
        return get_user_model().objects.filter(qset).distinct()
    else:
        # TODO: Do not hit db for each user!
        users = {}
        for user in get_users_with_perms(obj,
                with_group_users=with_group_users):
            users[user] = sorted(get_perms(user, obj))
        return users
def get_groups_with_perms(obj, attach_perms=False):
    """
    Returns queryset of all ``Group`` objects with *any* object permissions for
    the given ``obj``.

    :param obj: persisted Django's ``Model`` instance
    :param attach_perms: Default: ``False``. If set to ``True`` result would be
      dictionary of ``Group`` instances with permissions' codenames list as
      values. This would fetch groups eagerly!

    Example::

        >>> from django.contrib.flatpages.models import FlatPage
        >>> from guardian.shortcuts import assign_perm, get_groups_with_perms
        >>> from guardian.models import Group
        >>>
        >>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
        >>> admins = Group.objects.create(name='Admins')
        >>> assign_perm('change_flatpage', admins, page)
        >>>
        >>> get_groups_with_perms(page)
        [<Group: admins>]
        >>>
        >>> get_groups_with_perms(page, attach_perms=True)
        {<Group: admins>: [u'change_flatpage']}
    """
    ctype = ContentType.objects.get_for_model(obj)
    if not attach_perms:
        # It's much easier without attached perms so we do it first if that is
        # the case
        group_model = get_group_obj_perms_model(obj)
        group_rel_name = group_model.group.field.related_query_name()
        if group_model.objects.is_generic():
            # Generic FK storage: match on content type + object primary key.
            group_filters = {
                '%s__content_type' % group_rel_name: ctype,
                '%s__object_pk' % group_rel_name: obj.pk,
            }
        else:
            # Direct FK storage: match on the object itself.
            group_filters = {'%s__content_object' % group_rel_name: obj}
        groups = Group.objects.filter(**group_filters).distinct()
        return groups
    else:
        # TODO: Do not hit db for each group!
        groups = {}
        for group in get_groups_with_perms(obj):
            # Idiom fix: was ``if not group in groups`` (PEP 8: use "not in").
            if group not in groups:
                groups[group] = sorted(get_perms(group, obj))
        return groups
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False):
    """
    Returns queryset of objects for which a given ``user`` has *all*
    permissions present at ``perms``.

    :param user: ``User`` instance for which objects would be returned
    :param perms: single permission string, or sequence of permission strings
      which should be checked.
      If ``klass`` parameter is not given, those should be full permission
      names rather than only codenames (i.e. ``auth.change_user``). If more than
      one permission is present within sequence, their content type **must** be
      the same or ``MixedContentTypeError`` exception would be raised.
    :param klass: may be a Model, Manager or QuerySet object. If not given
      this parameter would be computed based on given ``params``.
    :param use_groups: if ``False``, wouldn't check user's groups object
      permissions. Default is ``True``.
    :param any_perm: if True, any of permission in sequence is accepted
    :raises MixedContentTypeError: when computed content type for ``perms``
      and/or ``klass`` clashes.
    :raises WrongAppError: if cannot compute app label for given ``perms``/
      ``klass``.

    Example::

        >>> from django.contrib.auth.models import User
        >>> from guardian.shortcuts import get_objects_for_user
        >>> joe = User.objects.get(username='joe')
        >>> get_objects_for_user(joe, 'auth.change_group')
        []
        >>> from guardian.shortcuts import assign_perm
        >>> group = Group.objects.create('some group')
        >>> assign_perm('auth.change_group', joe, group)
        >>> get_objects_for_user(joe, 'auth.change_group')
        [<Group some group>]

    The permission string can also be an iterable. Continuing with the previous example:

        >>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
        []
        >>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'], any_perm=True)
        [<Group some group>]
        >>> assign_perm('auth.delete_group', joe, group)
        >>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
        [<Group some group>]
    """
    if isinstance(perms, basestring):
        perms = [perms]
    ctype = None
    app_label = None
    codenames = set()
    # Compute codenames set and ctype if possible
    for perm in perms:
        if '.' in perm:
            new_app_label, codename = perm.split('.', 1)
            if app_label is not None and app_label != new_app_label:
                raise MixedContentTypeError("Given perms must have same app "
                    "label (%s != %s)" % (app_label, new_app_label))
            else:
                app_label = new_app_label
        else:
            codename = perm
        codenames.add(codename)
        if app_label is not None:
            new_ctype = ContentType.objects.get(app_label=app_label,
                permission__codename=codename)
            if ctype is not None and ctype != new_ctype:
                raise MixedContentTypeError("ContentType was once computed "
                    "to be %s and another one %s" % (ctype, new_ctype))
            else:
                ctype = new_ctype
    # Compute queryset and ctype if still missing
    if ctype is None and klass is None:
        raise WrongAppError("Cannot determine content type")
    elif ctype is None and klass is not None:
        queryset = _get_queryset(klass)
        ctype = ContentType.objects.get_for_model(queryset.model)
    elif ctype is not None and klass is None:
        queryset = _get_queryset(ctype.model_class())
    else:
        queryset = _get_queryset(klass)
        if ctype.model_class() != queryset.model:
            raise MixedContentTypeError("Content type for given perms and "
                "klass differs")
    # At this point, we should have both ctype and queryset and they should
    # match which means: ctype.model_class() == queryset.model
    # we should also have ``codenames`` list
    # First check if user is superuser and if so, return queryset immediately
    if user.is_superuser:
        return queryset
    # Now we should extract list of pk values for which we would filter queryset
    user_model = get_user_obj_perms_model(queryset.model)
    user_obj_perms_queryset = (user_model.objects
        .filter(user=user)
        .filter(permission__content_type=ctype)
        .filter(permission__codename__in=codenames))
    if user_model.objects.is_generic():
        fields = ['object_pk', 'permission__codename']
    else:
        fields = ['content_object__pk', 'permission__codename']
    # data is a list of (object pk, permission codename) pairs.
    user_obj_perms = user_obj_perms_queryset.values_list(*fields)
    data = list(user_obj_perms)
    if use_groups:
        # Merge in (pk, codename) pairs granted via the user's groups.
        group_model = get_group_obj_perms_model(queryset.model)
        group_filters = {
            'permission__content_type': ctype,
            'permission__codename__in': codenames,
            'group__%s' % get_user_model().groups.field.related_query_name(): user,
        }
        groups_obj_perms_queryset = group_model.objects.filter(**group_filters)
        if group_model.objects.is_generic():
            fields = ['object_pk', 'permission__codename']
        else:
            fields = ['content_object__pk', 'permission__codename']
        groups_obj_perms = groups_obj_perms_queryset.values_list(*fields)
        data += list(groups_obj_perms)
    keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
    data = sorted(data, key=keyfunc)
    pk_list = []
    # Keep only objects carrying any (any_perm) or all requested codenames.
    for pk, group in groupby(data, keyfunc):
        obj_codenames = set((e[1] for e in group))
        if any_perm or codenames.issubset(obj_codenames):
            pk_list.append(pk)
    objects = queryset.filter(pk__in=pk_list)
    return objects
def get_objects_for_group(group, perms, klass=None, any_perm=False):
    """
    Returns queryset of objects for which a given ``group`` has *all*
    permissions present at ``perms``.

    :param group: ``Group`` instance for which objects would be returned.
    :param perms: single permission string, or sequence of permission strings
      which should be checked.
      If ``klass`` parameter is not given, those should be full permission
      names rather than only codenames (i.e. ``auth.change_user``). If more than
      one permission is present within sequence, their content type **must** be
      the same or ``MixedContentTypeError`` exception would be raised.
    :param klass: may be a Model, Manager or QuerySet object. If not given
      this parameter would be computed based on given ``params``.
    :param any_perm: if True, any of permission in sequence is accepted
    :raises MixedContentTypeError: when computed content type for ``perms``
      and/or ``klass`` clashes.
    :raises WrongAppError: if cannot compute app label for given ``perms``/
      ``klass``.

    Example:

    Let's assume we have a ``Task`` model belonging to the ``tasker`` app with
    the default add_task, change_task and delete_task permissions provided
    by Django::

        >>> from guardian.shortcuts import get_objects_for_group
        >>> from tasker import Task
        >>> group = Group.objects.create('some group')
        >>> task = Task.objects.create('some task')
        >>> get_objects_for_group(group, 'tasker.add_task')
        []
        >>> from guardian.shortcuts import assign_perm
        >>> assign_perm('tasker.add_task', group, task)
        >>> get_objects_for_group(group, 'tasker.add_task')
        [<Task some task>]

    The permission string can also be an iterable. Continuing with the previous example:

        >>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
        []
        >>> assign_perm('tasker.delete_task', group, task)
        >>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
        [<Task some task>]
    """
    if isinstance(perms, basestring):
        perms = [perms]
    ctype = None
    app_label = None
    codenames = set()
    # Compute codenames set and ctype if possible
    for perm in perms:
        if '.' in perm:
            new_app_label, codename = perm.split('.', 1)
            if app_label is not None and app_label != new_app_label:
                raise MixedContentTypeError("Given perms must have same app "
                    "label (%s != %s)" % (app_label, new_app_label))
            else:
                app_label = new_app_label
        else:
            codename = perm
        codenames.add(codename)
        if app_label is not None:
            new_ctype = ContentType.objects.get(app_label=app_label,
                permission__codename=codename)
            if ctype is not None and ctype != new_ctype:
                raise MixedContentTypeError("ContentType was once computed "
                    "to be %s and another one %s" % (ctype, new_ctype))
            else:
                ctype = new_ctype
    # Compute queryset and ctype if still missing
    if ctype is None and klass is None:
        raise WrongAppError("Cannot determine content type")
    elif ctype is None and klass is not None:
        queryset = _get_queryset(klass)
        ctype = ContentType.objects.get_for_model(queryset.model)
    elif ctype is not None and klass is None:
        queryset = _get_queryset(ctype.model_class())
    else:
        queryset = _get_queryset(klass)
        if ctype.model_class() != queryset.model:
            raise MixedContentTypeError("Content type for given perms and "
                "klass differs")
    # At this point, we should have both ctype and queryset and they should
    # match which means: ctype.model_class() == queryset.model
    # we should also have ``codenames`` list
    # Now we should extract list of pk values for which we would filter queryset
    group_model = get_group_obj_perms_model(queryset.model)
    groups_obj_perms_queryset = (group_model.objects
        .filter(group=group)
        .filter(permission__content_type=ctype)
        .filter(permission__codename__in=codenames))
    if group_model.objects.is_generic():
        fields = ['object_pk', 'permission__codename']
    else:
        fields = ['content_object__pk', 'permission__codename']
    # data is a list of (object pk, permission codename) pairs.
    groups_obj_perms = groups_obj_perms_queryset.values_list(*fields)
    data = list(groups_obj_perms)
    keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
    data = sorted(data, key=keyfunc)
    pk_list = []
    # Fix: the grouped iterator was previously named ``group``, shadowing the
    # ``group`` parameter of this function; renamed to ``matches``.
    for pk, matches in groupby(data, keyfunc):
        obj_codenames = set((e[1] for e in matches))
        if any_perm or codenames.issubset(obj_codenames):
            pk_list.append(pk)
    objects = queryset.filter(pk__in=pk_list)
    return objects
| bsd-2-clause |
severr/severr-python | test/test_error.py | 6 | 1294 | # coding: utf-8
"""
Trakerr API
Get your application events and errors to Trakerr via the *Trakerr API*.
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import trakerr_client
from trakerr_client.rest import ApiException
from trakerr_client.models.error import Error
class TestError(unittest.TestCase):
""" Error unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testError(self):
"""
Test Error
"""
model = trakerr_client.models.error.Error()
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
| apache-2.0 |
nathania/pysal | pysal/core/IOHandlers/arcgis_swm.py | 8 | 7138 | import pysal
import os.path
import numpy as np
from struct import pack, unpack
import pysal.core.FileIO as FileIO
from pysal.weights import W
from pysal.weights.util import remap_ids
from warnings import warn
__author__ = "Myunghwa Hwang <mhwang4@gmail.com>"
__all__ = ["ArcGISSwmIO"]
class ArcGISSwmIO(FileIO.FileIO):
    """
    Opens, reads, and writes weights file objects in ArcGIS swm format.

    Spatial weights objects in the ArcGIS swm format are used in
    ArcGIS Spatial Statistics tools.
    Particularly, this format can be directly used with the tools under
    the category of Mapping Clusters.

    The values for [ORG_i] and [DST_i] should be integers,
    as ArcGIS Spatial Statistics tools support only unique integer IDs.
    For the case where a weights object uses non-integer IDs,
    ArcGISSwmIO allows users to use internal ids corresponding to record numbers,
    instead of original ids.

    The specifics of each part of the above structure is as follows.

    .. table:: ArcGIS SWM Components

    ============ ============ ==================================== ================================
        Part       Data type            Description                           Length
    ============ ============ ==================================== ================================
     ID_VAR_NAME  ASCII TEXT  ID variable name                     Flexible (Up to the 1st ;)
     ESRI_SRS     ASCII TEXT  ESRI spatial reference system        Flexible (Btw the 1st ; and \\n)
     NO_OBS       l.e. int    Number of observations               4
     ROW_STD      l.e. int    Whether or not row-standardized      4
     WGT_i
     ORG_i        l.e. int    ID of observaiton i                  4
     NO_NGH_i     l.e. int    Number of neighbors for obs. i (m)   4
     NGHS_i
     DSTS_i       l.e. int    IDs of all neighbors of obs. i       4*m
     WS_i         l.e. float  Weights for obs. i and its neighbors 8*m
     W_SUM_i      l.e. float  Sum of weights for "                 8
    ============ ============ ==================================== ================================

    """
    FORMATS = ['swm']
    MODES = ['r', 'w']
    def __init__(self, *args, **kwargs):
        # Default ID variable name written into the swm header.
        self._varName = 'Unknown'
        FileIO.FileIO.__init__(self, *args, **kwargs)
        # swm is a binary format, so always open the file in binary mode.
        self.file = open(self.dataPath, self.mode + 'b')
    def _set_varName(self, val):
        # Non-string values are silently ignored rather than raising.
        if issubclass(type(val), basestring):
            self._varName = val
    def _get_varName(self):
        return self._varName
    # Name of the ID variable recorded in the swm file header.
    varName = property(fget=_get_varName, fset=_set_varName)
    def read(self, n=-1):
        # ``n`` is accepted for FileIO interface compatibility; the whole
        # weights object is always read in one go.
        self._complain_ifclosed(self.closed)
        return self._read()
    def seek(self, pos):
        # Only rewinding to the start of the file is supported.
        if pos == 0:
            self.file.seek(0)
            self.pos = 0
    def _read(self):
        """
        Reads ArcGIS swm file.
        Returns a pysal.weights.weights.W object

        Examples
        --------

        Type 'dir(w)' at the interpreter to see what methods are supported.
        Open an ArcGIS swm file and read it into a pysal weights object

        >>> w = pysal.open(pysal.examples.get_path('ohio.swm'),'r').read()

        Get the number of observations from the header

        >>> w.n
        88

        Get the mean number of neighbors

        >>> w.mean_neighbors
        5.25

        Get neighbor distances for a single observation

        >>> w[1]
        {2: 1.0, 11: 1.0, 6: 1.0, 7: 1.0}

        """
        if self.pos > 0:
            # Only one weights object per file; it has already been consumed.
            raise StopIteration
        # Header line has the form "<id_var>;<spatial reference>\n".
        header01 = self.file.readline()
        id_var, srs = header01[:-1].split(';')
        self.varName = id_var
        # Header length = text line plus the two 4-byte ints read below.
        self.header_len = len(header01) + 8
        no_obs, row_std = tuple(unpack('<2l', self.file.read(8)))
        neighbors = {}
        weights = {}
        for i in xrange(no_obs):
            # Per-record layout (little-endian): origin id, neighbor count,
            # neighbor ids, weights, then the (unused) sum of weights.
            origin, no_nghs = tuple(unpack('<2l', self.file.read(8)))
            neighbors[origin] = []
            weights[origin] = []
            if no_nghs > 0:
                neighbors[origin] = list(unpack('<%il' %
                    no_nghs, self.file.read(4 * no_nghs)))
                weights[origin] = list(unpack('<%id' %
                    no_nghs, self.file.read(8 * no_nghs)))
                w_sum = list(unpack('<d', self.file.read(8)))[0]
        self.pos += 1
        return W(neighbors, weights)
    def write(self, obj, useIdIndex=False):
        """
        Writes a spatial weights matrix data file in swm format.

        Parameters
        ----------
        .write(weightsObject)
        accepts a weights object

        Returns
        -------
        an ArcGIS swm file
        write a weights object to the opened swm file.

        Examples
        --------

        >>> import tempfile, pysal, os
        >>> testfile = pysal.open(pysal.examples.get_path('ohio.swm'),'r')
        >>> w = testfile.read()

        Create a temporary file for this example

        >>> f = tempfile.NamedTemporaryFile(suffix='.swm')

        Reassign to new var

        >>> fname = f.name

        Close the temporary named file

        >>> f.close()

        Open the new file in write mode

        >>> o = pysal.open(fname,'w')

        Write the Weights object into the open file

        >>> o.write(w)
        >>> o.close()

        Read in the newly created text file

        >>> wnew = pysal.open(fname,'r').read()

        Compare values from old to new

        >>> wnew.pct_nonzero == w.pct_nonzero
        True

        Clean up temporary file created for this example

        >>> os.remove(fname) """
        self._complain_ifclosed(self.closed)
        if issubclass(type(obj), W):
            # ArcGIS swm supports only integer IDs unless remapping is asked.
            if not (type(obj.id_order[0]) in (np.int32, np.int64, int)) and not useIdIndex:
                raise TypeError("ArcGIS SWM files support only integer IDs")
            if useIdIndex:
                # Replace original ids with internal record-number ids.
                id2i = obj.id2i
                obj = remap_ids(obj, id2i)
            # Header: id variable name plus a placeholder spatial reference.
            self.file.write('%s;Unknown\n' % self.varName)
            self.file.write(pack('<l', obj.n))
            self.file.write(pack('<l', obj.transform.upper() == 'R'))
            for obs in obj.weights:
                # Mirror the per-record layout documented in the class
                # docstring: id, count, neighbor ids, weights, weight sum.
                self.file.write(pack('<l', obs))
                no_nghs = len(obj.weights[obs])
                self.file.write(pack('<l', no_nghs))
                self.file.write(pack('<%il' % no_nghs, *obj.neighbors[obs]))
                self.file.write(pack('<%id' % no_nghs, *obj.weights[obs]))
                self.file.write(pack('<d', sum(obj.weights[obs])))
            self.pos += 1
        else:
            raise TypeError("Expected a pysal weights object, got: %s" % (
                type(obj)))
    def close(self):
        self.file.close()
        FileIO.FileIO.close(self)
| bsd-3-clause |
koparasy/gemfi | src/arch/x86/isa/insts/general_purpose/rotate_and_shift/shift.py | 91 | 6764 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop SAL_R_I
{
slli reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_M_I
{
ldst t1, seg, sib, disp
slli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_P_I
{
rdip t7
ldst t1, seg, riprel, disp
slli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAL_1_R
{
slli reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_1_M
{
ldst t1, seg, sib, disp
slli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_1_P
{
rdip t7
ldst t1, seg, riprel, disp
slli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAL_R_R
{
sll reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_M_R
{
ldst t1, seg, sib, disp
sll t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_P_R
{
rdip t7
ldst t1, seg, riprel, disp
sll t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHLD_R_R
{
mdbi regm, 0
sld reg, reg, rcx, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHLD_M_R
{
ldst t1, seg, sib, disp
mdbi reg, 0
sld t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHLD_P_R
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
sld t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHLD_R_R_I
{
mdbi regm, 0
sldi reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHLD_M_R_I
{
ldst t1, seg, sib, disp
mdbi reg, 0
sldi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHLD_P_R_I
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
sldi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_R_I
{
srli reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_M_I
{
ldst t1, seg, sib, disp
srli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
srli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_1_R
{
srli reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_1_M
{
ldst t1, seg, sib, disp
srli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
srli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_R_R
{
srl reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_M_R
{
ldst t1, seg, sib, disp
srl t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
srl t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHRD_R_R
{
mdbi regm, 0
srd reg, reg, rcx, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHRD_M_R
{
ldst t1, seg, sib, disp
mdbi reg, 0
srd t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHRD_P_R
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
srd t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHRD_R_R_I
{
mdbi regm, 0
srdi reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHRD_M_R_I
{
ldst t1, seg, sib, disp
mdbi reg, 0
srdi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHRD_P_R_I
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
srdi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_R_I
{
srai reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_M_I
{
ldst t1, seg, sib, disp
srai t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
srai t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_1_R
{
srai reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_1_M
{
ldst t1, seg, sib, disp
srai t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
srai t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_R_R
{
sra reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_M_R
{
ldst t1, seg, sib, disp
sra t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
sra t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
'''
| bsd-3-clause |
rodrigolucianocosta/ControleEstoque | rOne/Storage101/models.py | 1 | 1042 | from __future__ import unicode_literals
from django.db import models
# Create your models here.
class Cliente(models.Model):
nome = models.CharField('nome',max_length=100)
sobrenome = models.CharField('sobrenome',max_length=100,null=True)
email = models.EmailField('email',max_length=50,null=True)
# data_nascimento = models.DateTime('data nascimento') //verificar formatos de data
# cpf = models.IntegerField('cpf')
# cnpj = BRCNPJField(null=True,blank=True)
rg = models.CharField(max_length=20)
# def __unicode__(self):
# return u"cpf: %s ,cnpj: %s" % (self.cpf or u", self.cnpj or u")
class Meta:
verbose_name = "cliente"
verbose_name = "clientes"
def __unicode__(self):
return u"%s %s %s %s"%(self.nome,self.sobrenome,self.email,self.rg)
class Endereco(models.Model):
rua = models.CharField(max_length=100)
numero = models.IntegerField(default=0)
bairro = models.CharField(max_length=100)
cep = models.CharField(max_length=100)
cidade = models.CharField(max_length=100)
estado = models.CharField(max_length=100)
| gpl-3.0 |
josecolella/PLD | bin/osx/treasurehunters.app/Contents/Resources/lib/python3.4/numpy/distutils/command/config.py | 10 | 17212 | # Added Fortran compiler support to config. Currently useful only for
# try_compile call. try_run works but is untested for most of Fortran
# compilers (they must define linker_exe first).
# Pearu Peterson
from __future__ import division, absolute_import, print_function
import os, signal
import warnings
import sys
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
from distutils import log
from distutils.file_util import copy_file
from distutils.ccompiler import CompileError, LinkError
import distutils
from numpy.distutils.exec_command import exec_command
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import check_inline, check_compiler_gcc4
from numpy.distutils.compat import get_exception
LANG_EXT['f77'] = '.f'
LANG_EXT['f90'] = '.f90'
class config(old_config):
old_config.user_options += [
('fcompiler=', None, "specify the Fortran compiler type"),
]
def initialize_options(self):
self.fcompiler = None
old_config.initialize_options(self)
def try_run(self, body, headers=None, include_dirs=None,
libraries=None, library_dirs=None, lang="c"):
warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
"Usage of try_run is deprecated: please do not \n" \
"use it anymore, and avoid configuration checks \n" \
"involving running executable on the target machine.\n" \
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning)
return old_config.try_run(self, body, headers, include_dirs, libraries,
library_dirs, lang)
def _check_compiler (self):
old_config._check_compiler(self)
from numpy.distutils.fcompiler import FCompiler, new_fcompiler
if sys.platform == 'win32' and self.compiler.compiler_type == 'msvc':
# XXX: hack to circumvent a python 2.6 bug with msvc9compiler:
# initialize call query_vcvarsall, which throws an IOError, and
# causes an error along the way without much information. We try to
# catch it here, hoping it is early enough, and print an helpful
# message instead of Error: None.
if not self.compiler.initialized:
try:
self.compiler.initialize()
except IOError:
e = get_exception()
msg = """\
Could not initialize compiler instance: do you have Visual Studio
installed ? If you are trying to build with mingw, please use python setup.py
build -c mingw32 instead ). If you have Visual Studio installed, check it is
correctly installed, and the right version (VS 2008 for python 2.6, VS 2003 for
2.5, etc...). Original exception was: %s, and the Compiler
class was %s
============================================================================""" \
% (e, self.compiler.__class__.__name__)
print ("""\
============================================================================""")
raise distutils.errors.DistutilsPlatformError(msg)
if not isinstance(self.fcompiler, FCompiler):
self.fcompiler = new_fcompiler(compiler=self.fcompiler,
dry_run=self.dry_run, force=1,
c_compiler=self.compiler)
if self.fcompiler is not None:
self.fcompiler.customize(self.distribution)
if self.fcompiler.get_version():
self.fcompiler.customize_cmd(self)
self.fcompiler.show_customization()
def _wrap_method(self, mth, lang, args):
from distutils.ccompiler import CompileError
from distutils.errors import DistutilsExecError
save_compiler = self.compiler
if lang in ['f77', 'f90']:
self.compiler = self.fcompiler
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError):
msg = str(get_exception())
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
return ret
def _compile (self, body, headers, include_dirs, lang):
return self._wrap_method(old_config._compile, lang,
(body, headers, include_dirs, lang))
def _link (self, body,
headers, include_dirs,
libraries, library_dirs, lang):
if self.compiler.compiler_type=='msvc':
libraries = (libraries or [])[:]
library_dirs = (library_dirs or [])[:]
if lang in ['f77', 'f90']:
lang = 'c' # always use system linker when using MSVC compiler
if self.fcompiler:
for d in self.fcompiler.library_dirs or []:
# correct path when compiling in Cygwin but with
# normal Win Python
if d.startswith('/usr/lib'):
s, o = exec_command(['cygpath', '-w', d],
use_tee=False)
if not s: d = o
library_dirs.append(d)
for libname in self.fcompiler.libraries or []:
if libname not in libraries:
libraries.append(libname)
for libname in libraries:
if libname.startswith('msvc'): continue
fileexists = False
for libdir in library_dirs or []:
libfile = os.path.join(libdir, '%s.lib' % (libname))
if os.path.isfile(libfile):
fileexists = True
break
if fileexists: continue
# make g77-compiled static libs available to MSVC
fileexists = False
for libdir in library_dirs:
libfile = os.path.join(libdir, 'lib%s.a' % (libname))
if os.path.isfile(libfile):
# copy libname.a file to name.lib so that MSVC linker
# can find it
libfile2 = os.path.join(libdir, '%s.lib' % (libname))
copy_file(libfile, libfile2)
self.temp_files.append(libfile2)
fileexists = True
break
if fileexists: continue
log.warn('could not find library %r in directories %s' \
% (libname, library_dirs))
elif self.compiler.compiler_type == 'mingw32':
generate_manifest(self)
return self._wrap_method(old_config._link, lang,
(body, headers, include_dirs,
libraries, library_dirs, lang))
def check_header(self, header, include_dirs=None, library_dirs=None, lang='c'):
self._check_compiler()
return self.try_compile(
"/* we need a dummy line to make distutils happy */",
[header], include_dirs)
def check_decl(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main()
{
#ifndef %s
(void) %s;
#endif
;
return 0;
}""" % (symbol, symbol)
return self.try_compile(body, headers, include_dirs)
def check_macro_true(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
body = """
int main()
{
#if %s
#else
#error false or undefined macro
#endif
;
return 0;
}""" % (symbol,)
return self.try_compile(body, headers, include_dirs)
def check_type(self, type_name, headers=None, include_dirs=None,
library_dirs=None):
"""Check type availability. Return True if the type can be compiled,
False otherwise"""
self._check_compiler()
# First check the type can be compiled
body = r"""
int main() {
if ((%(name)s *) 0)
return 0;
if (sizeof (%(name)s))
return 0;
}
""" % {'name': type_name}
st = False
try:
try:
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
st = True
except distutils.errors.CompileError:
st = False
finally:
self._clean()
return st
def check_type_size(self, type_name, headers=None, include_dirs=None, library_dirs=None, expected=None):
"""Check size of a given type."""
self._check_compiler()
# First check the type can be compiled
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
test_array [0] = 0
;
return 0;
}
"""
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
self._clean()
if expected:
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
for size in expected:
try:
self._compile(body % {'type': type_name, 'size': size},
headers, include_dirs, 'c')
self._clean()
return size
except CompileError:
pass
# this fails to *compile* if size > sizeof(type)
body = r"""
typedef %(type)s npy_check_sizeof_type;
int main ()
{
static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
test_array [0] = 0
;
return 0;
}
"""
# The principle is simple: we first find low and high bounds of size
# for the type, where low/high are looked up on a log scale. Then, we
# do a binary search to find the exact size between low and high
low = 0
mid = 0
while True:
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
break
except CompileError:
#log.info("failure to test for bound %d" % mid)
low = mid + 1
mid = 2 * mid + 1
high = mid
# Binary search:
while low != high:
mid = (high - low) // 2 + low
try:
self._compile(body % {'type': type_name, 'size': mid},
headers, include_dirs, 'c')
self._clean()
high = mid
except CompileError:
low = mid + 1
return low
def check_func(self, func,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
# clean up distutils's config a bit: add void to main(), and
# return a value.
self._check_compiler()
body = []
if decl:
body.append("int %s (void);" % func)
# Handle MSVC intrinsics: force MS compiler to make a function call.
# Useful to test for some functions when built with optimization on, to
# avoid build error because the intrinsic and our 'fake' test
# declaration do not match.
body.append("#ifdef _MSC_VER")
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
if call_args is None:
call_args = ''
body.append(" %s(%s);" % (func, call_args))
else:
body.append(" %s;" % func)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_funcs_once(self, funcs,
headers=None, include_dirs=None,
libraries=None, library_dirs=None,
decl=False, call=False, call_args=None):
"""Check a list of functions at once.
This is useful to speed up things, since all the functions in the funcs
list will be put in one compilation unit.
Arguments
---------
funcs : seq
list of functions to test
include_dirs : seq
list of header paths
libraries : seq
list of libraries to link the code snippet to
libraru_dirs : seq
list of library paths
decl : dict
for every (key, value), the declaration in the value will be
used for function in key. If a function is not in the
dictionay, no declaration will be used.
call : dict
for every item (f, value), if the value is True, a call will be
done to the function f.
"""
self._check_compiler()
body = []
if decl:
for f, v in decl.items():
if v:
body.append("int %s (void);" % f)
# Handle MS intrinsics. See check_func for more info.
body.append("#ifdef _MSC_VER")
for func in funcs:
body.append("#pragma function(%s)" % func)
body.append("#endif")
body.append("int main (void) {")
if call:
for f in funcs:
if f in call and call[f]:
if not (call_args and f in call_args and call_args[f]):
args = ''
else:
args = call_args[f]
body.append(" %s(%s);" % (f, args))
else:
body.append(" %s;" % f)
else:
for f in funcs:
body.append(" %s;" % f)
body.append(" return 0;")
body.append("}")
body = '\n'.join(body) + "\n"
return self.try_link(body, headers, include_dirs,
libraries, library_dirs)
def check_inline(self):
"""Return the inline keyword recognized by the compiler, empty string
otherwise."""
return check_inline(self)
def check_compiler_gcc4(self):
"""Return True if the C compiler is gcc >= 4."""
return check_compiler_gcc4(self)
def get_output(self, body, headers=None, include_dirs=None,
libraries=None, library_dirs=None,
lang="c", use_tee=None):
"""Try to compile, link to an executable, and run a program
built from 'body' and 'headers'. Returns the exit status code
of the program and its output.
"""
warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
"Usage of get_output is deprecated: please do not \n" \
"use it anymore, and avoid configuration checks \n" \
"involving running executable on the target machine.\n" \
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning)
from distutils.ccompiler import CompileError, LinkError
self._check_compiler()
exitcode, output = 255, ''
try:
grabber = GrabStdout()
try:
src, obj, exe = self._link(body, headers, include_dirs,
libraries, library_dirs, lang)
grabber.restore()
except:
output = grabber.data
grabber.restore()
raise
exe = os.path.join('.', exe)
exitstatus, output = exec_command(exe, execute_in='.',
use_tee=use_tee)
if hasattr(os, 'WEXITSTATUS'):
exitcode = os.WEXITSTATUS(exitstatus)
if os.WIFSIGNALED(exitstatus):
sig = os.WTERMSIG(exitstatus)
log.error('subprocess exited with signal %d' % (sig,))
if sig == signal.SIGINT:
# control-C
raise KeyboardInterrupt
else:
exitcode = exitstatus
log.info("success!")
except (CompileError, LinkError):
log.info("failure.")
self._clean()
return exitcode, output
class GrabStdout(object):
def __init__(self):
self.sys_stdout = sys.stdout
self.data = ''
sys.stdout = self
def write (self, data):
self.sys_stdout.write(data)
self.data += data
def flush (self):
self.sys_stdout.flush()
def restore(self):
sys.stdout = self.sys_stdout
| mit |
AnishShah/tensorflow | tensorflow/contrib/estimator/python/estimator/dnn_test.py | 17 | 6164 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for dnn.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import tempfile
import numpy as np
import six
from tensorflow.contrib.estimator.python.estimator import dnn
from tensorflow.contrib.estimator.python.estimator import head as head_lib
from tensorflow.python.estimator.canned import dnn_testing_utils
from tensorflow.python.estimator.canned import prediction_keys
from tensorflow.python.estimator.export import export
from tensorflow.python.estimator.inputs import numpy_io
from tensorflow.python.feature_column import feature_column
from tensorflow.python.framework import ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.summary.writer import writer_cache
def _dnn_estimator_fn(weight_column=None, label_dimension=1, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Returns a DNNEstimator that uses regression_head."""
return dnn.DNNEstimator(
head=head_lib.regression_head(
weight_column=weight_column, label_dimension=label_dimension,
# Tests in core (from which this test inherits) test the sum loss.
loss_reduction=losses.Reduction.SUM),
*args, **kwargs)
def _dnn_estimator_classifier_fn(n_classes=3, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
"""Returns a DNNEstimator that uses multi_class_head."""
return dnn.DNNEstimator(head=head_lib.multi_class_head(n_classes=n_classes),
*args, **kwargs)
class DNNEstimatorEvaluateTest(
dnn_testing_utils.BaseDNNRegressorEvaluateTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorEvaluateTest.__init__(
self, _dnn_estimator_fn)
class DNNEstimatorPredictTest(
dnn_testing_utils.BaseDNNRegressorPredictTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorPredictTest.__init__(
self, _dnn_estimator_fn)
class DNNEstimatorTrainTest(
dnn_testing_utils.BaseDNNRegressorTrainTest, test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNRegressorTrainTest.__init__(
self, _dnn_estimator_fn)
class DNNEstimatorWarmStartingTest(dnn_testing_utils.BaseDNNWarmStartingTest,
test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
test.TestCase.__init__(self, methodName)
dnn_testing_utils.BaseDNNWarmStartingTest.__init__(
self, _dnn_estimator_classifier_fn, _dnn_estimator_fn)
class DNNEstimatorIntegrationTest(test.TestCase):
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
writer_cache.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def _test_complete_flow(
self, train_input_fn, eval_input_fn, predict_input_fn, input_dimension,
label_dimension, batch_size):
feature_columns = [
feature_column.numeric_column('x', shape=(input_dimension,))]
est = dnn.DNNEstimator(
head=head_lib.regression_head(label_dimension=label_dimension),
hidden_units=(2, 2),
feature_columns=feature_columns,
model_dir=self._model_dir)
# TRAIN
num_steps = 10
est.train(train_input_fn, steps=num_steps)
# EVALUTE
scores = est.evaluate(eval_input_fn)
self.assertEqual(num_steps, scores[ops.GraphKeys.GLOBAL_STEP])
self.assertIn('loss', six.iterkeys(scores))
# PREDICT
predictions = np.array([
x[prediction_keys.PredictionKeys.PREDICTIONS]
for x in est.predict(predict_input_fn)
])
self.assertAllEqual((batch_size, label_dimension), predictions.shape)
# EXPORT
feature_spec = feature_column.make_parse_example_spec(feature_columns)
serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
feature_spec)
export_dir = est.export_savedmodel(tempfile.mkdtemp(),
serving_input_receiver_fn)
self.assertTrue(gfile.Exists(export_dir))
def test_numpy_input_fn(self):
"""Tests complete flow with numpy_input_fn."""
label_dimension = 2
batch_size = 10
data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
data = data.reshape(batch_size, label_dimension)
# learn y = x
train_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
eval_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
y=data,
batch_size=batch_size,
shuffle=False)
predict_input_fn = numpy_io.numpy_input_fn(
x={'x': data},
batch_size=batch_size,
shuffle=False)
self._test_complete_flow(
train_input_fn=train_input_fn,
eval_input_fn=eval_input_fn,
predict_input_fn=predict_input_fn,
input_dimension=label_dimension,
label_dimension=label_dimension,
batch_size=batch_size)
if __name__ == '__main__':
test.main()
| apache-2.0 |
takluyver/conda-build | conda_build/tarcheck.py | 2 | 1892 | from __future__ import absolute_import, division, print_function
import json
import tarfile
from os.path import basename
def dist_fn(fn):
if fn.endswith('.tar'):
return fn[:-4]
elif fn.endswith('.tar.bz2'):
return fn[:-8]
else:
raise Exception('did not expect filename: %r' % fn)
class TarCheck(object):
def __init__(self, path):
self.t = tarfile.open(path)
self.paths = set(m.path for m in self.t.getmembers())
self.dist = dist_fn(basename(path))
self.name, self.version, self.build = self.dist.rsplit('-', 2)
def info_files(self):
lista = [p.strip().decode('utf-8') for p in
self.t.extractfile('info/files').readlines()]
seta = set(lista)
if len(lista) != len(seta):
raise Exception('info/files: duplicates')
listb = [m.path for m in self.t.getmembers()
if not (m.path.startswith('info/') or m.isdir())]
setb = set(listb)
if len(listb) != len(setb):
raise Exception('info_files: duplicate members')
if seta == setb:
return
for p in sorted(seta | setb):
if p not in seta:
print('%r not in info/files' % p)
if p not in setb:
print('%r not in tarball' % p)
raise Exception('info/files')
def index_json(self):
info = json.loads(self.t.extractfile('info/index.json').read().decode('utf-8'))
for varname in 'name', 'version', 'build':
if info[varname] != getattr(self, varname):
raise Exception('%s: %r != %r' % (varname, info[varname],
getattr(self, varname)))
assert isinstance(info['build_number'], int)
def check_all(path):
x = TarCheck(path)
x.info_files()
x.index_json()
x.t.close()
| bsd-3-clause |
LalatenduMohanty/imagefactory | imagefactory_plugins/RHEVM/RHEVM.py | 2 | 11081 | #
# Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import zope
import oz.GuestFactory
import oz.TDL
import guestfs
import libxml2
import traceback
import json
import ConfigParser
import subprocess
import logging
from time import *
from tempfile import *
from imgfac.ApplicationConfiguration import ApplicationConfiguration
from imgfac.ImageFactoryException import ImageFactoryException
from imgfac.CloudDelegate import CloudDelegate
from imgfac.FactoryUtils import launch_inspect_and_mount, shutdown_and_close, remove_net_persist, create_cloud_info
from imgfac.FactoryUtils import check_qcow_size, qemu_convert_cmd
from xml.etree.ElementTree import fromstring
from RHEVMHelper import RHEVMHelper
def subprocess_check_output(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.STDOUT, *popenargs, **kwargs)
stdout, stderr = process.communicate()
retcode = process.poll()
if retcode:
cmd = ' '.join(*popenargs)
raise ImageFactoryException("'%s' failed(%d): %s\nstdout: %s" % (cmd, retcode, stderr, stdout))
return (stdout, stderr, retcode)
class RHEVM(object):
zope.interface.implements(CloudDelegate)
def __init__(self):
super(RHEVM, self).__init__()
self.app_config = ApplicationConfiguration().configuration
self.log = logging.getLogger('%s.%s' % (__name__, self.__class__.__name__))
def builder_should_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('builder_should_create_target_image() called on RHEVM plugin - returning True')
return True
def builder_will_create_target_image(self, builder, target, image_id, template, parameters):
# Nothing really to do here
pass
def activity(self, activity):
# Simple helper function
# Activity should be a one line human-readable string indicating the task in progress
# We log it at DEBUG and also set it as the status_detail on our active image
self.log.debug(activity)
self.active_image.status_detail['activity'] = activity
def log_exc(self):
self.log.debug("Exception caught in ImageFactory")
self.log.debug(traceback.format_exc())
def build_image(self, build_id=None):
try:
self.build_upload(build_id)
except:
self.log_exc()
self.status="FAILED"
raise
def builder_did_create_target_image(self, builder, target, image_id, template, parameters):
self.log.info('builder_did_create_target_image() called in RHEVM plugin')
self.status="BUILDING"
# TODO: This is a convenience variable for refactoring - rename
self.new_image_id = builder.target_image.identifier
# TODO: More convenience vars - revisit
self.template = template
self.target = target
self.builder = builder
# This lets our logging helper know what image is being operated on
self.active_image = self.builder.target_image
self.tdlobj = oz.TDL.TDL(xmlstring=self.template.xml, rootpw_required=self.app_config["tdl_require_root_pw"])
# Add in target specific content
#TODO-URGENT: Work out how to do this in the new framework
#self.add_target_content()
# Oz assumes unique names - TDL built for multiple backends guarantees
# they are not unique. We don't really care about the name so just
# force uniqueness
# Oz now uses the tdlobject name property directly in several places
# so we must change it
self.tdlobj.name = "factory-build-" + self.new_image_id
# In contrast to our original builders, we enter the cloud plugins with a KVM file already
# created as the base_image. As a result, all the Oz building steps are gone (and can be found
# in the OS plugin(s)
# OS plugin has already provided the initial file for us to work with
# which we can currently assume is a raw KVM compatible image
self.image = builder.target_image.data
# Add the cloud-info file
self.modify_oz_filesystem()
# Finally, if our format is qcow2, do the transformation here if needed
if ("rhevm_image_format" in self.app_config) and (self.app_config["rhevm_image_format"] == "qcow2") \
and not check_qcow_size(self.image):
self.log.debug("Converting RAW image to qcow2 format")
# TODO: When RHEV adds support, use the -c option to compress these images to save space
# (at that point, remove the qcow check as we always want to compress)
qemu_img_cmd = qemu_convert_cmd( self.image, self.image + ".tmp.qcow2" )
(stdout, stderr, retcode) = subprocess_check_output(qemu_img_cmd)
os.unlink(self.image)
os.rename(self.image + ".tmp.qcow2", self.image)
self.percent_complete=100
self.status="COMPLETED"
def modify_oz_filesystem(self):
self.log.debug("Doing further Factory specific modification of Oz image")
guestfs_handle = launch_inspect_and_mount(self.builder.target_image.data)
remove_net_persist(guestfs_handle)
create_cloud_info(guestfs_handle, self.target)
shutdown_and_close(guestfs_handle)
def push_image_to_provider(self, builder, provider, credentials, target, target_image, parameters):
self.log.info('push_image_to_provider() called in RHEVM')
# TODO: This is a convenience variable for refactoring - rename
self.new_image_id = builder.provider_image.identifier
self.tdlobj = oz.TDL.TDL(xmlstring=builder.target_image.template, rootpw_required=self.app_config["tdl_require_root_pw"])
self.builder = builder
self.active_image = self.builder.provider_image
self.push_image(target_image, provider, credentials)
def push_image(self, target_image_id, provider, credentials):
try:
self.status = "PUSHING"
self.percent_complete = 0
self.rhevm_push_image_upload(target_image_id, provider, credentials)
except:
self.log_exc()
self.status="FAILED"
raise
self.status = "COMPLETED"
def rhevm_push_image_upload(self, target_image_id, provider, credentials):
provider_data = self.get_dynamic_provider_data(provider)
if provider_data is None:
raise ImageFactoryException("RHEV-M instance not found in XML or JSON provided")
self.generic_decode_credentials(credentials, provider_data, "rhevm")
self.log.debug("Username: %s" % (self.username))
helper = RHEVMHelper(url=provider_data['api-url'], username=self.username, password=self.password)
# Image is always here and it is the target_image datafile
input_image = self.builder.target_image.data
rhevm_uuid = helper.import_template(input_image, provider_data['nfs-host'], provider_data['nfs-path'],
provider_data['nfs-dir'], provider_data['cluster'], ovf_name=str(self.new_image_id),
ovf_desc = "Template name (%s) from base image (%s)" % (self.tdlobj.name, str(self.builder.base_image.identifier)) )
if rhevm_uuid is None:
raise ImageFactoryException("Failed to obtain RHEV-M UUID from helper")
self.log.debug("New RHEVM Template UUID: %s " % (rhevm_uuid))
self.builder.provider_image.identifier_on_provider = rhevm_uuid
self.builder.provider_image.provider_account_identifier = self.username
self.percent_complete = 100
def delete_from_provider(self, builder, provider, credentials, target, parameters):
self.log.debug("Deleting RHEVM template (%s)" % (self.builder.provider_image.identifier_on_provider))
provider_data = self.get_dynamic_provider_data(provider)
if provider_data is None:
raise ImageFactoryException("RHEV-M instance not found in XML or JSON provided")
self.generic_decode_credentials(credentials, provider_data, "rhevm")
self.log.debug("Username: %s" % (self.username))
helper = RHEVMHelper(url=provider_data['api-url'], username=self.username, password=self.password)
if not helper.delete_template(self.builder.provider_image.identifier_on_provider):
raise ImageFactoryException("Delete of template failed")
def generic_decode_credentials(self, credentials, provider_data, target):
    """Populate self.username / self.password for simple credential targets.

    Convenience function for simple creds (rhev-m and vmware currently).
    Values found in the <provider_credentials> XML take precedence;
    otherwise fall back to the 'username'/'password' keys in provider_data.

    Raises ImageFactoryException when neither source supplies a value.
    """
    doc = libxml2.parseDoc(credentials)
    try:
        self.username = None
        _usernodes = doc.xpathEval("//provider_credentials/%s_credentials/username" % (target))
        if len(_usernodes) > 0:
            self.username = _usernodes[0].content
        else:
            try:
                self.username = provider_data['username']
            except KeyError:
                raise ImageFactoryException("No username specified in config file or in push call")
        self.provider_account_identifier = self.username

        _passnodes = doc.xpathEval("//provider_credentials/%s_credentials/password" % (target))
        if len(_passnodes) > 0:
            self.password = _passnodes[0].content
        else:
            try:
                self.password = provider_data['password']
            except KeyError:
                raise ImageFactoryException("No password specified in config file or in push call")
    finally:
        # Always release the libxml2 document -- the original only freed it
        # on the success path, leaking it whenever an exception was raised.
        doc.freeDoc()
def get_dynamic_provider_data(self, provider):
    """Resolve provider details for RHEV-M or VSphere.

    First tries to interpret `provider` as an ad-hoc/dynamic XML
    definition, then as JSON.  Returns the parsed attribute dict on
    success, or None if both interpretations fail.  Used by the builders
    as well, so it is deliberately "public".
    """
    try:
        return fromstring(provider).attrib
    except Exception as err:
        self.log.debug('Testing provider for XML: %s' % err)
    try:
        return json.loads(provider)
    except ValueError as err:
        self.log.debug('Testing provider for JSON: %s' % err)
    return None
def abort(self):
    # Aborting a RHEV-M push is a no-op for this provider plugin.
    pass
| apache-2.0 |
mclois/iteexe | formless/configurable.py | 10 | 6757 | # Copyright (c) 2004 Divmod.
# See LICENSE for details.
from formless.iformless import IConfigurable, IActionableType, IBinding
from formless.annotate import TypedInterface, Argument
from nevow import inevow
from nevow import compy
from nevow.context import WovenContext
class Configurable(object):
    """Base adapter exposing a wrapped object's formless bindings.

    Collects binding metadata from the TypedInterfaces implemented by the
    bound object and dispatches form posts to the matching binding.
    """
    __implements__ = IConfigurable,

    # Lazily-built mapping of binding/action name -> binding object;
    # populated by getBindingNames().
    bindingDict = None

    def __init__(self, original):
        self.original = original
        self.boundTo = self

    def getBindingNames(self, context):
        """Return the names of all bindings exposed by the bound object,
        rebuilding self.bindingDict as a side effect."""
        ## Todo: remove this getattr
        ifs = compy.getInterfaces(getattr(self, 'boundTo', self))
        # Skip the plumbing interfaces; only concrete TypedInterfaces count.
        ifs = [
            x for x in ifs if x is not IConfigurable and x is not TypedInterface
        ]
        bindingNames = []
        self.bindingDict = bindingDict = {}
        for interface in ifs:
            ## TypedInterfaces have a __spec__ attribute which is a list of all Typed properties and
            ## autocallable methods
            for binding in getattr(interface, '__spec__', []):
                bindingDict[binding.name] = binding
                if binding.name not in bindingNames:
                    bindingNames.append(binding.name)
                # Actionable bindings contribute their actions as bindings too.
                if compy.implements(binding.typedValue, IActionableType):
                    acts = binding.typedValue.actions
                    if acts is None:
                        acts = []
                    for action in acts:
                        bindingDict[action.name] = action
        return bindingNames

    def getDefault(self, forBinding):
        """Return a default value for the given binding, preferring live
        attributes over the binding's declared default."""
        ## TODO: Clean this up, it's a mess
        if not isinstance(forBinding, Argument):
            name = forBinding.name
            if hasattr(self, name):
                return getattr(self, name)
            ## Todo: Only do this in ConfigurableAdapter instead of Configurable
            if hasattr(self.boundTo, name):
                return getattr(self.boundTo, name)
            if self.original is not self.boundTo and hasattr(self.original, name):
                return getattr(self.original, name)
        return forBinding.default

    def getBinding(self, context, name):
        """Look up a binding by name, consulting bind_<name> hooks first.

        Raises RuntimeError when the name is not an exposed binding.
        """
        if self.bindingDict is None:
            self.getBindingNames(context)
        if self.bindingDict is None:
            self.bindingDict = {}
        # A bind_<name> callable (on self or the bound object) overrides the
        # statically collected binding.
        binding = getattr(self, 'bind_%s' % name, getattr(self.boundTo, 'bind_%s' % name, None))
        if binding is not None:
            binding = binding(context)
        else:
            try:
                binding = self.bindingDict[name]
            except KeyError:
                raise RuntimeError, "%s is not an exposed binding on object %s." % (name, self.boundTo)
        binding.boundTo = self.boundTo
        return binding

    def postForm(self, ctx, bindingName, args):
        """Accept a form post to the given bindingName. The bindingName
        can be dotted to indicate an attribute of this Configurable, eg
        addresses.0.changeEmail. The post arguments are given in args.
        Return a Resource which will be rendered in response.
        """
        from formless import iformless
        from nevow.tags import invisible
        request = ctx.locate(inevow.IRequest)
        pathSegs = bindingName.split('.')
        configurable = self

        cf = ctx.locate(iformless.IConfigurableFactory)
        ## Get the first binding
        firstSeg = pathSegs.pop(0)
        binding = configurable.getBinding(ctx, firstSeg)
        ctx.remember(binding, IBinding)
        ctx.remember(configurable, IConfigurable)

        ## I don't think this works right now, it needs to be fixed.
        ## Most cases it won't be triggered, because we're just traversing a
        ## single binding name
        for seg in pathSegs:
            # Dotted traversal is known-broken; the asserts below guarantee
            # this loop cannot be taken without blowing up immediately.
            assert 1 == 0, "Sorry, this doesn't work right now"
            binding = configurable.getBinding(ctx, seg)
            child = self.boundTo
            if not isinstance(binding, GroupBinding):
                accessor = inevow.IContainer(configurable.boundTo, None)
                if accessor is None:
                    child = getattr(configurable.boundTo, binding.name)
                else:
                    child = accessor.child(ctx, binding.name)
            ## If it's a groupbinding, we don't do anything at all for this path segment
            ## This won't work right now. We need to push the previous configurable
            ## as the configurableFactory somehow and ask that for the next binding
            ## we also need to support deferreds coming back from locateConfigurable
            assert 'black' is 'white', "Deferred support is pending"
            configurable = cf.locateConfigurable(ctx, child)
            ctx = WovenContext(ctx, invisible(key=seg))
            ctx.remember(binding, IBinding)
            ctx.remember(configurable, IConfigurable)

        # Delegate the actual form processing to the binding's processor and
        # stash the result/status message on the context for rendering.
        bindingProcessor = iformless.IInputProcessor(binding)
        rv = bindingProcessor.process(ctx, binding.boundTo, args)
        ctx.remember(rv, inevow.IHand)
        ctx.remember('%r success.' % bindingName, inevow.IStatusMessage)
        return rv

    def summary(self):
        return "An instance of %s" % self.__class__.__name__

    # URL to post forms to; None means "post back to the current location".
    postLocation = None
class NotFoundConfigurable(Configurable):
    """Placeholder configurable whose bindings always fail to resolve.

    'original' holds the error message raised on any binding lookup.
    """
    def getBinding(self, context, name):
        # Parenthesized raise form is valid under both Python 2 and 3,
        # unlike the original "raise RuntimeError, ..." statement.
        raise RuntimeError(self.original)
class TypedInterfaceConfigurable(Configurable):
    """Configurable that binds directly to the wrapped object itself
    (boundTo is the original, not the adapter)."""
    def __init__(self, original):
        self.original = original
        self.boundTo = original

    def summary(self):
        # Human-readable description used by form renderers.
        return "An instance of %s" % self.original.__class__.__name__

    def __repr__(self):
        return "TypedInterfaceConfigurable(%r)" % self.original
class ListConfigurable(TypedInterfaceConfigurable):
    """Configurable for list-like originals: every requested name is
    treated as an element lookup rather than a declared binding."""
    def getBinding(self, context, name):
        # 'name' is presumably a list index here; the binding wraps an
        # untyped Object -- TODO confirm against ElementBinding's contract.
        eb = ElementBinding(name, Object())
        eb.boundTo = self.original
        return eb
class GroupConfigurable(TypedInterfaceConfigurable):
    """Configurable restricted to the bindings of a single group interface."""

    def __init__(self, original, groupInterface):
        TypedInterfaceConfigurable.__init__(self, original)
        self.groupInterface = groupInterface

    # Name -> binding mapping, rebuilt by getBindingNames().
    bindingDict = None

    def getBindingNames(self, context):
        """Collect binding (and action) names from the group interface only."""
        names = []
        mapping = {}
        self.bindingDict = mapping
        for binding in getattr(self.groupInterface, '__spec__', []):
            mapping[binding.name] = binding
            if binding.name not in names:
                names.append(binding.name)
            # Actionable bindings also expose each of their actions by name.
            if compy.implements(binding.typedValue, IActionableType):
                for action in (binding.typedValue.actions or []):
                    mapping[action.name] = action
        return names
| gpl-2.0 |
shankari/e-mission-server | emission/incomplete_tests/tourModelTests/TestClusterPipeline.py | 3 | 3641 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from past.builtins import cmp
from future import standard_library
standard_library.install_aliases()
from builtins import *
import logging
import unittest
import emission.core.get_database as edb
import emission.analysis.modelling.tour_model.cluster_pipeline as cp
import uuid as uu
import emission.tests.common as etc
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
import emission.analysis.intake.segmentation.trip_segmentation as eaist
import emission.analysis.intake.segmentation.section_segmentation as eaiss
import emission.analysis.intake.cleaning.clean_and_resample as eaicr
class ClusterPipelineTests(unittest.TestCase):
    """Exercises the tour-model cluster pipeline against a real example
    dataset loaded into a scratch analysis database."""

    def setUp(self):
        self.clearRelevantSections()
        self.import_test_info()
        # Radius (in meters, per cluster_pipeline's usage) for noise filtering.
        self.RADIUS = 200

    def clearRelevantSections(self):
        # Drop previously computed analysis results so each test starts clean.
        edb.get_analysis_timeseries_db().drop()

    def import_test_info(self):
        # Load a known real-world example and run the full intake pipeline on it.
        etc.setupRealExample(self, "emission/tests/data/real_examples/shankari_2015-aug-27")
        eaicf.filter_accuracy(self.testUUID)
        etc.runIntakePipeline(self.testUUID)

    def testSanity(self):
        # The full pipeline should run end-to-end without raising.
        cp.main(self.testUUID)

    def testReadData(self):
        data = cp.read_data(uuid=self.testUUID)
        # Test to make sure something is happening
        self.assertTrue(len(data) > 5)
        # Test to make sure that the trips are mapped to the correct uuid
        bad_data = cp.read_data(uuid="FakeUUID")
        self.assertEqual(len(bad_data), 0)

    def testRemoveNoise(self):
        data = cp.read_data(uuid=self.testUUID)
        # Test to make sure the code doesn't break on an empty dataset
        new_data, bins = cp.remove_noise(None, self.RADIUS)
        self.assertTrue(len(new_data) == len(bins) == 0)
        # Test to make sure some or no data was filtered out, but that nothing
        # was added after filtering.  BUG FIX: this call previously passed
        # None again instead of 'data', so the real dataset was never tested.
        new_data, bins = cp.remove_noise(data, self.RADIUS)
        self.assertTrue(len(new_data) <= len(data))
        # Make sure there are not more bins than data; that wouldnt make sense
        self.assertTrue(len(bins) <= len(data))

    def testCluster(self):
        data = cp.read_data(uuid=self.testUUID)
        # Test to make sure empty dataset doesn't crash the program
        clusters, labels, new_data = cp.cluster([], 10)
        self.assertTrue(len(new_data) == clusters == len(labels) == 0)
        # Test to make sure clustering with noise works
        clusters, labels, new_data = cp.cluster(data, 10)
        self.assertEqual(len(labels), len(new_data))
        self.assertEqual(cmp(new_data, data), 0)
        # Test to make sure clustering without noise works
        data, bins = cp.remove_noise(data, self.RADIUS)
        clusters, labels, new_data = cp.cluster(data, len(bins))
        self.assertTrue(clusters == 0 or len(bins) <= clusters <= len(bins) + 10)

    def testClusterToTourModel(self):
        # Test to make sure it doesn't crash on a empty dataset
        data = cp.cluster_to_tour_model(None, None)
        self.assertFalse(data)
        # Test with the real dataset
        data = cp.read_data(uuid=self.testUUID)
        data, bins = cp.remove_noise(data, self.RADIUS)
        n, labels, data = cp.cluster(data, len(bins))
        tour_dict = cp.main(uuid=self.testUUID)
        self.assertTrue(len(tour_dict) <= n)
if __name__ == "__main__":
    # Allow running this module directly as a unittest entry point.
    unittest.main()
| bsd-3-clause |
aimaletdinow/LABS | tools/perf/scripts/python/net_dropmonitor.py | 1812 | 1749 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
	"""Load (address, name) pairs from /proc/kallsyms into the global
	'kallsyms' list, sorted by address for later binary search.

	Silently does nothing if the file cannot be opened (insufficient
	permissions or a non-Linux host).
	"""
	global kallsyms
	try:
		f = open("/proc/kallsyms", "r")
	except:
		return
	# Close the handle deterministically; the original leaked it.
	try:
		for line in f:
			# Format: "<hex-address> <type> <symbol-name> ..."; split once
			# instead of twice per line as the original did.
			fields = line.split()
			kallsyms.append((int(fields[0], 16), fields[2]))
	finally:
		f.close()
	kallsyms.sort()
def get_sym(sloc):
	"""Map an address to (symbol_name, offset) via the kallsyms table.

	Returns (None, 0) when the address precedes every known symbol.
	"""
	loc = int(sloc)

	# Binary search for the greatest index whose symbol address is <= loc.
	# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= lo
	#            kallsyms[i][0] >  loc for all hi <= i < len(kallsyms)
	lo, hi = -1, len(kallsyms)
	while hi - lo > 1:
		mid = (lo + hi) // 2
		if kallsyms[mid][0] <= loc:
			lo = mid
		else:
			hi = mid
	# Now (lo == -1 or kallsyms[lo][0] <= loc)
	# and (lo == len(kallsyms) - 1 or loc < kallsyms[lo + 1][0])
	if lo < 0:
		return (None, 0)
	base, name = kallsyms[lo]
	return (name, loc - base)
def print_drop_table():
	# Dump one row per drop location, resolving raw addresses to
	# symbol+offset where the kallsyms table allows it.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			# Unresolvable address: fall back to the raw location string.
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# perf-script callback: invoked once when tracing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# perf-script callback: invoked at shutdown -- resolve symbols
	# (deferred until now to avoid slowing down the trace) and report.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, callchain,
		   skbaddr, location, protocol):
	"""perf tracepoint handler for skb:kfree_skb -- count drops per location."""
	slocation = str(location)
	# dict.get replaces the original try/bare-except counter dance; the
	# bare 'except:' could have masked unrelated errors.
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
disqus/django-old | django/core/management/commands/dumpdata.py | 1 | 8960 | from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core import serializers
from django.db import connections, router, DEFAULT_DB_ALIAS
from django.utils.datastructures import SortedDict
from optparse import make_option
class Command(BaseCommand):
    """Implementation of the ``dumpdata`` management command.

    Serializes the contents of the database -- optionally restricted to a
    set of apps and/or models -- as a fixture in the requested format.
    """
    option_list = BaseCommand.option_list + (
        make_option('--format', default='json', dest='format',
            help='Specifies the output serialization format for fixtures.'),
        make_option('--indent', default=None, dest='indent', type='int',
            help='Specifies the indent level to use when pretty-printing output'),
        make_option('--database', action='store', dest='database',
            default=DEFAULT_DB_ALIAS, help='Nominates a specific database to dump '
                'fixtures from. Defaults to the "default" database.'),
        make_option('-e', '--exclude', dest='exclude',action='append', default=[],
            help='An appname or appname.ModelName to exclude (use multiple --exclude to exclude multiple apps/models).'),
        make_option('-n', '--natural', action='store_true', dest='use_natural_keys', default=False,
            help='Use natural keys if they are available.'),
        make_option('-a', '--all', action='store_true', dest='use_base_manager', default=False,
            help="Use Django's base manager to dump all models stored in the database, including those that would otherwise be filtered or modified by a custom manager."),
    )
    help = ("Output the contents of the database as a fixture of the given "
            "format (using each model's default manager unless --all is "
            "specified).")
    args = '[appname appname.ModelName ...]'

    def handle(self, *app_labels, **options):
        """Collect the requested models and return their serialized form.

        app_labels may be empty (dump every installed app) or contain
        'appname' / 'appname.ModelName' entries.  Raises CommandError for
        unknown apps/models, unknown formats, or serialization failures.
        """
        from django.db.models import get_app, get_apps, get_models, get_model

        format = options.get('format','json')
        indent = options.get('indent',None)
        using = options.get('database', DEFAULT_DB_ALIAS)
        connection = connections[using]
        excludes = options.get('exclude',[])
        show_traceback = options.get('traceback', False)
        use_natural_keys = options.get('use_natural_keys', False)
        use_base_manager = options.get('use_base_manager', False)

        # Split --exclude values into whole-app and single-model exclusions.
        excluded_apps = set()
        excluded_models = set()
        for exclude in excludes:
            if '.' in exclude:
                app_label, model_name = exclude.split('.', 1)
                model_obj = get_model(app_label, model_name)
                if not model_obj:
                    raise CommandError('Unknown model in excludes: %s' % exclude)
                excluded_models.add(model_obj)
            else:
                try:
                    app_obj = get_app(exclude)
                    excluded_apps.add(app_obj)
                except ImproperlyConfigured:
                    raise CommandError('Unknown app in excludes: %s' % exclude)

        # Build app_list: app -> list of models, or None meaning "all models
        # of that app".
        if len(app_labels) == 0:
            app_list = SortedDict((app, None) for app in get_apps() if app not in excluded_apps)
        else:
            app_list = SortedDict()
            for label in app_labels:
                try:
                    app_label, model_label = label.split('.')
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    model = get_model(app_label, model_label)
                    if model is None:
                        raise CommandError("Unknown model: %s.%s" % (app_label, model_label))

                    if app in app_list.keys():
                        if app_list[app] and model not in app_list[app]:
                            app_list[app].append(model)
                    else:
                        app_list[app] = [model]
                except ValueError:
                    # This is just an app - no model qualifier
                    app_label = label
                    try:
                        app = get_app(app_label)
                    except ImproperlyConfigured:
                        raise CommandError("Unknown application: %s" % app_label)
                    if app in excluded_apps:
                        continue
                    app_list[app] = None

        # Check that the serialization format exists; this is a shortcut to
        # avoid collating all the objects and _then_ failing.
        if format not in serializers.get_public_serializer_formats():
            raise CommandError("Unknown serialization format: %s" % format)

        try:
            serializers.get_serializer(format)
        except KeyError:
            raise CommandError("Unknown serialization format: %s" % format)

        # Now collate the objects to be serialized.
        objects = []
        for model in sort_dependencies(app_list.items()):
            if model in excluded_models:
                continue
            # Skip proxy models and models not routed to this database.
            if not model._meta.proxy and router.allow_syncdb(using, model):
                if use_base_manager:
                    objects.extend(model._base_manager.using(using).all())
                else:
                    objects.extend(model._default_manager.using(using).all())

        try:
            return serializers.serialize(format, objects, indent=indent,
                        use_natural_keys=use_natural_keys)
        except Exception, e:
            if show_traceback:
                raise
            raise CommandError("Unable to serialize database: %s" % e)
def sort_dependencies(app_list):
    """Sort a list of (app, modellist) pairs into a single list of models.

    The single list of models is sorted so that any model with a natural key
    is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.

    Raises CommandError when the dependency graph contains a cycle.
    """
    from django.db.models import get_model, get_models
    # Process the list of models, and get the list of dependencies
    model_dependencies = []
    models = set()
    for app, model_list in app_list:
        if model_list is None:
            model_list = get_models(app)

        for model in model_list:
            models.add(model)
            # Add any explicitly defined dependencies
            if hasattr(model, 'natural_key'):
                deps = getattr(model.natural_key, 'dependencies', [])
                if deps:
                    deps = [get_model(*d.split('.')) for d in deps]
            else:
                deps = []

            # Now add a dependency for any FK or M2M relation with
            # a model that defines a natural key
            for field in model._meta.fields:
                if hasattr(field.rel, 'to'):
                    rel_model = field.rel.to
                    if hasattr(rel_model, 'natural_key'):
                        deps.append(rel_model)
            for field in model._meta.many_to_many:
                rel_model = field.rel.to
                if hasattr(rel_model, 'natural_key'):
                    deps.append(rel_model)
            model_dependencies.append((model, deps))

    model_dependencies.reverse()
    # Now sort the models to ensure that dependencies are met. This
    # is done by repeatedly iterating over the input list of models.
    # If all the dependencies of a given model are in the final list,
    # that model is promoted to the end of the final list. This process
    # continues until the input list is empty, or we do a full iteration
    # over the input models without promoting a model to the final list.
    # If we do a full iteration without a promotion, that means there are
    # circular dependencies in the list.
    model_list = []
    while model_dependencies:
        skipped = []
        changed = False
        while model_dependencies:
            model, deps = model_dependencies.pop()

            # A model is ready once every dependency is either already on
            # the final list or not part of this serialization run at all.
            # all() short-circuits, unlike the original manual flag loop.
            if all(d not in models or d in model_list for d in deps):
                model_list.append(model)
                changed = True
            else:
                skipped.append((model, deps))
        if not changed:
            raise CommandError("Can't resolve dependencies for %s in serialized app list." %
                ', '.join('%s.%s' % (model._meta.app_label, model._meta.object_name)
                for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__))
            )
        model_dependencies = skipped

    return model_list
| bsd-3-clause |
kjagoo/wger_stark | wger/exercises/tests/test_exercise_images.py | 2 | 4836 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
from django.core.files import File
from django.core.urlresolvers import reverse
from wger.core.tests.base_testcase import (
WorkoutManagerTestCase,
WorkoutManagerEditTestCase,
WorkoutManagerAddTestCase,
WorkoutManagerDeleteTestCase
)
from wger.exercises.models import Exercise, ExerciseImage
class MainImageTestCase(WorkoutManagerTestCase):
    '''
    Tests the methods to make sure there is always a main image per picture
    '''

    def save_image(self, exercise, filename, db_filename=None):
        '''
        Helper function to save an image to an exercise

        Returns the primary key of the created ExerciseImage.
        '''
        if not db_filename:
            # NOTE(review): db_filename is computed but never used below --
            # presumably intended as the stored file name; confirm.
            db_filename = filename
        image = ExerciseImage()
        image.exercise = exercise
        image.status = ExerciseImage.STATUS_ACCEPTED
        image.image.save(
            filename,
            File(open('wger/exercises/tests/{0}'.format(filename), 'rb'))
        )
        image.save()
        return(image.pk)

    def test_auto_main_image(self):
        '''
        Tests that the first uploaded image is automatically a main image
        '''
        exercise = Exercise.objects.get(pk=2)
        pk = self.save_image(exercise, 'protestschwein.jpg')

        image = ExerciseImage.objects.get(pk=pk)
        self.assertTrue(image.is_main)

    def test_auto_main_image_multiple(self):
        '''
        Tests that only the first of several uploaded images becomes the main one
        '''
        exercise = Exercise.objects.get(pk=2)
        pk1 = self.save_image(exercise, 'protestschwein.jpg')
        pk2 = self.save_image(exercise, 'wildschwein.jpg')

        image = ExerciseImage.objects.get(pk=pk1)
        self.assertTrue(image.is_main)

        image = ExerciseImage.objects.get(pk=pk2)
        self.assertFalse(image.is_main)

    def test_delete_main_image(self):
        '''
        Tests that there is always a main image after deleting one
        '''
        exercise = Exercise.objects.get(pk=2)
        pk1 = self.save_image(exercise, 'protestschwein.jpg')
        pk2 = self.save_image(exercise, 'protestschwein.jpg')
        pk3 = self.save_image(exercise, 'wildschwein.jpg')
        pk4 = self.save_image(exercise, 'wildschwein.jpg')
        pk5 = self.save_image(exercise, 'wildschwein.jpg')

        # Deleting the current main image promotes the next one...
        image = ExerciseImage.objects.get(pk=pk1)
        self.assertTrue(image.is_main)
        image.delete()
        self.assertTrue(ExerciseImage.objects.get(pk=pk2).is_main)
        self.assertFalse(ExerciseImage.objects.get(pk=pk3).is_main)
        self.assertFalse(ExerciseImage.objects.get(pk=pk4).is_main)
        self.assertFalse(ExerciseImage.objects.get(pk=pk5).is_main)

        # ...and deleting the promoted one promotes the next again.
        image = ExerciseImage.objects.get(pk=pk2)
        self.assertTrue(image.is_main)
        image.delete()
        self.assertTrue(ExerciseImage.objects.get(pk=pk3).is_main)
        self.assertFalse(ExerciseImage.objects.get(pk=pk4).is_main)
        self.assertFalse(ExerciseImage.objects.get(pk=pk5).is_main)
class AddExerciseImageTestCase(WorkoutManagerAddTestCase):
    '''
    Tests adding an image to an exercise
    '''

    object_class = ExerciseImage
    url = reverse('exercise:image:add', kwargs={'exercise_pk': 1})
    user_fail = False
    # NOTE(review): the file handle opened here at class-definition time is
    # never explicitly closed; relies on interpreter cleanup.
    data = {'is_main': True,
            'image': open('wger/exercises/tests/protestschwein.jpg', 'rb'),
            'license': 1}
class EditExerciseImageTestCase(WorkoutManagerEditTestCase):
    '''
    Tests editing an image to an exercise
    '''

    object_class = ExerciseImage
    url = 'exercise:image:edit'
    pk = 2
    # Form payload submitted by the generic edit-test base class.
    data = {'is_main': True,
            'license': 1}
class DeleteExerciseImageTestCase(WorkoutManagerDeleteTestCase):
    '''
    Tests deleting an image to an exercise
    '''

    object_class = ExerciseImage
    url = reverse('exercise:image:delete', kwargs={'exercise_pk': 1, 'pk': 1})
    pk = 1
# TODO: fix test
# class ExerciseImagesApiTestCase(api_base_test.ApiBaseResourceTestCase):
# '''
# Tests the exercise image overview resource
# '''
# pk = 1
# resource = ExerciseImage
# private_resource = False
# special_endpoints = ('thumbnails',)
# data = {'is_main': 'true',
# 'exercise': '1',
# 'id': 1}
| agpl-3.0 |
rdblue/Impala | tests/query_test/test_tpch_queries.py | 16 | 2529 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Functional tests running the TPCH workload.
#
import logging
import pytest
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestTpchQuery(ImpalaTestSuite):
  """Functional test suite that runs the 22 TPC-H benchmark queries,
  one test method per query so each reports as a separate case."""

  @classmethod
  def get_workload(self):
    # Workload name used to locate the query files on disk.
    return 'tpch'

  @classmethod
  def add_test_dimensions(cls):
    super(TestTpchQuery, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(create_single_exec_option_dimension())

    # The tpch tests take a long time to execute so restrict the combinations they
    # execute over
    # TODO: the planner tests are based on text and need this.
    if cls.exploration_strategy() == 'core':
      cls.TestMatrix.add_constraint(lambda v:\
          v.get_value('table_format').file_format in ['text'])

  # One thin wrapper per TPC-H query (q1..q22).
  def test_tpch_q1(self, vector):
    self.run_test_case('tpch-q1', vector)

  def test_tpch_q2(self, vector):
    self.run_test_case('tpch-q2', vector)

  def test_tpch_q3(self, vector):
    self.run_test_case('tpch-q3', vector)

  def test_tpch_q4(self, vector):
    self.run_test_case('tpch-q4', vector)

  def test_tpch_q5(self, vector):
    self.run_test_case('tpch-q5', vector)

  def test_tpch_q6(self, vector):
    self.run_test_case('tpch-q6', vector)

  def test_tpch_q7(self, vector):
    self.run_test_case('tpch-q7', vector)

  def test_tpch_q8(self, vector):
    self.run_test_case('tpch-q8', vector)

  def test_tpch_q9(self, vector):
    self.run_test_case('tpch-q9', vector)

  def test_tpch_q10(self, vector):
    self.run_test_case('tpch-q10', vector)

  def test_tpch_q11(self, vector):
    self.run_test_case('tpch-q11', vector)

  def test_tpch_q12(self, vector):
    self.run_test_case('tpch-q12', vector)

  def test_tpch_q13(self, vector):
    self.run_test_case('tpch-q13', vector)

  def test_tpch_q14(self, vector):
    self.run_test_case('tpch-q14', vector)

  def test_tpch_q15(self, vector):
    self.run_test_case('tpch-q15', vector)

  def test_tpch_q16(self, vector):
    self.run_test_case('tpch-q16', vector)

  def test_tpch_q17(self, vector):
    self.run_test_case('tpch-q17', vector)

  def test_tpch_q18(self, vector):
    self.run_test_case('tpch-q18', vector)

  def test_tpch_q19(self, vector):
    self.run_test_case('tpch-q19', vector)

  def test_tpch_q20(self, vector):
    self.run_test_case('tpch-q20', vector)

  def test_tpch_q21(self, vector):
    self.run_test_case('tpch-q21', vector)

  def test_tpch_q22(self, vector):
    self.run_test_case('tpch-q22', vector)
| apache-2.0 |
PeterWangIntel/chromium-crosswalk | third_party/protobuf/descriptor_pb2.py | 193 | 48695 | # Generated by the protocol buffer compiler. DO NOT EDIT!
from google.protobuf import descriptor
from google.protobuf import message
from google.protobuf import reflection
# @@protoc_insertion_point(imports)
DESCRIPTOR = descriptor.FileDescriptor(
name='google/protobuf/descriptor.proto',
package='google.protobuf',
serialized_pb='\n google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"G\n\x11\x46ileDescriptorSet\x12\x32\n\x04\x66ile\x18\x01 \x03(\x0b\x32$.google.protobuf.FileDescriptorProto\"\xdc\x02\n\x13\x46ileDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07package\x18\x02 \x01(\t\x12\x12\n\ndependency\x18\x03 \x03(\t\x12\x36\n\x0cmessage_type\x18\x04 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x05 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12\x38\n\x07service\x18\x06 \x03(\x0b\x32\'.google.protobuf.ServiceDescriptorProto\x12\x38\n\textension\x18\x07 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12-\n\x07options\x18\x08 \x01(\x0b\x32\x1c.google.protobuf.FileOptions\"\xa9\x03\n\x0f\x44\x65scriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x05\x66ield\x18\x02 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x38\n\textension\x18\x06 \x03(\x0b\x32%.google.protobuf.FieldDescriptorProto\x12\x35\n\x0bnested_type\x18\x03 \x03(\x0b\x32 .google.protobuf.DescriptorProto\x12\x37\n\tenum_type\x18\x04 \x03(\x0b\x32$.google.protobuf.EnumDescriptorProto\x12H\n\x0f\x65xtension_range\x18\x05 \x03(\x0b\x32/.google.protobuf.DescriptorProto.ExtensionRange\x12\x30\n\x07options\x18\x07 \x01(\x0b\x32\x1f.google.protobuf.MessageOptions\x1a,\n\x0e\x45xtensionRange\x12\r\n\x05start\x18\x01 \x01(\x05\x12\x0b\n\x03\x65nd\x18\x02 \x01(\x05\"\x94\x05\n\x14\x46ieldDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x03 \x01(\x05\x12:\n\x05label\x18\x04 \x01(\x0e\x32+.google.protobuf.FieldDescriptorProto.Label\x12\x38\n\x04type\x18\x05 \x01(\x0e\x32*.google.protobuf.FieldDescriptorProto.Type\x12\x11\n\ttype_name\x18\x06 \x01(\t\x12\x10\n\x08\x65xtendee\x18\x02 \x01(\t\x12\x15\n\rdefault_value\x18\x07 \x01(\t\x12.\n\x07options\x18\x08 
\x01(\x0b\x32\x1d.google.protobuf.FieldOptions\"\xb6\x02\n\x04Type\x12\x0f\n\x0bTYPE_DOUBLE\x10\x01\x12\x0e\n\nTYPE_FLOAT\x10\x02\x12\x0e\n\nTYPE_INT64\x10\x03\x12\x0f\n\x0bTYPE_UINT64\x10\x04\x12\x0e\n\nTYPE_INT32\x10\x05\x12\x10\n\x0cTYPE_FIXED64\x10\x06\x12\x10\n\x0cTYPE_FIXED32\x10\x07\x12\r\n\tTYPE_BOOL\x10\x08\x12\x0f\n\x0bTYPE_STRING\x10\t\x12\x0e\n\nTYPE_GROUP\x10\n\x12\x10\n\x0cTYPE_MESSAGE\x10\x0b\x12\x0e\n\nTYPE_BYTES\x10\x0c\x12\x0f\n\x0bTYPE_UINT32\x10\r\x12\r\n\tTYPE_ENUM\x10\x0e\x12\x11\n\rTYPE_SFIXED32\x10\x0f\x12\x11\n\rTYPE_SFIXED64\x10\x10\x12\x0f\n\x0bTYPE_SINT32\x10\x11\x12\x0f\n\x0bTYPE_SINT64\x10\x12\"C\n\x05Label\x12\x12\n\x0eLABEL_OPTIONAL\x10\x01\x12\x12\n\x0eLABEL_REQUIRED\x10\x02\x12\x12\n\x0eLABEL_REPEATED\x10\x03\"\x8c\x01\n\x13\x45numDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x38\n\x05value\x18\x02 \x03(\x0b\x32).google.protobuf.EnumValueDescriptorProto\x12-\n\x07options\x18\x03 \x01(\x0b\x32\x1c.google.protobuf.EnumOptions\"l\n\x18\x45numValueDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0e\n\x06number\x18\x02 \x01(\x05\x12\x32\n\x07options\x18\x03 \x01(\x0b\x32!.google.protobuf.EnumValueOptions\"\x90\x01\n\x16ServiceDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x36\n\x06method\x18\x02 \x03(\x0b\x32&.google.protobuf.MethodDescriptorProto\x12\x30\n\x07options\x18\x03 \x01(\x0b\x32\x1f.google.protobuf.ServiceOptions\"\x7f\n\x15MethodDescriptorProto\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x12\n\ninput_type\x18\x02 \x01(\t\x12\x13\n\x0boutput_type\x18\x03 \x01(\t\x12/\n\x07options\x18\x04 \x01(\x0b\x32\x1e.google.protobuf.MethodOptions\"\xa4\x03\n\x0b\x46ileOptions\x12\x14\n\x0cjava_package\x18\x01 \x01(\t\x12\x1c\n\x14java_outer_classname\x18\x08 \x01(\t\x12\"\n\x13java_multiple_files\x18\n \x01(\x08:\x05\x66\x61lse\x12\x46\n\x0coptimize_for\x18\t \x01(\x0e\x32).google.protobuf.FileOptions.OptimizeMode:\x05SPEED\x12!\n\x13\x63\x63_generic_services\x18\x10 
\x01(\x08:\x04true\x12#\n\x15java_generic_services\x18\x11 \x01(\x08:\x04true\x12!\n\x13py_generic_services\x18\x12 \x01(\x08:\x04true\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\":\n\x0cOptimizeMode\x12\t\n\x05SPEED\x10\x01\x12\r\n\tCODE_SIZE\x10\x02\x12\x10\n\x0cLITE_RUNTIME\x10\x03*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\xb8\x01\n\x0eMessageOptions\x12&\n\x17message_set_wire_format\x18\x01 \x01(\x08:\x05\x66\x61lse\x12.\n\x1fno_standard_descriptor_accessor\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x94\x02\n\x0c\x46ieldOptions\x12:\n\x05\x63type\x18\x01 \x01(\x0e\x32#.google.protobuf.FieldOptions.CType:\x06STRING\x12\x0e\n\x06packed\x18\x02 \x01(\x08\x12\x19\n\ndeprecated\x18\x03 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\x14\x65xperimental_map_key\x18\t \x01(\t\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption\"/\n\x05\x43Type\x12\n\n\x06STRING\x10\x00\x12\x08\n\x04\x43ORD\x10\x01\x12\x10\n\x0cSTRING_PIECE\x10\x02*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"]\n\x0b\x45numOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"b\n\x10\x45numValueOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"`\n\x0eServiceOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"_\n\rMethodOptions\x12\x43\n\x14uninterpreted_option\x18\xe7\x07 \x03(\x0b\x32$.google.protobuf.UninterpretedOption*\t\x08\xe8\x07\x10\x80\x80\x80\x80\x02\"\x85\x02\n\x13UninterpretedOption\x12;\n\x04name\x18\x02 \x03(\x0b\x32-.google.protobuf.UninterpretedOption.NamePart\x12\x18\n\x10identifier_value\x18\x03 
\x01(\t\x12\x1a\n\x12positive_int_value\x18\x04 \x01(\x04\x12\x1a\n\x12negative_int_value\x18\x05 \x01(\x03\x12\x14\n\x0c\x64ouble_value\x18\x06 \x01(\x01\x12\x14\n\x0cstring_value\x18\x07 \x01(\x0c\x1a\x33\n\x08NamePart\x12\x11\n\tname_part\x18\x01 \x02(\t\x12\x14\n\x0cis_extension\x18\x02 \x02(\x08\x42)\n\x13\x63om.google.protobufB\x10\x44\x65scriptorProtosH\x01')
_FIELDDESCRIPTORPROTO_TYPE = descriptor.EnumDescriptor(
name='Type',
full_name='google.protobuf.FieldDescriptorProto.Type',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='TYPE_DOUBLE', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FLOAT', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_INT64', index=2, number=3,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_UINT64', index=3, number=4,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_INT32', index=4, number=5,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FIXED64', index=5, number=6,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_FIXED32', index=6, number=7,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_BOOL', index=7, number=8,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_STRING', index=8, number=9,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_GROUP', index=9, number=10,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_MESSAGE', index=10, number=11,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_BYTES', index=11, number=12,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_UINT32', index=12, number=13,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_ENUM', index=13, number=14,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SFIXED32', index=14, number=15,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SFIXED64', index=15, number=16,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SINT32', index=16, number=17,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='TYPE_SINT64', index=17, number=18,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1187,
serialized_end=1497,
)
_FIELDDESCRIPTORPROTO_LABEL = descriptor.EnumDescriptor(
name='Label',
full_name='google.protobuf.FieldDescriptorProto.Label',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='LABEL_OPTIONAL', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LABEL_REQUIRED', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LABEL_REPEATED', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1499,
serialized_end=1566,
)
_FILEOPTIONS_OPTIMIZEMODE = descriptor.EnumDescriptor(
name='OptimizeMode',
full_name='google.protobuf.FileOptions.OptimizeMode',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='SPEED', index=0, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CODE_SIZE', index=1, number=2,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='LITE_RUNTIME', index=2, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2449,
serialized_end=2507,
)
_FIELDOPTIONS_CTYPE = descriptor.EnumDescriptor(
name='CType',
full_name='google.protobuf.FieldOptions.CType',
filename=None,
file=DESCRIPTOR,
values=[
descriptor.EnumValueDescriptor(
name='STRING', index=0, number=0,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='CORD', index=1, number=1,
options=None,
type=None),
descriptor.EnumValueDescriptor(
name='STRING_PIECE', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=2926,
serialized_end=2973,
)
_FILEDESCRIPTORSET = descriptor.Descriptor(
name='FileDescriptorSet',
full_name='google.protobuf.FileDescriptorSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='file', full_name='google.protobuf.FileDescriptorSet.file', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=53,
serialized_end=124,
)
_FILEDESCRIPTORPROTO = descriptor.Descriptor(
name='FileDescriptorProto',
full_name='google.protobuf.FileDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.FileDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='package', full_name='google.protobuf.FileDescriptorProto.package', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='dependency', full_name='google.protobuf.FileDescriptorProto.dependency', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='message_type', full_name='google.protobuf.FileDescriptorProto.message_type', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enum_type', full_name='google.protobuf.FileDescriptorProto.enum_type', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='service', full_name='google.protobuf.FileDescriptorProto.service', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extension', full_name='google.protobuf.FileDescriptorProto.extension', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.FileDescriptorProto.options', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=127,
serialized_end=475,
)
_DESCRIPTORPROTO_EXTENSIONRANGE = descriptor.Descriptor(
name='ExtensionRange',
full_name='google.protobuf.DescriptorProto.ExtensionRange',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='start', full_name='google.protobuf.DescriptorProto.ExtensionRange.start', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='end', full_name='google.protobuf.DescriptorProto.ExtensionRange.end', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=859,
serialized_end=903,
)
_DESCRIPTORPROTO = descriptor.Descriptor(
name='DescriptorProto',
full_name='google.protobuf.DescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.DescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='field', full_name='google.protobuf.DescriptorProto.field', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extension', full_name='google.protobuf.DescriptorProto.extension', index=2,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='nested_type', full_name='google.protobuf.DescriptorProto.nested_type', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='enum_type', full_name='google.protobuf.DescriptorProto.enum_type', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extension_range', full_name='google.protobuf.DescriptorProto.extension_range', index=5,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.DescriptorProto.options', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_DESCRIPTORPROTO_EXTENSIONRANGE, ],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=478,
serialized_end=903,
)
_FIELDDESCRIPTORPROTO = descriptor.Descriptor(
name='FieldDescriptorProto',
full_name='google.protobuf.FieldDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.FieldDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.FieldDescriptorProto.number', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='label', full_name='google.protobuf.FieldDescriptorProto.label', index=2,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type', full_name='google.protobuf.FieldDescriptorProto.type', index=3,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='type_name', full_name='google.protobuf.FieldDescriptorProto.type_name', index=4,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='extendee', full_name='google.protobuf.FieldDescriptorProto.extendee', index=5,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='default_value', full_name='google.protobuf.FieldDescriptorProto.default_value', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.FieldDescriptorProto.options', index=7,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELDDESCRIPTORPROTO_TYPE,
_FIELDDESCRIPTORPROTO_LABEL,
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=906,
serialized_end=1566,
)
_ENUMDESCRIPTORPROTO = descriptor.Descriptor(
name='EnumDescriptorProto',
full_name='google.protobuf.EnumDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='value', full_name='google.protobuf.EnumDescriptorProto.value', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1569,
serialized_end=1709,
)
_ENUMVALUEDESCRIPTORPROTO = descriptor.Descriptor(
name='EnumValueDescriptorProto',
full_name='google.protobuf.EnumValueDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.EnumValueDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='number', full_name='google.protobuf.EnumValueDescriptorProto.number', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.EnumValueDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1711,
serialized_end=1819,
)
_SERVICEDESCRIPTORPROTO = descriptor.Descriptor(
name='ServiceDescriptorProto',
full_name='google.protobuf.ServiceDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.ServiceDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='method', full_name='google.protobuf.ServiceDescriptorProto.method', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.ServiceDescriptorProto.options', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1822,
serialized_end=1966,
)
_METHODDESCRIPTORPROTO = descriptor.Descriptor(
name='MethodDescriptorProto',
full_name='google.protobuf.MethodDescriptorProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='name', full_name='google.protobuf.MethodDescriptorProto.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='input_type', full_name='google.protobuf.MethodDescriptorProto.input_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='output_type', full_name='google.protobuf.MethodDescriptorProto.output_type', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='options', full_name='google.protobuf.MethodDescriptorProto.options', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=1968,
serialized_end=2095,
)
_FILEOPTIONS = descriptor.Descriptor(
name='FileOptions',
full_name='google.protobuf.FileOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='java_package', full_name='google.protobuf.FileOptions.java_package', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='java_outer_classname', full_name='google.protobuf.FileOptions.java_outer_classname', index=1,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='java_multiple_files', full_name='google.protobuf.FileOptions.java_multiple_files', index=2,
number=10, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='optimize_for', full_name='google.protobuf.FileOptions.optimize_for', index=3,
number=9, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=1,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='cc_generic_services', full_name='google.protobuf.FileOptions.cc_generic_services', index=4,
number=16, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='java_generic_services', full_name='google.protobuf.FileOptions.java_generic_services', index=5,
number=17, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='py_generic_services', full_name='google.protobuf.FileOptions.py_generic_services', index=6,
number=18, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=True,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.FileOptions.uninterpreted_option', index=7,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FILEOPTIONS_OPTIMIZEMODE,
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=2098,
serialized_end=2518,
)
_MESSAGEOPTIONS = descriptor.Descriptor(
name='MessageOptions',
full_name='google.protobuf.MessageOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='message_set_wire_format', full_name='google.protobuf.MessageOptions.message_set_wire_format', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='no_standard_descriptor_accessor', full_name='google.protobuf.MessageOptions.no_standard_descriptor_accessor', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.MessageOptions.uninterpreted_option', index=2,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=2521,
serialized_end=2705,
)
_FIELDOPTIONS = descriptor.Descriptor(
name='FieldOptions',
full_name='google.protobuf.FieldOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='ctype', full_name='google.protobuf.FieldOptions.ctype', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=True, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='packed', full_name='google.protobuf.FieldOptions.packed', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='deprecated', full_name='google.protobuf.FieldOptions.deprecated', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=True, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='experimental_map_key', full_name='google.protobuf.FieldOptions.experimental_map_key', index=3,
number=9, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
descriptor.FieldDescriptor(
name='uninterpreted_option', full_name='google.protobuf.FieldOptions.uninterpreted_option', index=4,
number=999, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_FIELDOPTIONS_CTYPE,
],
options=None,
is_extendable=True,
extension_ranges=[(1000, 536870912), ],
serialized_start=2708,
serialized_end=2984,
)
# protoc-generated Descriptor for google.protobuf.EnumOptions — do not edit by hand.
_ENUMOPTIONS = descriptor.Descriptor(
  name='EnumOptions',
  full_name='google.protobuf.EnumOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.EnumOptions.uninterpreted_option', index=0,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  serialized_start=2986,
  serialized_end=3079,
)
# protoc-generated Descriptor for google.protobuf.EnumValueOptions — do not edit by hand.
_ENUMVALUEOPTIONS = descriptor.Descriptor(
  name='EnumValueOptions',
  full_name='google.protobuf.EnumValueOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.EnumValueOptions.uninterpreted_option', index=0,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  serialized_start=3081,
  serialized_end=3179,
)
# protoc-generated Descriptor for google.protobuf.ServiceOptions — do not edit by hand.
_SERVICEOPTIONS = descriptor.Descriptor(
  name='ServiceOptions',
  full_name='google.protobuf.ServiceOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.ServiceOptions.uninterpreted_option', index=0,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  serialized_start=3181,
  serialized_end=3277,
)
# protoc-generated Descriptor for google.protobuf.MethodOptions — do not edit by hand.
_METHODOPTIONS = descriptor.Descriptor(
  name='MethodOptions',
  full_name='google.protobuf.MethodOptions',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='uninterpreted_option', full_name='google.protobuf.MethodOptions.uninterpreted_option', index=0,
      number=999, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=True,
  extension_ranges=[(1000, 536870912), ],
  serialized_start=3279,
  serialized_end=3374,
)
# protoc-generated Descriptor for the nested message
# google.protobuf.UninterpretedOption.NamePart — do not edit by hand.
_UNINTERPRETEDOPTION_NAMEPART = descriptor.Descriptor(
  name='NamePart',
  full_name='google.protobuf.UninterpretedOption.NamePart',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name_part', full_name='google.protobuf.UninterpretedOption.NamePart.name_part', index=0,
      number=1, type=9, cpp_type=9, label=2,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='is_extension', full_name='google.protobuf.UninterpretedOption.NamePart.is_extension', index=1,
      number=2, type=8, cpp_type=7, label=2,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=3587,
  serialized_end=3638,
)
# protoc-generated Descriptor for google.protobuf.UninterpretedOption — do not edit by hand.
_UNINTERPRETEDOPTION = descriptor.Descriptor(
  name='UninterpretedOption',
  full_name='google.protobuf.UninterpretedOption',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    descriptor.FieldDescriptor(
      name='name', full_name='google.protobuf.UninterpretedOption.name', index=0,
      number=2, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='identifier_value', full_name='google.protobuf.UninterpretedOption.identifier_value', index=1,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=unicode("", "utf-8"),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='positive_int_value', full_name='google.protobuf.UninterpretedOption.positive_int_value', index=2,
      number=4, type=4, cpp_type=4, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='negative_int_value', full_name='google.protobuf.UninterpretedOption.negative_int_value', index=3,
      number=5, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='double_value', full_name='google.protobuf.UninterpretedOption.double_value', index=4,
      number=6, type=1, cpp_type=5, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    descriptor.FieldDescriptor(
      name='string_value', full_name='google.protobuf.UninterpretedOption.string_value', index=5,
      number=7, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value="",
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[_UNINTERPRETEDOPTION_NAMEPART, ],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  extension_ranges=[],
  serialized_start=3377,
  serialized_end=3638,
)
# protoc-generated cross-linking: resolve message/enum field types and parent
# (containing_type) back-references that could not be expressed inline above.
# Do not edit by hand; trailing semicolons are emitted by the generator.
_FILEDESCRIPTORSET.fields_by_name['file'].message_type = _FILEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['message_type'].message_type = _DESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['service'].message_type = _SERVICEDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_FILEDESCRIPTORPROTO.fields_by_name['options'].message_type = _FILEOPTIONS
_DESCRIPTORPROTO_EXTENSIONRANGE.containing_type = _DESCRIPTORPROTO;
_DESCRIPTORPROTO.fields_by_name['field'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension'].message_type = _FIELDDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['nested_type'].message_type = _DESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['enum_type'].message_type = _ENUMDESCRIPTORPROTO
_DESCRIPTORPROTO.fields_by_name['extension_range'].message_type = _DESCRIPTORPROTO_EXTENSIONRANGE
_DESCRIPTORPROTO.fields_by_name['options'].message_type = _MESSAGEOPTIONS
_FIELDDESCRIPTORPROTO.fields_by_name['label'].enum_type = _FIELDDESCRIPTORPROTO_LABEL
_FIELDDESCRIPTORPROTO.fields_by_name['type'].enum_type = _FIELDDESCRIPTORPROTO_TYPE
_FIELDDESCRIPTORPROTO.fields_by_name['options'].message_type = _FIELDOPTIONS
_FIELDDESCRIPTORPROTO_TYPE.containing_type = _FIELDDESCRIPTORPROTO;
_FIELDDESCRIPTORPROTO_LABEL.containing_type = _FIELDDESCRIPTORPROTO;
_ENUMDESCRIPTORPROTO.fields_by_name['value'].message_type = _ENUMVALUEDESCRIPTORPROTO
_ENUMDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMOPTIONS
_ENUMVALUEDESCRIPTORPROTO.fields_by_name['options'].message_type = _ENUMVALUEOPTIONS
_SERVICEDESCRIPTORPROTO.fields_by_name['method'].message_type = _METHODDESCRIPTORPROTO
_SERVICEDESCRIPTORPROTO.fields_by_name['options'].message_type = _SERVICEOPTIONS
_METHODDESCRIPTORPROTO.fields_by_name['options'].message_type = _METHODOPTIONS
_FILEOPTIONS.fields_by_name['optimize_for'].enum_type = _FILEOPTIONS_OPTIMIZEMODE
_FILEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FILEOPTIONS_OPTIMIZEMODE.containing_type = _FILEOPTIONS;
_MESSAGEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS.fields_by_name['ctype'].enum_type = _FIELDOPTIONS_CTYPE
_FIELDOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_FIELDOPTIONS_CTYPE.containing_type = _FIELDOPTIONS;
_ENUMOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_ENUMVALUEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_SERVICEOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_METHODOPTIONS.fields_by_name['uninterpreted_option'].message_type = _UNINTERPRETEDOPTION
_UNINTERPRETEDOPTION_NAMEPART.containing_type = _UNINTERPRETEDOPTION;
_UNINTERPRETEDOPTION.fields_by_name['name'].message_type = _UNINTERPRETEDOPTION_NAMEPART
# protoc-generated message class (Python 2 metaclass idiom) for google.protobuf.FileDescriptorSet.
class FileDescriptorSet(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _FILEDESCRIPTORSET
  # @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorSet)
# protoc-generated message class for google.protobuf.FileDescriptorProto.
class FileDescriptorProto(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _FILEDESCRIPTORPROTO
  # @@protoc_insertion_point(class_scope:google.protobuf.FileDescriptorProto)
# protoc-generated message class for google.protobuf.DescriptorProto,
# including its nested ExtensionRange message.
class DescriptorProto(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType

  class ExtensionRange(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _DESCRIPTORPROTO_EXTENSIONRANGE
    # @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto.ExtensionRange)
  DESCRIPTOR = _DESCRIPTORPROTO
  # @@protoc_insertion_point(class_scope:google.protobuf.DescriptorProto)
# protoc-generated message class for google.protobuf.FieldDescriptorProto.
class FieldDescriptorProto(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _FIELDDESCRIPTORPROTO
  # @@protoc_insertion_point(class_scope:google.protobuf.FieldDescriptorProto)
# protoc-generated message class for google.protobuf.EnumDescriptorProto.
class EnumDescriptorProto(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ENUMDESCRIPTORPROTO
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumDescriptorProto)
# protoc-generated message class for google.protobuf.EnumValueDescriptorProto.
class EnumValueDescriptorProto(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ENUMVALUEDESCRIPTORPROTO
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumValueDescriptorProto)
# protoc-generated message class for google.protobuf.ServiceDescriptorProto.
class ServiceDescriptorProto(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _SERVICEDESCRIPTORPROTO
  # @@protoc_insertion_point(class_scope:google.protobuf.ServiceDescriptorProto)
# protoc-generated message class for google.protobuf.MethodDescriptorProto.
class MethodDescriptorProto(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _METHODDESCRIPTORPROTO
  # @@protoc_insertion_point(class_scope:google.protobuf.MethodDescriptorProto)
# protoc-generated message class for google.protobuf.FileOptions.
class FileOptions(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _FILEOPTIONS
  # @@protoc_insertion_point(class_scope:google.protobuf.FileOptions)
# protoc-generated message class for google.protobuf.MessageOptions.
class MessageOptions(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _MESSAGEOPTIONS
  # @@protoc_insertion_point(class_scope:google.protobuf.MessageOptions)
# protoc-generated message class for google.protobuf.FieldOptions.
class FieldOptions(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _FIELDOPTIONS
  # @@protoc_insertion_point(class_scope:google.protobuf.FieldOptions)
# protoc-generated message class for google.protobuf.EnumOptions.
class EnumOptions(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ENUMOPTIONS
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumOptions)
# protoc-generated message class for google.protobuf.EnumValueOptions.
class EnumValueOptions(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _ENUMVALUEOPTIONS
  # @@protoc_insertion_point(class_scope:google.protobuf.EnumValueOptions)
# protoc-generated message class for google.protobuf.ServiceOptions.
class ServiceOptions(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _SERVICEOPTIONS
  # @@protoc_insertion_point(class_scope:google.protobuf.ServiceOptions)
# protoc-generated message class for google.protobuf.MethodOptions.
class MethodOptions(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType
  DESCRIPTOR = _METHODOPTIONS
  # @@protoc_insertion_point(class_scope:google.protobuf.MethodOptions)
# protoc-generated message class for google.protobuf.UninterpretedOption,
# including its nested NamePart message.
class UninterpretedOption(message.Message):
  __metaclass__ = reflection.GeneratedProtocolMessageType

  class NamePart(message.Message):
    __metaclass__ = reflection.GeneratedProtocolMessageType
    DESCRIPTOR = _UNINTERPRETEDOPTION_NAMEPART
    # @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption.NamePart)
  DESCRIPTOR = _UNINTERPRETEDOPTION
  # @@protoc_insertion_point(class_scope:google.protobuf.UninterpretedOption)
# @@protoc_insertion_point(module_scope)
| bsd-3-clause |
ppanczyk/ansible | lib/ansible/modules/files/unarchive.py | 19 | 34773 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
# Copyright: (c) 2013, Dylan Martin <dmartin@seattlecentral.edu>
# Copyright: (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: unarchive
version_added: '1.4'
short_description: Unpacks an archive after (optionally) copying it from the local machine.
extends_documentation_fragment: [ decrypt, files ]
description:
- The C(unarchive) module unpacks an archive.
- By default, it will copy the source file from the local system to the target before unpacking.
- Set C(remote_src=yes) to unpack an archive which already exists on the target.
- For Windows targets, use the M(win_unzip) module instead.
options:
src:
description:
- If C(remote_src=no) (default), local path to archive file to copy to the target server; can be absolute or relative. If C(remote_src=yes), path on the
target server to existing archive file to unpack.
- If C(remote_src=yes) and C(src) contains C(://), the remote machine will download the file from the URL first. (version_added 2.0). This is only for
simple cases, for full download support use the M(get_url) module.
required: true
dest:
description:
- Remote absolute path where the archive should be unpacked.
required: true
copy:
description:
- If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine.
- This option has been deprecated in favor of C(remote_src).
- This option is mutually exclusive with C(remote_src).
type: 'bool'
default: 'yes'
creates:
description:
- A filename, when it already exists, this step will B(not) be run.
version_added: "1.6"
list_files:
description:
- If set to True, return the list of files that are contained in the tarball.
type: 'bool'
default: 'no'
version_added: "2.0"
exclude:
description:
- List the directory and file entries that you would like to exclude from the unarchive action.
version_added: "2.1"
keep_newer:
description:
- Do not replace existing files that are newer than files from the archive.
type: 'bool'
default: 'no'
version_added: "2.1"
extra_opts:
description:
- Specify additional options by passing in an array.
default: ""
version_added: "2.1"
remote_src:
description:
- Set to C(yes) to indicate the archived file is already on the remote system and not local to the Ansible controller.
- This option is mutually exclusive with C(copy).
type: 'bool'
default: 'no'
version_added: "2.2"
validate_certs:
description:
- This only applies if using a https URL as the source of the file.
- This should only set to C(no) used on personally controlled sites using self-signed certificate.
- Prior to 2.2 the code worked as if this was set to C(yes).
type: 'bool'
default: 'yes'
version_added: "2.2"
author: Michael DeHaan
todo:
- Re-implement tar support using native tarfile module.
- Re-implement zip support using native zipfile module.
notes:
- Requires C(gtar)/C(unzip) command on target host.
- Can handle I(.zip) files using C(unzip) as well as I(.tar), I(.tar.gz), I(.tar.bz2) and I(.tar.xz) files using C(gtar).
- Uses gtar's C(--diff) arg to calculate if changed or not. If this C(arg) is not
supported, it will always unpack the archive.
- Existing files/directories in the destination which are not in the archive
are not touched. This is the same behavior as a normal archive extraction.
- Existing files/directories in the destination which are not in the archive
are ignored for purposes of deciding if the archive should be unpacked or not.
- For Windows targets, use the M(win_unzip) module instead.
'''
EXAMPLES = r'''
- name: Extract foo.tgz into /var/lib/foo
unarchive:
src: foo.tgz
dest: /var/lib/foo
- name: Unarchive a file that is already on the remote machine
unarchive:
src: /tmp/foo.zip
dest: /usr/local/bin
remote_src: yes
- name: Unarchive a file that needs to be downloaded (added in 2.0)
unarchive:
src: https://example.com/example.zip
dest: /usr/local/bin
remote_src: yes
'''
import binascii
import codecs
import datetime
import grp
import os
import platform
import pwd
import re
import stat
import time
import traceback
from zipfile import ZipFile, BadZipfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import fetch_url
from ansible.module_utils._text import to_bytes, to_native, to_text
try: # python 3.3+
from shlex import quote
except ImportError: # older python
from pipes import quote
# String from tar that shows the tar contents are different from the
# filesystem
OWNER_DIFF_RE = re.compile(r': Uid differs$')
GROUP_DIFF_RE = re.compile(r': Gid differs$')
MODE_DIFF_RE = re.compile(r': Mode differs$')
MOD_TIME_DIFF_RE = re.compile(r': Mod time differs$')
# NEWER_DIFF_RE = re.compile(r' is newer or same age.$')
# Bogus empty-filename warning (note the doubled ': ') vs. a genuinely
# missing file; both come from `tar --diff` output.
EMPTY_FILE_RE = re.compile(r': : Warning: Cannot stat: No such file or directory$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')
# Nine-character Unix permission string (e.g. rw-r--r--) as printed by zipinfo.
ZIP_FILE_MODE_RE = re.compile(r'([r-][w-][SsTtx-]){3}')
# When downloading an archive, how much of the archive to download before
# saving to a tempfile (64k)
BUFSIZE = 65536
def crc32(path):
    """Return the CRC32 checksum of the file at *path* as an unsigned int.

    The ``& 0xffffffff`` mask normalizes the signed value binascii.crc32 can
    return on Python 2 so results compare equal to zipinfo/ZipFile CRCs.
    """
    # Use a context manager so the file handle is closed deterministically
    # instead of leaking until garbage collection (the original used a bare
    # open(...).read()).
    with open(path, 'rb') as f:
        return binascii.crc32(f.read()) & 0xffffffff
def shell_escape(string):
    """Backslash-escape every character that is not an ASCII letter, digit
    or underscore, so the result is safe to pass through a Unix shell."""
    return re.sub(r'[^A-Za-z0-9_]', lambda match: '\\' + match.group(0), string)
class UnarchiveError(Exception):
    """Raised by the archive handlers when an archive cannot be listed,
    inspected or extracted."""
    pass
class ZipArchive(object):
    """Handle .zip archives via the external ``unzip``/``zipinfo`` commands.

    Provides the three operations the unarchive driver needs:
    can_handle_archive(), is_unarchived() and unarchive().
    """

    def __init__(self, src, dest, file_args, module):
        self.src = src
        self.dest = dest
        self.file_args = file_args
        self.opts = module.params['extra_opts']
        self.module = module
        self.excludes = module.params['exclude']
        # Paths that differ from the destination and must be (re)extracted.
        self.includes = []
        self.cmd_path = self.module.get_bin_path('unzip')
        self.zipinfocmd_path = self.module.get_bin_path('zipinfo')
        self._files_in_archive = []
        # Cache of archive path -> CRC32, filled lazily.
        self._infodict = dict()

    def _permstr_to_octal(self, modestr, umask):
        ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''
        revstr = modestr[::-1]
        mode = 0
        for j in range(0, 3):
            for i in range(0, 3):
                if revstr[i + 3 * j] in ['r', 'w', 'x', 's', 't']:
                    mode += 2 ** (i + 3 * j)
        # The unzip utility does not support setting the stST bits
#            if revstr[i + 3 * j] in ['s', 't', 'S', 'T' ]:
#                mode += 2 ** (9 + j)
        return (mode & ~umask)

    def _legacy_file_list(self, force_refresh=False):
        """Populate the file list and CRC cache via `unzip -v` output.

        Fallback for zipfiles Python's zipfile cannot parse. *force_refresh*
        is accepted for signature compatibility but not used here.
        """
        unzip_bin = self.module.get_bin_path('unzip')
        if not unzip_bin:
            raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)

        rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])
        if rc:
            raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)

        # Skip the three header lines and the two footer lines of `unzip -v`.
        for line in out.splitlines()[3:-2]:
            fields = line.split(None, 7)
            self._files_in_archive.append(fields[7])
            self._infodict[fields[7]] = int(fields[6])

    def _crc32(self, path):
        """Return the archived CRC32 for *path*, filling the cache on first use."""
        if self._infodict:
            return self._infodict[path]

        try:
            archive = ZipFile(self.src)
        except BadZipfile as e:
            if e.args[0].lower().startswith('bad magic number'):
                # Python2.4 can't handle zipfiles with > 64K files.  Try using
                # /usr/bin/unzip instead
                self._legacy_file_list()
            else:
                raise
        else:
            try:
                for item in archive.infolist():
                    self._infodict[item.filename] = int(item.CRC)
            except Exception:
                archive.close()
                raise UnarchiveError('Unable to list files in the archive')

        return self._infodict[path]

    @property
    def files_in_archive(self, force_refresh=False):
        """List of (non-excluded) member names in the archive.

        NOTE(review): *force_refresh* is dead code — Python properties cannot
        take arguments — kept only to mirror TgzArchive's signature.
        """
        if self._files_in_archive and not force_refresh:
            return self._files_in_archive

        self._files_in_archive = []
        try:
            archive = ZipFile(self.src)
        except BadZipfile as e:
            if e.args[0].lower().startswith('bad magic number'):
                # Python2.4 can't handle zipfiles with > 64K files.  Try using
                # /usr/bin/unzip instead
                self._legacy_file_list(force_refresh)
            else:
                raise
        else:
            try:
                for member in archive.namelist():
                    if member not in self.excludes:
                        self._files_in_archive.append(to_native(member))
            except Exception:
                archive.close()
                raise UnarchiveError('Unable to list files in the archive')

            archive.close()
        return self._files_in_archive

    def is_unarchived(self):
        """Compare the archive against the destination tree.

        Parses `zipinfo -T -s` output and compares each member's type, mtime,
        size, CRC, permissions and ownership with what is on disk.  Returns a
        dict with 'unarchived' (bool) plus rc/out/err/cmd/diff details.
        """
        # BSD unzip doesn't support zipinfo listings with timestamp.
        cmd = [self.zipinfocmd_path, '-T', '-s', self.src]
        if self.excludes:
            cmd.extend(['-x', ] + self.excludes)
        rc, out, err = self.module.run_command(cmd)

        old_out = out
        diff = ''
        out = ''
        if rc == 0:
            unarchived = True
        else:
            unarchived = False

        # Get some information related to user/group ownership
        umask = os.umask(0)
        os.umask(umask)
        systemtype = platform.system()

        # Get current user and group information
        groups = os.getgroups()
        run_uid = os.getuid()
        run_gid = os.getgid()
        try:
            run_owner = pwd.getpwuid(run_uid).pw_name
        except (KeyError, TypeError):
            run_owner = run_uid
        try:
            run_group = grp.getgrgid(run_gid).gr_name
        except (KeyError, TypeError):
            run_group = run_gid

        # Get future user ownership
        fut_owner = fut_uid = None
        if self.file_args['owner']:
            try:
                # BUGFIX: was pwd.getpwname(), which does not exist; the
                # AttributeError was silently swallowed by a bare except.
                tpw = pwd.getpwnam(self.file_args['owner'])
            except Exception:
                try:
                    # Owner may have been given as a numeric uid.
                    tpw = pwd.getpwuid(self.file_args['owner'])
                except Exception:
                    tpw = pwd.getpwuid(run_uid)
            fut_owner = tpw.pw_name
            fut_uid = tpw.pw_uid
        else:
            try:
                fut_owner = run_owner
            except Exception:
                pass
            fut_uid = run_uid

        # Get future group ownership
        fut_group = fut_gid = None
        if self.file_args['group']:
            try:
                tgr = grp.getgrnam(self.file_args['group'])
            except Exception:
                try:
                    tgr = grp.getgrgid(self.file_args['group'])
                except Exception:
                    tgr = grp.getgrgid(run_gid)
            fut_group = tgr.gr_name
            fut_gid = tgr.gr_gid
        else:
            try:
                fut_group = run_group
            except Exception:
                pass
            fut_gid = run_gid

        for line in old_out.splitlines():
            change = False

            pcs = line.split(None, 7)
            if len(pcs) != 8:
                # Too few fields... probably a piece of the header or footer
                continue

            # Check first and seventh field in order to skip header/footer
            if len(pcs[0]) != 7 and len(pcs[0]) != 10:
                continue
            if len(pcs[6]) != 15:
                continue

            # Possible entries:
            #   -rw-rws---  1.9 unx    2802 t- defX 11-Aug-91 13:48 perms.2660
            #   -rw-a--     1.0 hpf    5358 Tl i4:3  4-Dec-91 11:33 longfilename.hpfs
            #   -r--ahs     1.1 fat    4096 b- i4:2 14-Jul-91 12:58 EA DATA. SF
            #   --w-------  1.0 mac   17357 bx i8:2  4-May-92 04:02 unzip.macr
            if pcs[0][0] not in 'dl-?' or not frozenset(pcs[0][1:]).issubset('rwxstah-'):
                continue

            ztype = pcs[0][0]
            permstr = pcs[0][1:]
            version = pcs[1]
            ostype = pcs[2]
            size = int(pcs[3])
            path = to_text(pcs[7], errors='surrogate_or_strict')

            # Skip excluded files
            if path in self.excludes:
                out += 'Path %s is excluded on request\n' % path
                continue

            # Itemized change requires L for symlink
            if path[-1] == '/':
                if ztype != 'd':
                    err += 'Path %s incorrectly tagged as "%s", but is a directory.\n' % (path, ztype)
                ftype = 'd'
            elif ztype == 'l':
                ftype = 'L'
            elif ztype == '-':
                ftype = 'f'
            elif ztype == '?':
                ftype = 'f'
            elif ztype == 'd':
                # Defensive fix: a 'd' entry without a trailing slash used to
                # leave ftype unbound and crash below.
                ftype = 'd'

            # Some files may be storing FAT permissions, not Unix permissions
            # For FAT permissions, we will use a base permissions set of 777 if the item is a directory or has the execute bit set.  Otherwise, 666.
            #     This permission will then be modified by the system UMask.
            # BSD always applies the Umask, even to Unix permissions.
            # For Unix style permissions on Linux or Mac, we want to use them directly.
            #     So we set the UMask for this file to zero.  That permission set will then be unchanged when calling _permstr_to_octal
            if len(permstr) == 6:
                if path[-1] == '/':
                    permstr = 'rwxrwxrwx'
                elif permstr == 'rwx---':
                    permstr = 'rwxrwxrwx'
                else:
                    permstr = 'rw-rw-rw-'
                file_umask = umask
            elif 'bsd' in systemtype.lower():
                file_umask = umask
            else:
                file_umask = 0

            # Test string conformity
            if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):
                raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)

            # DEBUG
#            err += "%s%s %10d %s\n" % (ztype, permstr, size, path)

            dest = os.path.join(self.dest, path)
            try:
                st = os.lstat(dest)
            except Exception:
                change = True
                self.includes.append(path)
                err += 'Path %s is missing\n' % path
                diff += '>%s++++++.?? %s\n' % (ftype, path)
                continue

            # Compare file types
            if ftype == 'd' and not stat.S_ISDIR(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'File %s already exists, but not as a directory\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'f' and not stat.S_ISREG(st.st_mode):
                change = True
                unarchived = False
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a regular file\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            if ftype == 'L' and not stat.S_ISLNK(st.st_mode):
                change = True
                self.includes.append(path)
                err += 'Directory %s already exists, but not as a symlink\n' % path
                diff += 'c%s++++++.?? %s\n' % (ftype, path)
                continue

            itemized = list('.%s.......??' % ftype)

            # Note: this timestamp calculation has a rounding error
            # somewhere... unzip and this timestamp can be one second off
            # When that happens, we report a change and re-unzip the file
            dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))
            timestamp = time.mktime(dt_object.timetuple())

            # Compare file timestamps
            if stat.S_ISREG(st.st_mode):
                if self.module.params['keep_newer']:
                    if timestamp > st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s is older, replacing file\n' % path
                        itemized[4] = 't'
                    elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:
                        # Add to excluded files, ignore other changes
                        out += 'File %s is newer, excluding file\n' % path
                        self.excludes.append(path)
                        continue
                else:
                    if timestamp != st.st_mtime:
                        change = True
                        self.includes.append(path)
                        err += 'File %s differs in mtime (%f vs %f)\n' % (path, timestamp, st.st_mtime)
                        itemized[4] = 't'

            # Compare file sizes
            if stat.S_ISREG(st.st_mode) and size != st.st_size:
                change = True
                err += 'File %s differs in size (%d vs %d)\n' % (path, size, st.st_size)
                itemized[3] = 's'

            # Compare file checksums
            if stat.S_ISREG(st.st_mode):
                crc = crc32(dest)
                if crc != self._crc32(path):
                    change = True
                    err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\n' % (path, self._crc32(path), crc)
                    itemized[2] = 'c'

            # Compare file permissions

            # Do not handle permissions of symlinks
            if ftype != 'L':

                # Use the new mode provided with the action, if there is one
                if self.file_args['mode']:
                    if isinstance(self.file_args['mode'], int):
                        mode = self.file_args['mode']
                    else:
                        try:
                            mode = int(self.file_args['mode'], 8)
                        except Exception as e:
                            try:
                                mode = AnsibleModule._symbolic_mode_to_octal(st, self.file_args['mode'])
                            except ValueError as e:
                                self.module.fail_json(path=path, msg="%s" % to_native(e), exception=traceback.format_exc())

                # Only special files require no umask-handling
                elif ztype == '?':
                    mode = self._permstr_to_octal(permstr, 0)
                else:
                    mode = self._permstr_to_octal(permstr, file_umask)

                if mode != stat.S_IMODE(st.st_mode):
                    change = True
                    itemized[5] = 'p'
                    err += 'Path %s differs in permissions (%o vs %o)\n' % (path, mode, stat.S_IMODE(st.st_mode))

            # Compare file user ownership
            owner = uid = None
            try:
                owner = pwd.getpwuid(st.st_uid).pw_name
            except KeyError:
                uid = st.st_uid

            # If we are not root and requested owner is not our user, fail
            if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):
                raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))

            if owner and owner != fut_owner:
                change = True
                err += 'Path %s is owned by user %s, not by user %s as expected\n' % (path, owner, fut_owner)
                itemized[6] = 'o'
            elif uid and uid != fut_uid:
                change = True
                err += 'Path %s is owned by uid %s, not by uid %s as expected\n' % (path, uid, fut_uid)
                itemized[6] = 'o'

            # Compare file group ownership
            group = gid = None
            try:
                group = grp.getgrgid(st.st_gid).gr_name
            except KeyError:
                gid = st.st_gid

            if run_uid != 0 and fut_gid not in groups:
                raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))

            if group and group != fut_group:
                change = True
                err += 'Path %s is owned by group %s, not by group %s as expected\n' % (path, group, fut_group)
                itemized[6] = 'g'
            elif gid and gid != fut_gid:
                change = True
                err += 'Path %s is owned by gid %s, not by gid %s as expected\n' % (path, gid, fut_gid)
                itemized[6] = 'g'

            # Register changed files and finalize diff output
            if change:
                if path not in self.includes:
                    self.includes.append(path)
                diff += '%s %s\n' % (''.join(itemized), path)

        if self.includes:
            unarchived = False

        # DEBUG
#        out = old_out + out

        return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)

    def unarchive(self):
        """Run `unzip -o` to extract the archive into self.dest."""
        cmd = [self.cmd_path, '-o']
        if self.opts:
            cmd.extend(self.opts)
        cmd.append(self.src)
        # NOTE: Including (changed) files as arguments is problematic (limits on command line/arguments)
#        if self.includes:
            # NOTE: Command unzip has this strange behaviour where it expects quoted filenames to also be escaped
#            cmd.extend(map(shell_escape, self.includes))
        if self.excludes:
            cmd.extend(['-x'] + self.excludes)
        cmd.extend(['-d', self.dest])
        rc, out, err = self.module.run_command(cmd)
        return dict(cmd=cmd, rc=rc, out=out, err=err)

    def can_handle_archive(self):
        """Return (True, None) if `unzip` exists and can list the archive,
        otherwise (False, reason)."""
        if not self.cmd_path:
            return False, 'Command "unzip" not found.'
        cmd = [self.cmd_path, '-l', self.src]
        rc, out, err = self.module.run_command(cmd)
        if rc == 0:
            return True, None
        return False, 'Command "%s" could not handle archive.' % self.cmd_path
class TgzArchive(object):
    """Handle gzip-compressed tar archives using GNU tar.

    Also serves as the base class for the plain, bzip2 and xz tar handlers,
    which differ only in the compression flag stored in ``self.zipflag``.
    """
    def __init__(self, src, dest, file_args, module):
        self.src = src
        self.dest = dest
        self.file_args = file_args
        self.opts = module.params['extra_opts']
        self.module = module
        # tar-based diffing cannot predict changes without touching the
        # filesystem, so this handler refuses to run in check mode.
        if self.module.check_mode:
            self.module.exit_json(skipped=True, msg="remote module (%s) does not support check mode when using gtar" % self.module._name)
        # Trailing slashes are stripped so exclude entries match the names
        # tar prints in its listing output.
        self.excludes = [path.rstrip('/') for path in self.module.params['exclude']]
        # Prefer gtar (GNU tar) as it supports the compression options -z, -j and -J
        self.cmd_path = self.module.get_bin_path('gtar', None)
        if not self.cmd_path:
            # Fallback to tar
            self.cmd_path = self.module.get_bin_path('tar')
        # Compression flag passed to tar; subclasses override this value.
        self.zipflag = '-z'
        # Cache populated lazily by the files_in_archive property.
        self._files_in_archive = []
        if self.cmd_path:
            self.tar_type = self._get_tar_type()
        else:
            self.tar_type = None
    def _get_tar_type(self):
        """Return 'gnu', 'bsd' or None based on the tar binary's --version banner."""
        cmd = [self.cmd_path, '--version']
        (rc, out, err) = self.module.run_command(cmd)
        tar_type = None
        if out.startswith('bsdtar'):
            tar_type = 'bsd'
        elif out.startswith('tar') and 'GNU' in out:
            tar_type = 'gnu'
        return tar_type
    @property
    def files_in_archive(self, force_refresh=False):
        """Return the list of file names inside the archive (cached).

        Raises UnarchiveError when tar cannot list the archive.
        NOTE(review): because this is a @property, callers can never actually
        pass ``force_refresh``; the parameter appears to be dead — confirm
        before relying on refresh behavior.
        """
        if self._files_in_archive and not force_refresh:
            return self._files_in_archive
        cmd = [self.cmd_path, '--list', '-C', self.dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.excludes:
            cmd.extend(['--exclude=' + quote(f) for f in self.excludes])
        cmd.extend(['-f', self.src])
        # Force a C locale so tar's listing is parseable on any host locale.
        rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        if rc != 0:
            raise UnarchiveError('Unable to list files in the archive')
        for filename in out.splitlines():
            # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
            # filename = filename.decode('string_escape')
            filename = codecs.escape_decode(filename)[0]
            if filename and filename not in self.excludes:
                self._files_in_archive.append(to_native(filename))
        return self._files_in_archive
    def is_unarchived(self):
        """Run ``tar --diff`` and report whether the archive is already extracted.

        Returns a dict with 'unarchived' plus the command, rc and raw output;
        only differences the module is not going to fix anyway count as changes.
        """
        cmd = [self.cmd_path, '--diff', '-C', self.dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.file_args['owner']:
            cmd.append('--owner=' + quote(self.file_args['owner']))
        if self.file_args['group']:
            cmd.append('--group=' + quote(self.file_args['group']))
        if self.module.params['keep_newer']:
            cmd.append('--keep-newer-files')
        if self.excludes:
            cmd.extend(['--exclude=' + quote(f) for f in self.excludes])
        cmd.extend(['-f', self.src])
        rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        # Check whether the differences are in something that we're
        # setting anyway
        # What is different
        unarchived = True
        old_out = out
        out = ''
        run_uid = os.getuid()
        # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient
        # Only way to be sure is to check request with what is on disk (as we do for zip)
        # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change
        for line in old_out.splitlines() + err.splitlines():
            # FIXME: Remove the bogus lines from error-output as well !
            # Ignore bogus errors on empty filenames (when using --split-component)
            if EMPTY_FILE_RE.search(line):
                continue
            # Owner/group diffs only matter when running as root and the user
            # did not request a specific owner/group.
            if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):
                out += line + '\n'
            if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):
                out += line + '\n'
            if not self.file_args['mode'] and MODE_DIFF_RE.search(line):
                out += line + '\n'
            if MOD_TIME_DIFF_RE.search(line):
                out += line + '\n'
            if MISSING_FILE_RE.search(line):
                out += line + '\n'
        if out:
            unarchived = False
        return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)
    def unarchive(self):
        """Extract the archive into self.dest; returns cmd/rc/out/err as a dict."""
        cmd = [self.cmd_path, '--extract', '-C', self.dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.file_args['owner']:
            cmd.append('--owner=' + quote(self.file_args['owner']))
        if self.file_args['group']:
            cmd.append('--group=' + quote(self.file_args['group']))
        if self.module.params['keep_newer']:
            cmd.append('--keep-newer-files')
        if self.excludes:
            cmd.extend(['--exclude=' + quote(f) for f in self.excludes])
        cmd.extend(['-f', self.src])
        rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=dict(LANG='C', LC_ALL='C', LC_MESSAGES='C'))
        return dict(cmd=cmd, rc=rc, out=out, err=err)
    def can_handle_archive(self):
        """Return (bool, reason): whether this handler can process the archive.

        Requires GNU tar; bsdtar lacks the options this handler depends on.
        """
        if not self.cmd_path:
            return False, 'Commands "gtar" and "tar" not found.'
        if self.tar_type != 'gnu':
            return False, 'Command "%s" detected as tar type %s. GNU tar required.' % (self.cmd_path, self.tar_type)
        try:
            if self.files_in_archive:
                return True, None
        except UnarchiveError:
            return False, 'Command "%s" could not handle archive.' % self.cmd_path
        # Errors and no files in archive assume that we weren't able to
        # properly unarchive it
        return False, 'Command "%s" found no files in archive.' % self.cmd_path
# Class to handle tar files that aren't compressed
class TarArchive(TgzArchive):
    """Handle uncompressed tar archives (no compression flag passed to tar)."""
    def __init__(self, src, dest, file_args, module):
        super(TarArchive, self).__init__(src, dest, file_args, module)
        # argument to tar
        self.zipflag = ''
# Class to handle bzip2 compressed tar files
class TarBzipArchive(TgzArchive):
    """Handle bzip2-compressed tar archives (tar -j)."""
    def __init__(self, src, dest, file_args, module):
        super(TarBzipArchive, self).__init__(src, dest, file_args, module)
        self.zipflag = '-j'
# Class to handle xz compressed tar files
class TarXzArchive(TgzArchive):
    """Handle xz-compressed tar archives (tar -J)."""
    def __init__(self, src, dest, file_args, module):
        super(TarXzArchive, self).__init__(src, dest, file_args, module)
        self.zipflag = '-J'
# try handlers in order and return the one that works or bail if none work
def pick_handler(src, dest, file_args, module):
    """Return the first archive handler able to process *src*.

    Handlers are tried in priority order (zip, gz, plain tar, bz2, xz).
    If none can handle the archive, the module fails with the collected,
    de-duplicated reasons.
    """
    handlers = [ZipArchive, TgzArchive, TarArchive, TarBzipArchive, TarXzArchive]
    reasons = []
    for handler in handlers:
        obj = handler(src, dest, file_args, module)
        (can_handle, reason) = obj.can_handle_archive()
        if can_handle:
            return obj
        # Collect each distinct reason once, preserving handler priority
        # order; joining a set here would make the failure message order
        # nondeterministic between runs.
        if reason not in reasons:
            reasons.append(reason)
    reason_msg = ' '.join(reasons)
    module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed. %s' % (src, reason_msg))
def main():
    """Entry point of the unarchive module.

    Validates src/dest, optionally downloads a remote archive, picks a
    handler, performs (or predicts, in check mode) the extraction, then
    applies requested file attributes to the extracted files.
    """
    module = AnsibleModule(
        # not checking because of daisy chain to file module
        argument_spec=dict(
            src=dict(type='path', required=True),
            original_basename=dict(type='str'),  # used to handle 'dest is a directory' via template, a slight hack
            dest=dict(type='path', required=True),
            remote_src=dict(type='bool', default=False),
            creates=dict(type='path'),
            list_files=dict(type='bool', default=False),
            keep_newer=dict(type='bool', default=False),
            exclude=dict(type='list', default=[]),
            extra_opts=dict(type='list', default=[]),
            validate_certs=dict(type='bool', default=True),
        ),
        add_file_common_args=True,
        # check-mode only works for zip files, we cover that later
        supports_check_mode=True,
    )
    src = module.params['src']
    dest = module.params['dest']
    remote_src = module.params['remote_src']
    file_args = module.load_file_common_arguments(module.params)
    # did tar file arrive?
    if not os.path.exists(src):
        if not remote_src:
            module.fail_json(msg="Source '%s' failed to transfer" % src)
        # If remote_src=true, and src= contains ://, try and download the file to a temp directory.
        elif '://' in src:
            # NOTE(review): the download target is the directory containing
            # this module file, not a real temp dir — confirm this is intended.
            tempdir = os.path.dirname(os.path.realpath(__file__))
            package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))
            try:
                rsp, info = fetch_url(module, src)
                # If download fails, raise a proper exception
                if rsp is None:
                    raise Exception(info['msg'])
                # open in binary mode for python3
                f = open(package, 'wb')
                # Read 1kb at a time to save on ram
                while True:
                    data = rsp.read(BUFSIZE)
                    data = to_bytes(data, errors='surrogate_or_strict')
                    if len(data) < 1:
                        break  # End of file, break while loop
                    f.write(data)
                f.close()
                src = package
            except Exception as e:
                module.fail_json(msg="Failure downloading %s, %s" % (src, to_native(e)))
        else:
            module.fail_json(msg="Source '%s' does not exist" % src)
    if not os.access(src, os.R_OK):
        module.fail_json(msg="Source '%s' not readable" % src)
    # skip working with 0 size archives
    try:
        if os.path.getsize(src) == 0:
            module.fail_json(msg="Invalid archive '%s', the file is 0 bytes" % src)
    except Exception as e:
        module.fail_json(msg="Source '%s' not readable, %s" % (src, to_native(e)))
    # is dest OK to receive tar file?
    if not os.path.isdir(dest):
        module.fail_json(msg="Destination '%s' is not a directory" % dest)
    handler = pick_handler(src, dest, file_args, module)
    res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)
    # do we need to do unpack?
    check_results = handler.is_unarchived()
    # DEBUG
    # res_args['check_results'] = check_results
    if module.check_mode:
        res_args['changed'] = not check_results['unarchived']
    elif check_results['unarchived']:
        res_args['changed'] = False
    else:
        # do the unpack
        try:
            res_args['extract_results'] = handler.unarchive()
            if res_args['extract_results']['rc'] != 0:
                module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
        except IOError:
            module.fail_json(msg="failed to unpack %s to %s" % (src, dest), **res_args)
        else:
            res_args['changed'] = True
    # Get diff if required
    if check_results.get('diff', False):
        res_args['diff'] = {'prepared': check_results['diff']}
    # Run only if we found differences (idempotence) or diff was missing
    if res_args.get('diff', True) and not module.check_mode:
        # do we need to change perms?
        for filename in handler.files_in_archive:
            file_args['path'] = os.path.join(dest, filename)
            try:
                res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'], expand=False)
            except (IOError, OSError) as e:
                module.fail_json(msg="Unexpected error when accessing exploded file: %s" % to_native(e), **res_args)
    if module.params['list_files']:
        res_args['files'] = handler.files_in_archive
    module.exit_json(**res_args)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
sometallgit/AutoUploader | Python27/Lib/bsddb/test/test_sequence.py | 68 | 5274 | import unittest
import os
from test_all import db, test_support, get_new_environment_path, get_new_database_path
class DBSequenceTest(unittest.TestCase):
    """Exercise the bsddb DBSequence API against a fresh temporary environment.

    Python 2 code: uses long literals (1L) and Python 2 octal syntax (0666).
    """
    def setUp(self):
        # 2**32 — used to construct values that do not fit in 32 bits.
        self.int_32_max = 0x100000000
        self.homeDir = get_new_environment_path()
        self.filename = "test"
        self.dbenv = db.DBEnv()
        self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0666)
        self.d = db.DB(self.dbenv)
        self.d.open(self.filename, db.DB_BTREE, db.DB_CREATE, 0666)
    def tearDown(self):
        # Close in dependency order: sequence -> database -> environment.
        if hasattr(self, 'seq'):
            self.seq.close()
            del self.seq
        if hasattr(self, 'd'):
            self.d.close()
            del self.d
        if hasattr(self, 'dbenv'):
            self.dbenv.close()
            del self.dbenv
        test_support.rmtree(self.homeDir)
    def test_get(self):
        # A batch get(5) returns the current value and advances by 5.
        self.seq = db.DBSequence(self.d, flags=0)
        start_value = 10 * self.int_32_max
        self.assertEqual(0xA00000000, start_value)
        self.assertEqual(None, self.seq.initial_value(start_value))
        self.assertEqual(None, self.seq.open(key='id', txn=None, flags=db.DB_CREATE))
        self.assertEqual(start_value, self.seq.get(5))
        self.assertEqual(start_value + 5, self.seq.get())
    def test_remove(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(None, self.seq.remove(txn=None, flags=0))
        # Already removed: prevent tearDown from closing it a second way.
        del self.seq
    def test_get_key(self):
        self.seq = db.DBSequence(self.d, flags=0)
        key = 'foo'
        self.assertEqual(None, self.seq.open(key=key, txn=None, flags=db.DB_CREATE))
        self.assertEqual(key, self.seq.get_key())
    def test_get_dbp(self):
        # get_dbp() must hand back the DB the sequence was created on.
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(self.d, self.seq.get_dbp())
    def test_cachesize(self):
        self.seq = db.DBSequence(self.d, flags=0)
        cashe_size = 10
        self.assertEqual(None, self.seq.set_cachesize(cashe_size))
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(cashe_size, self.seq.get_cachesize())
    def test_flags(self):
        self.seq = db.DBSequence(self.d, flags=0)
        flag = db.DB_SEQ_WRAP;
        self.assertEqual(None, self.seq.set_flags(flag))
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        # Other default flags may be set, so only check the requested bit.
        self.assertEqual(flag, self.seq.get_flags() & flag)
    def test_range(self):
        self.seq = db.DBSequence(self.d, flags=0)
        seq_range = (10 * self.int_32_max, 11 * self.int_32_max - 1)
        self.assertEqual(None, self.seq.set_range(seq_range))
        self.seq.initial_value(seq_range[0])
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        self.assertEqual(seq_range, self.seq.get_range())
    def test_stat(self):
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.open(key='foo', txn=None, flags=db.DB_CREATE))
        stat = self.seq.stat()
        for param in ('nowait', 'min', 'max', 'value', 'current',
                      'flags', 'cache_size', 'last_value', 'wait'):
            self.assertTrue(param in stat, "parameter %s isn't in stat info" % param)
    if db.version() >= (4,7) :
        # This code checks a crash solved in Berkeley DB 4.7
        def test_stat_crash(self) :
            d=db.DB()
            d.open(None,dbtype=db.DB_HASH,flags=db.DB_CREATE)  # In RAM
            seq = db.DBSequence(d, flags=0)
            self.assertRaises(db.DBNotFoundError, seq.open,
                    key='id', txn=None, flags=0)
            self.assertRaises(db.DBInvalidArgError, seq.stat)
            d.close()
    def test_64bits(self) :
        # We don't use both extremes because they are problematic
        value_plus=(1L<<63)-2
        self.assertEqual(9223372036854775806L,value_plus)
        value_minus=(-1L<<63)+1  # Two complement
        self.assertEqual(-9223372036854775807L,value_minus)
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.initial_value(value_plus-1))
        self.assertEqual(None, self.seq.open(key='id', txn=None,
            flags=db.DB_CREATE))
        self.assertEqual(value_plus-1, self.seq.get(1))
        self.assertEqual(value_plus, self.seq.get(1))
        # Reuse the same key with a negative starting point.
        self.seq.remove(txn=None, flags=0)
        self.seq = db.DBSequence(self.d, flags=0)
        self.assertEqual(None, self.seq.initial_value(value_minus))
        self.assertEqual(None, self.seq.open(key='id', txn=None,
            flags=db.DB_CREATE))
        self.assertEqual(value_minus, self.seq.get(1))
        self.assertEqual(value_minus+1, self.seq.get(1))
    def test_multiple_close(self):
        self.seq = db.DBSequence(self.d)
        self.seq.close()  # You can close a Sequence multiple times
        self.seq.close()
        self.seq.close()
def test_suite():
    """Build and return the TestSuite covering this module's test cases."""
    cases = unittest.makeSuite(DBSequenceTest)
    combined = unittest.TestSuite()
    combined.addTest(cases)
    return combined
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main(defaultTest='test_suite')
| mit |
beiko-lab/gengis | bin/Lib/site-packages/win32/lib/mmsystem.py | 9 | 31388 | # Generated by h2py from d:/msdev/include/mmsystem.h
MAXPNAMELEN = 32
MAXERRORLENGTH = 256
MAX_JOYSTICKOEMVXDNAME = 260
MM_MICROSOFT = 1
MM_MIDI_MAPPER = 1
MM_WAVE_MAPPER = 2
MM_SNDBLST_MIDIOUT = 3
MM_SNDBLST_MIDIIN = 4
MM_SNDBLST_SYNTH = 5
MM_SNDBLST_WAVEOUT = 6
MM_SNDBLST_WAVEIN = 7
MM_ADLIB = 9
MM_MPU401_MIDIOUT = 10
MM_MPU401_MIDIIN = 11
MM_PC_JOYSTICK = 12
TIME_MS = 0x0001
TIME_SAMPLES = 0x0002
TIME_BYTES = 0x0004
TIME_SMPTE = 0x0008
TIME_MIDI = 0x0010
TIME_TICKS = 0x0020
MM_JOY1MOVE = 0x3A0
MM_JOY2MOVE = 0x3A1
MM_JOY1ZMOVE = 0x3A2
MM_JOY2ZMOVE = 0x3A3
MM_JOY1BUTTONDOWN = 0x3B5
MM_JOY2BUTTONDOWN = 0x3B6
MM_JOY1BUTTONUP = 0x3B7
MM_JOY2BUTTONUP = 0x3B8
MM_MCINOTIFY = 0x3B9
MM_WOM_OPEN = 0x3BB
MM_WOM_CLOSE = 0x3BC
MM_WOM_DONE = 0x3BD
MM_WIM_OPEN = 0x3BE
MM_WIM_CLOSE = 0x3BF
MM_WIM_DATA = 0x3C0
MM_MIM_OPEN = 0x3C1
MM_MIM_CLOSE = 0x3C2
MM_MIM_DATA = 0x3C3
MM_MIM_LONGDATA = 0x3C4
MM_MIM_ERROR = 0x3C5
MM_MIM_LONGERROR = 0x3C6
MM_MOM_OPEN = 0x3C7
MM_MOM_CLOSE = 0x3C8
MM_MOM_DONE = 0x3C9
MM_STREAM_OPEN = 0x3D4
MM_STREAM_CLOSE = 0x3D5
MM_STREAM_DONE = 0x3D6
MM_STREAM_ERROR = 0x3D7
MM_MOM_POSITIONCB = 0x3CA
MM_MIM_MOREDATA = 0x3CC
MM_MIXM_LINE_CHANGE = 0x3D0
MM_MIXM_CONTROL_CHANGE = 0x3D1
MMSYSERR_BASE = 0
WAVERR_BASE = 32
MIDIERR_BASE = 64
TIMERR_BASE = 96
JOYERR_BASE = 160
MCIERR_BASE = 256
MIXERR_BASE = 1024
MCI_STRING_OFFSET = 512
MCI_VD_OFFSET = 1024
MCI_CD_OFFSET = 1088
MCI_WAVE_OFFSET = 1152
MCI_SEQ_OFFSET = 1216
MMSYSERR_NOERROR = 0
MMSYSERR_ERROR = (MMSYSERR_BASE + 1)
MMSYSERR_BADDEVICEID = (MMSYSERR_BASE + 2)
MMSYSERR_NOTENABLED = (MMSYSERR_BASE + 3)
MMSYSERR_ALLOCATED = (MMSYSERR_BASE + 4)
MMSYSERR_INVALHANDLE = (MMSYSERR_BASE + 5)
MMSYSERR_NODRIVER = (MMSYSERR_BASE + 6)
MMSYSERR_NOMEM = (MMSYSERR_BASE + 7)
MMSYSERR_NOTSUPPORTED = (MMSYSERR_BASE + 8)
MMSYSERR_BADERRNUM = (MMSYSERR_BASE + 9)
MMSYSERR_INVALFLAG = (MMSYSERR_BASE + 10)
MMSYSERR_INVALPARAM = (MMSYSERR_BASE + 11)
MMSYSERR_HANDLEBUSY = (MMSYSERR_BASE + 12)
MMSYSERR_INVALIDALIAS = (MMSYSERR_BASE + 13)
MMSYSERR_BADDB = (MMSYSERR_BASE + 14)
MMSYSERR_KEYNOTFOUND = (MMSYSERR_BASE + 15)
MMSYSERR_READERROR = (MMSYSERR_BASE + 16)
MMSYSERR_WRITEERROR = (MMSYSERR_BASE + 17)
MMSYSERR_DELETEERROR = (MMSYSERR_BASE + 18)
MMSYSERR_VALNOTFOUND = (MMSYSERR_BASE + 19)
MMSYSERR_NODRIVERCB = (MMSYSERR_BASE + 20)
MMSYSERR_LASTERROR = (MMSYSERR_BASE + 20)
DRV_LOAD = 0x0001
DRV_ENABLE = 0x0002
DRV_OPEN = 0x0003
DRV_CLOSE = 0x0004
DRV_DISABLE = 0x0005
DRV_FREE = 0x0006
DRV_CONFIGURE = 0x0007
DRV_QUERYCONFIGURE = 0x0008
DRV_INSTALL = 0x0009
DRV_REMOVE = 0x000A
DRV_EXITSESSION = 0x000B
DRV_POWER = 0x000F
DRV_RESERVED = 0x0800
DRV_USER = 0x4000
DRVCNF_CANCEL = 0x0000
DRVCNF_OK = 0x0001
DRVCNF_RESTART = 0x0002
DRV_CANCEL = DRVCNF_CANCEL
DRV_OK = DRVCNF_OK
DRV_RESTART = DRVCNF_RESTART
DRV_MCI_FIRST = DRV_RESERVED
DRV_MCI_LAST = (DRV_RESERVED + 0xFFF)
CALLBACK_TYPEMASK = 0x00070000
CALLBACK_NULL = 0x00000000
CALLBACK_WINDOW = 0x00010000
CALLBACK_TASK = 0x00020000
CALLBACK_FUNCTION = 0x00030000
CALLBACK_THREAD = (CALLBACK_TASK)
CALLBACK_EVENT = 0x00050000
SND_SYNC = 0x0000
SND_ASYNC = 0x0001
SND_NODEFAULT = 0x0002
SND_MEMORY = 0x0004
SND_LOOP = 0x0008
SND_NOSTOP = 0x0010
SND_NOWAIT = 0x00002000
SND_ALIAS = 0x00010000
SND_ALIAS_ID = 0x00110000
SND_FILENAME = 0x00020000
SND_RESOURCE = 0x00040004
SND_PURGE = 0x0040
SND_APPLICATION = 0x0080
SND_ALIAS_START = 0
WAVERR_BADFORMAT = (WAVERR_BASE + 0)
WAVERR_STILLPLAYING = (WAVERR_BASE + 1)
WAVERR_UNPREPARED = (WAVERR_BASE + 2)
WAVERR_SYNC = (WAVERR_BASE + 3)
WAVERR_LASTERROR = (WAVERR_BASE + 3)
WOM_OPEN = MM_WOM_OPEN
WOM_CLOSE = MM_WOM_CLOSE
WOM_DONE = MM_WOM_DONE
WIM_OPEN = MM_WIM_OPEN
WIM_CLOSE = MM_WIM_CLOSE
WIM_DATA = MM_WIM_DATA
WAVE_MAPPER = -1 # 0xFFFFFFFF
WAVE_FORMAT_QUERY = 0x0001
WAVE_ALLOWSYNC = 0x0002
WAVE_MAPPED = 0x0004
WAVE_FORMAT_DIRECT = 0x0008
WAVE_FORMAT_DIRECT_QUERY = (WAVE_FORMAT_QUERY | WAVE_FORMAT_DIRECT)
WHDR_DONE = 0x00000001
WHDR_PREPARED = 0x00000002
WHDR_BEGINLOOP = 0x00000004
WHDR_ENDLOOP = 0x00000008
WHDR_INQUEUE = 0x00000010
WAVECAPS_PITCH = 0x0001
WAVECAPS_PLAYBACKRATE = 0x0002
WAVECAPS_VOLUME = 0x0004
WAVECAPS_LRVOLUME = 0x0008
WAVECAPS_SYNC = 0x0010
WAVECAPS_SAMPLEACCURATE = 0x0020
WAVECAPS_DIRECTSOUND = 0x0040
WAVE_INVALIDFORMAT = 0x00000000
WAVE_FORMAT_1M08 = 0x00000001
WAVE_FORMAT_1S08 = 0x00000002
WAVE_FORMAT_1M16 = 0x00000004
WAVE_FORMAT_1S16 = 0x00000008
WAVE_FORMAT_2M08 = 0x00000010
WAVE_FORMAT_2S08 = 0x00000020
WAVE_FORMAT_2M16 = 0x00000040
WAVE_FORMAT_2S16 = 0x00000080
WAVE_FORMAT_4M08 = 0x00000100
WAVE_FORMAT_4S08 = 0x00000200
WAVE_FORMAT_4M16 = 0x00000400
WAVE_FORMAT_4S16 = 0x00000800
WAVE_FORMAT_PCM = 1
WAVE_FORMAT_IEEE_FLOAT = 3
MIDIERR_UNPREPARED = (MIDIERR_BASE + 0)
MIDIERR_STILLPLAYING = (MIDIERR_BASE + 1)
MIDIERR_NOMAP = (MIDIERR_BASE + 2)
MIDIERR_NOTREADY = (MIDIERR_BASE + 3)
MIDIERR_NODEVICE = (MIDIERR_BASE + 4)
MIDIERR_INVALIDSETUP = (MIDIERR_BASE + 5)
MIDIERR_BADOPENMODE = (MIDIERR_BASE + 6)
MIDIERR_DONT_CONTINUE = (MIDIERR_BASE + 7)
MIDIERR_LASTERROR = (MIDIERR_BASE + 7)
MIDIPATCHSIZE = 128
MIM_OPEN = MM_MIM_OPEN
MIM_CLOSE = MM_MIM_CLOSE
MIM_DATA = MM_MIM_DATA
MIM_LONGDATA = MM_MIM_LONGDATA
MIM_ERROR = MM_MIM_ERROR
MIM_LONGERROR = MM_MIM_LONGERROR
MOM_OPEN = MM_MOM_OPEN
MOM_CLOSE = MM_MOM_CLOSE
MOM_DONE = MM_MOM_DONE
MIM_MOREDATA = MM_MIM_MOREDATA
MOM_POSITIONCB = MM_MOM_POSITIONCB
MIDI_IO_STATUS = 0x00000020
MIDI_CACHE_ALL = 1
MIDI_CACHE_BESTFIT = 2
MIDI_CACHE_QUERY = 3
MIDI_UNCACHE = 4
MOD_MIDIPORT = 1
MOD_SYNTH = 2
MOD_SQSYNTH = 3
MOD_FMSYNTH = 4
MOD_MAPPER = 5
MIDICAPS_VOLUME = 0x0001
MIDICAPS_LRVOLUME = 0x0002
MIDICAPS_CACHE = 0x0004
MIDICAPS_STREAM = 0x0008
MHDR_DONE = 0x00000001
MHDR_PREPARED = 0x00000002
MHDR_INQUEUE = 0x00000004
MHDR_ISSTRM = 0x00000008
MEVT_F_SHORT = 0x00000000
MEVT_F_LONG = -2147483648 # 0x80000000
MEVT_F_CALLBACK = 0x40000000
def MEVT_EVENTTYPE(x):
    """Return the event-type byte (bits 24-31) of a MIDI stream event dword.

    The h2py-generated original kept the C casts ``(BYTE)``/``(DWORD)``
    verbatim; those names are undefined in Python, so calling it raised
    NameError. The masks below implement the same truncation semantics.
    """
    return (x >> 24) & 0xFF

def MEVT_EVENTPARM(x):
    """Return the 24-bit event parameter (bits 0-23) of a MIDI stream event dword."""
    return x & 0x00FFFFFF
MIDISTRM_ERROR = (-2)
MIDIPROP_SET = -2147483648 # 0x80000000
MIDIPROP_GET = 0x40000000
MIDIPROP_TIMEDIV = 0x00000001
MIDIPROP_TEMPO = 0x00000002
AUXCAPS_CDAUDIO = 1
AUXCAPS_AUXIN = 2
AUXCAPS_VOLUME = 0x0001
AUXCAPS_LRVOLUME = 0x0002
MIXER_SHORT_NAME_CHARS = 16
MIXER_LONG_NAME_CHARS = 64
MIXERR_INVALLINE = (MIXERR_BASE + 0)
MIXERR_INVALCONTROL = (MIXERR_BASE + 1)
MIXERR_INVALVALUE = (MIXERR_BASE + 2)
MIXERR_LASTERROR = (MIXERR_BASE + 2)
MIXER_OBJECTF_HANDLE = -2147483648 # 0x80000000
MIXER_OBJECTF_MIXER = 0x00000000
MIXER_OBJECTF_HMIXER = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_MIXER)
MIXER_OBJECTF_WAVEOUT = 0x10000000
MIXER_OBJECTF_HWAVEOUT = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_WAVEOUT)
MIXER_OBJECTF_WAVEIN = 0x20000000
MIXER_OBJECTF_HWAVEIN = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_WAVEIN)
MIXER_OBJECTF_MIDIOUT = 0x30000000
MIXER_OBJECTF_HMIDIOUT = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_MIDIOUT)
MIXER_OBJECTF_MIDIIN = 0x40000000
MIXER_OBJECTF_HMIDIIN = (MIXER_OBJECTF_HANDLE|MIXER_OBJECTF_MIDIIN)
MIXER_OBJECTF_AUX = 0x50000000
MIXERLINE_LINEF_ACTIVE = 0x00000001
MIXERLINE_LINEF_DISCONNECTED = 0x00008000
MIXERLINE_LINEF_SOURCE = -2147483648 # 0x80000000
MIXERLINE_COMPONENTTYPE_DST_FIRST = 0x00000000
MIXERLINE_COMPONENTTYPE_DST_UNDEFINED = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 0)
MIXERLINE_COMPONENTTYPE_DST_DIGITAL = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 1)
MIXERLINE_COMPONENTTYPE_DST_LINE = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 2)
MIXERLINE_COMPONENTTYPE_DST_MONITOR = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 3)
MIXERLINE_COMPONENTTYPE_DST_SPEAKERS = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 4)
MIXERLINE_COMPONENTTYPE_DST_HEADPHONES = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 5)
MIXERLINE_COMPONENTTYPE_DST_TELEPHONE = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 6)
MIXERLINE_COMPONENTTYPE_DST_WAVEIN = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 7)
MIXERLINE_COMPONENTTYPE_DST_VOICEIN = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 8)
MIXERLINE_COMPONENTTYPE_DST_LAST = (MIXERLINE_COMPONENTTYPE_DST_FIRST + 8)
MIXERLINE_COMPONENTTYPE_SRC_FIRST = 0x00001000
MIXERLINE_COMPONENTTYPE_SRC_UNDEFINED = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 0)
MIXERLINE_COMPONENTTYPE_SRC_DIGITAL = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 1)
MIXERLINE_COMPONENTTYPE_SRC_LINE = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 2)
MIXERLINE_COMPONENTTYPE_SRC_MICROPHONE = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 3)
MIXERLINE_COMPONENTTYPE_SRC_SYNTHESIZER = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 4)
MIXERLINE_COMPONENTTYPE_SRC_COMPACTDISC = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 5)
MIXERLINE_COMPONENTTYPE_SRC_TELEPHONE = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 6)
MIXERLINE_COMPONENTTYPE_SRC_PCSPEAKER = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 7)
MIXERLINE_COMPONENTTYPE_SRC_WAVEOUT = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 8)
MIXERLINE_COMPONENTTYPE_SRC_AUXILIARY = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 9)
MIXERLINE_COMPONENTTYPE_SRC_ANALOG = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 10)
MIXERLINE_COMPONENTTYPE_SRC_LAST = (MIXERLINE_COMPONENTTYPE_SRC_FIRST + 10)
MIXERLINE_TARGETTYPE_UNDEFINED = 0
MIXERLINE_TARGETTYPE_WAVEOUT = 1
MIXERLINE_TARGETTYPE_WAVEIN = 2
MIXERLINE_TARGETTYPE_MIDIOUT = 3
MIXERLINE_TARGETTYPE_MIDIIN = 4
MIXERLINE_TARGETTYPE_AUX = 5
MIXER_GETLINEINFOF_DESTINATION = 0x00000000
MIXER_GETLINEINFOF_SOURCE = 0x00000001
MIXER_GETLINEINFOF_LINEID = 0x00000002
MIXER_GETLINEINFOF_COMPONENTTYPE = 0x00000003
MIXER_GETLINEINFOF_TARGETTYPE = 0x00000004
MIXER_GETLINEINFOF_QUERYMASK = 0x0000000F
MIXERCONTROL_CONTROLF_UNIFORM = 0x00000001
MIXERCONTROL_CONTROLF_MULTIPLE = 0x00000002
MIXERCONTROL_CONTROLF_DISABLED = -2147483648 # 0x80000000
MIXERCONTROL_CT_CLASS_MASK = -268435456 # 0xF0000000
MIXERCONTROL_CT_CLASS_CUSTOM = 0x00000000
MIXERCONTROL_CT_CLASS_METER = 0x10000000
MIXERCONTROL_CT_CLASS_SWITCH = 0x20000000
MIXERCONTROL_CT_CLASS_NUMBER = 0x30000000
MIXERCONTROL_CT_CLASS_SLIDER = 0x40000000
MIXERCONTROL_CT_CLASS_FADER = 0x50000000
MIXERCONTROL_CT_CLASS_TIME = 0x60000000
MIXERCONTROL_CT_CLASS_LIST = 0x70000000
MIXERCONTROL_CT_SUBCLASS_MASK = 0x0F000000
MIXERCONTROL_CT_SC_SWITCH_BOOLEAN = 0x00000000
MIXERCONTROL_CT_SC_SWITCH_BUTTON = 0x01000000
MIXERCONTROL_CT_SC_METER_POLLED = 0x00000000
MIXERCONTROL_CT_SC_TIME_MICROSECS = 0x00000000
MIXERCONTROL_CT_SC_TIME_MILLISECS = 0x01000000
MIXERCONTROL_CT_SC_LIST_SINGLE = 0x00000000
MIXERCONTROL_CT_SC_LIST_MULTIPLE = 0x01000000
MIXERCONTROL_CT_UNITS_MASK = 0x00FF0000
MIXERCONTROL_CT_UNITS_CUSTOM = 0x00000000
MIXERCONTROL_CT_UNITS_BOOLEAN = 0x00010000
MIXERCONTROL_CT_UNITS_SIGNED = 0x00020000
MIXERCONTROL_CT_UNITS_UNSIGNED = 0x00030000
MIXERCONTROL_CT_UNITS_DECIBELS = 0x00040000
MIXERCONTROL_CT_UNITS_PERCENT = 0x00050000
MIXERCONTROL_CONTROLTYPE_CUSTOM = (MIXERCONTROL_CT_CLASS_CUSTOM | MIXERCONTROL_CT_UNITS_CUSTOM)
MIXERCONTROL_CONTROLTYPE_BOOLEANMETER = (MIXERCONTROL_CT_CLASS_METER | MIXERCONTROL_CT_SC_METER_POLLED | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_SIGNEDMETER = (MIXERCONTROL_CT_CLASS_METER | MIXERCONTROL_CT_SC_METER_POLLED | MIXERCONTROL_CT_UNITS_SIGNED)
MIXERCONTROL_CONTROLTYPE_PEAKMETER = (MIXERCONTROL_CONTROLTYPE_SIGNEDMETER + 1)
MIXERCONTROL_CONTROLTYPE_UNSIGNEDMETER = (MIXERCONTROL_CT_CLASS_METER | MIXERCONTROL_CT_SC_METER_POLLED | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_BOOLEAN = (MIXERCONTROL_CT_CLASS_SWITCH | MIXERCONTROL_CT_SC_SWITCH_BOOLEAN | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_ONOFF = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 1)
MIXERCONTROL_CONTROLTYPE_MUTE = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 2)
MIXERCONTROL_CONTROLTYPE_MONO = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 3)
MIXERCONTROL_CONTROLTYPE_LOUDNESS = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 4)
MIXERCONTROL_CONTROLTYPE_STEREOENH = (MIXERCONTROL_CONTROLTYPE_BOOLEAN + 5)
MIXERCONTROL_CONTROLTYPE_BUTTON = (MIXERCONTROL_CT_CLASS_SWITCH | MIXERCONTROL_CT_SC_SWITCH_BUTTON | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_DECIBELS = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_DECIBELS)
MIXERCONTROL_CONTROLTYPE_SIGNED = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_SIGNED)
MIXERCONTROL_CONTROLTYPE_UNSIGNED = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_PERCENT = (MIXERCONTROL_CT_CLASS_NUMBER | MIXERCONTROL_CT_UNITS_PERCENT)
MIXERCONTROL_CONTROLTYPE_SLIDER = (MIXERCONTROL_CT_CLASS_SLIDER | MIXERCONTROL_CT_UNITS_SIGNED)
MIXERCONTROL_CONTROLTYPE_PAN = (MIXERCONTROL_CONTROLTYPE_SLIDER + 1)
MIXERCONTROL_CONTROLTYPE_QSOUNDPAN = (MIXERCONTROL_CONTROLTYPE_SLIDER + 2)
MIXERCONTROL_CONTROLTYPE_FADER = (MIXERCONTROL_CT_CLASS_FADER | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_VOLUME = (MIXERCONTROL_CONTROLTYPE_FADER + 1)
MIXERCONTROL_CONTROLTYPE_BASS = (MIXERCONTROL_CONTROLTYPE_FADER + 2)
MIXERCONTROL_CONTROLTYPE_TREBLE = (MIXERCONTROL_CONTROLTYPE_FADER + 3)
MIXERCONTROL_CONTROLTYPE_EQUALIZER = (MIXERCONTROL_CONTROLTYPE_FADER + 4)
MIXERCONTROL_CONTROLTYPE_SINGLESELECT = (MIXERCONTROL_CT_CLASS_LIST | MIXERCONTROL_CT_SC_LIST_SINGLE | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_MUX = (MIXERCONTROL_CONTROLTYPE_SINGLESELECT + 1)
MIXERCONTROL_CONTROLTYPE_MULTIPLESELECT = (MIXERCONTROL_CT_CLASS_LIST | MIXERCONTROL_CT_SC_LIST_MULTIPLE | MIXERCONTROL_CT_UNITS_BOOLEAN)
MIXERCONTROL_CONTROLTYPE_MIXER = (MIXERCONTROL_CONTROLTYPE_MULTIPLESELECT + 1)
MIXERCONTROL_CONTROLTYPE_MICROTIME = (MIXERCONTROL_CT_CLASS_TIME | MIXERCONTROL_CT_SC_TIME_MICROSECS | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXERCONTROL_CONTROLTYPE_MILLITIME = (MIXERCONTROL_CT_CLASS_TIME | MIXERCONTROL_CT_SC_TIME_MILLISECS | MIXERCONTROL_CT_UNITS_UNSIGNED)
MIXER_GETLINECONTROLSF_ALL = 0x00000000
MIXER_GETLINECONTROLSF_ONEBYID = 0x00000001
MIXER_GETLINECONTROLSF_ONEBYTYPE = 0x00000002
MIXER_GETLINECONTROLSF_QUERYMASK = 0x0000000F
MIXER_GETCONTROLDETAILSF_VALUE = 0x00000000
MIXER_GETCONTROLDETAILSF_LISTTEXT = 0x00000001
MIXER_GETCONTROLDETAILSF_QUERYMASK = 0x0000000F
MIXER_SETCONTROLDETAILSF_VALUE = 0x00000000
MIXER_SETCONTROLDETAILSF_CUSTOM = 0x00000001
MIXER_SETCONTROLDETAILSF_QUERYMASK = 0x0000000F
TIMERR_NOERROR = (0)
TIMERR_NOCANDO = (TIMERR_BASE+1)
TIMERR_STRUCT = (TIMERR_BASE+33)
TIME_ONESHOT = 0x0000
TIME_PERIODIC = 0x0001
TIME_CALLBACK_FUNCTION = 0x0000
TIME_CALLBACK_EVENT_SET = 0x0010
TIME_CALLBACK_EVENT_PULSE = 0x0020
JOYERR_NOERROR = (0)
JOYERR_PARMS = (JOYERR_BASE+5)
JOYERR_NOCANDO = (JOYERR_BASE+6)
JOYERR_UNPLUGGED = (JOYERR_BASE+7)
JOY_BUTTON1 = 0x0001
JOY_BUTTON2 = 0x0002
JOY_BUTTON3 = 0x0004
JOY_BUTTON4 = 0x0008
JOY_BUTTON1CHG = 0x0100
JOY_BUTTON2CHG = 0x0200
JOY_BUTTON3CHG = 0x0400
JOY_BUTTON4CHG = 0x0800
JOY_BUTTON5 = 0x00000010
JOY_BUTTON6 = 0x00000020
JOY_BUTTON7 = 0x00000040
JOY_BUTTON8 = 0x00000080
JOY_BUTTON9 = 0x00000100
JOY_BUTTON10 = 0x00000200
JOY_BUTTON11 = 0x00000400
JOY_BUTTON12 = 0x00000800
JOY_BUTTON13 = 0x00001000
JOY_BUTTON14 = 0x00002000
JOY_BUTTON15 = 0x00004000
JOY_BUTTON16 = 0x00008000
JOY_BUTTON17 = 0x00010000
JOY_BUTTON18 = 0x00020000
JOY_BUTTON19 = 0x00040000
JOY_BUTTON20 = 0x00080000
JOY_BUTTON21 = 0x00100000
JOY_BUTTON22 = 0x00200000
JOY_BUTTON23 = 0x00400000
JOY_BUTTON24 = 0x00800000
JOY_BUTTON25 = 0x01000000
JOY_BUTTON26 = 0x02000000
JOY_BUTTON27 = 0x04000000
JOY_BUTTON28 = 0x08000000
JOY_BUTTON29 = 0x10000000
JOY_BUTTON30 = 0x20000000
JOY_BUTTON31 = 0x40000000
JOY_BUTTON32 = -2147483648 # 0x80000000
JOY_POVFORWARD = 0
JOY_POVRIGHT = 9000
JOY_POVBACKWARD = 18000
JOY_POVLEFT = 27000
JOY_RETURNX = 0x00000001
JOY_RETURNY = 0x00000002
JOY_RETURNZ = 0x00000004
JOY_RETURNR = 0x00000008
JOY_RETURNU = 0x00000010
JOY_RETURNV = 0x00000020
JOY_RETURNPOV = 0x00000040
JOY_RETURNBUTTONS = 0x00000080
JOY_RETURNRAWDATA = 0x00000100
JOY_RETURNPOVCTS = 0x00000200
JOY_RETURNCENTERED = 0x00000400
JOY_USEDEADZONE = 0x00000800
JOY_RETURNALL = (JOY_RETURNX | JOY_RETURNY | JOY_RETURNZ | \
JOY_RETURNR | JOY_RETURNU | JOY_RETURNV | \
JOY_RETURNPOV | JOY_RETURNBUTTONS)
JOY_CAL_READALWAYS = 0x00010000
JOY_CAL_READXYONLY = 0x00020000
JOY_CAL_READ3 = 0x00040000
JOY_CAL_READ4 = 0x00080000
JOY_CAL_READXONLY = 0x00100000
JOY_CAL_READYONLY = 0x00200000
JOY_CAL_READ5 = 0x00400000
JOY_CAL_READ6 = 0x00800000
JOY_CAL_READZONLY = 0x01000000
JOY_CAL_READRONLY = 0x02000000
JOY_CAL_READUONLY = 0x04000000
JOY_CAL_READVONLY = 0x08000000
JOYSTICKID1 = 0
JOYSTICKID2 = 1
JOYCAPS_HASZ = 0x0001
JOYCAPS_HASR = 0x0002
JOYCAPS_HASU = 0x0004
JOYCAPS_HASV = 0x0008
JOYCAPS_HASPOV = 0x0010
JOYCAPS_POV4DIR = 0x0020
JOYCAPS_POVCTS = 0x0040
MMIOERR_BASE = 256
MMIOERR_FILENOTFOUND = (MMIOERR_BASE + 1)
MMIOERR_OUTOFMEMORY = (MMIOERR_BASE + 2)
MMIOERR_CANNOTOPEN = (MMIOERR_BASE + 3)
MMIOERR_CANNOTCLOSE = (MMIOERR_BASE + 4)
MMIOERR_CANNOTREAD = (MMIOERR_BASE + 5)
MMIOERR_CANNOTWRITE = (MMIOERR_BASE + 6)
MMIOERR_CANNOTSEEK = (MMIOERR_BASE + 7)
MMIOERR_CANNOTEXPAND = (MMIOERR_BASE + 8)
MMIOERR_CHUNKNOTFOUND = (MMIOERR_BASE + 9)
MMIOERR_UNBUFFERED = (MMIOERR_BASE + 10)
MMIOERR_PATHNOTFOUND = (MMIOERR_BASE + 11)
MMIOERR_ACCESSDENIED = (MMIOERR_BASE + 12)
MMIOERR_SHARINGVIOLATION = (MMIOERR_BASE + 13)
MMIOERR_NETWORKERROR = (MMIOERR_BASE + 14)
MMIOERR_TOOMANYOPENFILES = (MMIOERR_BASE + 15)
MMIOERR_INVALIDFILE = (MMIOERR_BASE + 16)
CFSEPCHAR = ord('+')
MMIO_RWMODE = 0x00000003
MMIO_SHAREMODE = 0x00000070
MMIO_CREATE = 0x00001000
MMIO_PARSE = 0x00000100
MMIO_DELETE = 0x00000200
MMIO_EXIST = 0x00004000
MMIO_ALLOCBUF = 0x00010000
MMIO_GETTEMP = 0x00020000
MMIO_DIRTY = 0x10000000
MMIO_READ = 0x00000000
MMIO_WRITE = 0x00000001
MMIO_READWRITE = 0x00000002
MMIO_COMPAT = 0x00000000
MMIO_EXCLUSIVE = 0x00000010
MMIO_DENYWRITE = 0x00000020
MMIO_DENYREAD = 0x00000030
MMIO_DENYNONE = 0x00000040
MMIO_FHOPEN = 0x0010
MMIO_EMPTYBUF = 0x0010
MMIO_TOUPPER = 0x0010
MMIO_INSTALLPROC = 0x00010000
MMIO_GLOBALPROC = 0x10000000
MMIO_REMOVEPROC = 0x00020000
MMIO_UNICODEPROC = 0x01000000
MMIO_FINDPROC = 0x00040000
MMIO_FINDCHUNK = 0x0010
MMIO_FINDRIFF = 0x0020
MMIO_FINDLIST = 0x0040
MMIO_CREATERIFF = 0x0020
MMIO_CREATELIST = 0x0040
MMIOM_READ = MMIO_READ
MMIOM_WRITE = MMIO_WRITE
MMIOM_SEEK = 2
MMIOM_OPEN = 3
MMIOM_CLOSE = 4
MMIOM_WRITEFLUSH = 5
MMIOM_RENAME = 6
MMIOM_USER = 0x8000
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
MMIO_DEFAULTBUFFER = 8192
MCIERR_INVALID_DEVICE_ID = (MCIERR_BASE + 1)
MCIERR_UNRECOGNIZED_KEYWORD = (MCIERR_BASE + 3)
MCIERR_UNRECOGNIZED_COMMAND = (MCIERR_BASE + 5)
MCIERR_HARDWARE = (MCIERR_BASE + 6)
MCIERR_INVALID_DEVICE_NAME = (MCIERR_BASE + 7)
MCIERR_OUT_OF_MEMORY = (MCIERR_BASE + 8)
MCIERR_DEVICE_OPEN = (MCIERR_BASE + 9)
MCIERR_CANNOT_LOAD_DRIVER = (MCIERR_BASE + 10)
MCIERR_MISSING_COMMAND_STRING = (MCIERR_BASE + 11)
MCIERR_PARAM_OVERFLOW = (MCIERR_BASE + 12)
MCIERR_MISSING_STRING_ARGUMENT = (MCIERR_BASE + 13)
MCIERR_BAD_INTEGER = (MCIERR_BASE + 14)
MCIERR_PARSER_INTERNAL = (MCIERR_BASE + 15)
MCIERR_DRIVER_INTERNAL = (MCIERR_BASE + 16)
MCIERR_MISSING_PARAMETER = (MCIERR_BASE + 17)
MCIERR_UNSUPPORTED_FUNCTION = (MCIERR_BASE + 18)
MCIERR_FILE_NOT_FOUND = (MCIERR_BASE + 19)
MCIERR_DEVICE_NOT_READY = (MCIERR_BASE + 20)
MCIERR_INTERNAL = (MCIERR_BASE + 21)
MCIERR_DRIVER = (MCIERR_BASE + 22)
MCIERR_CANNOT_USE_ALL = (MCIERR_BASE + 23)
MCIERR_MULTIPLE = (MCIERR_BASE + 24)
MCIERR_EXTENSION_NOT_FOUND = (MCIERR_BASE + 25)
MCIERR_OUTOFRANGE = (MCIERR_BASE + 26)
MCIERR_FLAGS_NOT_COMPATIBLE = (MCIERR_BASE + 28)
MCIERR_FILE_NOT_SAVED = (MCIERR_BASE + 30)
MCIERR_DEVICE_TYPE_REQUIRED = (MCIERR_BASE + 31)
MCIERR_DEVICE_LOCKED = (MCIERR_BASE + 32)
MCIERR_DUPLICATE_ALIAS = (MCIERR_BASE + 33)
MCIERR_BAD_CONSTANT = (MCIERR_BASE + 34)
MCIERR_MUST_USE_SHAREABLE = (MCIERR_BASE + 35)
MCIERR_MISSING_DEVICE_NAME = (MCIERR_BASE + 36)
MCIERR_BAD_TIME_FORMAT = (MCIERR_BASE + 37)
MCIERR_NO_CLOSING_QUOTE = (MCIERR_BASE + 38)
MCIERR_DUPLICATE_FLAGS = (MCIERR_BASE + 39)
MCIERR_INVALID_FILE = (MCIERR_BASE + 40)
MCIERR_NULL_PARAMETER_BLOCK = (MCIERR_BASE + 41)
MCIERR_UNNAMED_RESOURCE = (MCIERR_BASE + 42)
MCIERR_NEW_REQUIRES_ALIAS = (MCIERR_BASE + 43)
MCIERR_NOTIFY_ON_AUTO_OPEN = (MCIERR_BASE + 44)
MCIERR_NO_ELEMENT_ALLOWED = (MCIERR_BASE + 45)
MCIERR_NONAPPLICABLE_FUNCTION = (MCIERR_BASE + 46)
MCIERR_ILLEGAL_FOR_AUTO_OPEN = (MCIERR_BASE + 47)
MCIERR_FILENAME_REQUIRED = (MCIERR_BASE + 48)
MCIERR_EXTRA_CHARACTERS = (MCIERR_BASE + 49)
MCIERR_DEVICE_NOT_INSTALLED = (MCIERR_BASE + 50)
MCIERR_GET_CD = (MCIERR_BASE + 51)
MCIERR_SET_CD = (MCIERR_BASE + 52)
MCIERR_SET_DRIVE = (MCIERR_BASE + 53)
MCIERR_DEVICE_LENGTH = (MCIERR_BASE + 54)
MCIERR_DEVICE_ORD_LENGTH = (MCIERR_BASE + 55)
MCIERR_NO_INTEGER = (MCIERR_BASE + 56)
MCIERR_WAVE_OUTPUTSINUSE = (MCIERR_BASE + 64)
MCIERR_WAVE_SETOUTPUTINUSE = (MCIERR_BASE + 65)
MCIERR_WAVE_INPUTSINUSE = (MCIERR_BASE + 66)
MCIERR_WAVE_SETINPUTINUSE = (MCIERR_BASE + 67)
MCIERR_WAVE_OUTPUTUNSPECIFIED = (MCIERR_BASE + 68)
MCIERR_WAVE_INPUTUNSPECIFIED = (MCIERR_BASE + 69)
MCIERR_WAVE_OUTPUTSUNSUITABLE = (MCIERR_BASE + 70)
MCIERR_WAVE_SETOUTPUTUNSUITABLE = (MCIERR_BASE + 71)
MCIERR_WAVE_INPUTSUNSUITABLE = (MCIERR_BASE + 72)
MCIERR_WAVE_SETINPUTUNSUITABLE = (MCIERR_BASE + 73)
MCIERR_SEQ_DIV_INCOMPATIBLE = (MCIERR_BASE + 80)
MCIERR_SEQ_PORT_INUSE = (MCIERR_BASE + 81)
MCIERR_SEQ_PORT_NONEXISTENT = (MCIERR_BASE + 82)
MCIERR_SEQ_PORT_MAPNODEVICE = (MCIERR_BASE + 83)
MCIERR_SEQ_PORT_MISCERROR = (MCIERR_BASE + 84)
MCIERR_SEQ_TIMER = (MCIERR_BASE + 85)
MCIERR_SEQ_PORTUNSPECIFIED = (MCIERR_BASE + 86)
MCIERR_SEQ_NOMIDIPRESENT = (MCIERR_BASE + 87)
MCIERR_NO_WINDOW = (MCIERR_BASE + 90)
MCIERR_CREATEWINDOW = (MCIERR_BASE + 91)
MCIERR_FILE_READ = (MCIERR_BASE + 92)
MCIERR_FILE_WRITE = (MCIERR_BASE + 93)
MCIERR_NO_IDENTITY = (MCIERR_BASE + 94)
MCIERR_CUSTOM_DRIVER_BASE = (MCIERR_BASE + 256)
MCI_FIRST = DRV_MCI_FIRST
MCI_OPEN = 0x0803
MCI_CLOSE = 0x0804
MCI_ESCAPE = 0x0805
MCI_PLAY = 0x0806
MCI_SEEK = 0x0807
MCI_STOP = 0x0808
MCI_PAUSE = 0x0809
MCI_INFO = 0x080A
MCI_GETDEVCAPS = 0x080B
MCI_SPIN = 0x080C
MCI_SET = 0x080D
MCI_STEP = 0x080E
MCI_RECORD = 0x080F
MCI_SYSINFO = 0x0810
MCI_BREAK = 0x0811
MCI_SAVE = 0x0813
MCI_STATUS = 0x0814
MCI_CUE = 0x0830
MCI_REALIZE = 0x0840
MCI_WINDOW = 0x0841
MCI_PUT = 0x0842
MCI_WHERE = 0x0843
MCI_FREEZE = 0x0844
MCI_UNFREEZE = 0x0845
MCI_LOAD = 0x0850
MCI_CUT = 0x0851
MCI_COPY = 0x0852
MCI_PASTE = 0x0853
MCI_UPDATE = 0x0854
MCI_RESUME = 0x0855
MCI_DELETE = 0x0856
MCI_USER_MESSAGES = (DRV_MCI_FIRST + 0x400)
MCI_LAST = 0x0FFF
MCI_DEVTYPE_VCR = 513
MCI_DEVTYPE_VIDEODISC = 514
MCI_DEVTYPE_OVERLAY = 515
MCI_DEVTYPE_CD_AUDIO = 516
MCI_DEVTYPE_DAT = 517
MCI_DEVTYPE_SCANNER = 518
MCI_DEVTYPE_ANIMATION = 519
MCI_DEVTYPE_DIGITAL_VIDEO = 520
MCI_DEVTYPE_OTHER = 521
MCI_DEVTYPE_WAVEFORM_AUDIO = 522
MCI_DEVTYPE_SEQUENCER = 523
MCI_DEVTYPE_FIRST = MCI_DEVTYPE_VCR
MCI_DEVTYPE_LAST = MCI_DEVTYPE_SEQUENCER
MCI_DEVTYPE_FIRST_USER = 0x1000
MCI_MODE_NOT_READY = (MCI_STRING_OFFSET + 12)
MCI_MODE_STOP = (MCI_STRING_OFFSET + 13)
MCI_MODE_PLAY = (MCI_STRING_OFFSET + 14)
MCI_MODE_RECORD = (MCI_STRING_OFFSET + 15)
MCI_MODE_SEEK = (MCI_STRING_OFFSET + 16)
MCI_MODE_PAUSE = (MCI_STRING_OFFSET + 17)
MCI_MODE_OPEN = (MCI_STRING_OFFSET + 18)
MCI_FORMAT_MILLISECONDS = 0
MCI_FORMAT_HMS = 1
MCI_FORMAT_MSF = 2
MCI_FORMAT_FRAMES = 3
MCI_FORMAT_SMPTE_24 = 4
MCI_FORMAT_SMPTE_25 = 5
MCI_FORMAT_SMPTE_30 = 6
MCI_FORMAT_SMPTE_30DROP = 7
MCI_FORMAT_BYTES = 8
MCI_FORMAT_SAMPLES = 9
MCI_FORMAT_TMSF = 10
# Helper "macros" translated from mmsystem.h for unpacking packed time values:
# MSF = minute/second/frame, TMSF = track/minute/second/frame, HMS = hour/minute/second.
# Each field occupies one byte of the 32-bit value, least-significant byte first,
# so successive fields are extracted by shifting 8, 16 or 24 bits.
# NOTE(review): BYTE and WORD are assumed to be masking helpers (x & 0xFF /
# x & 0xFFFF) defined earlier in this module -- confirm. `((BYTE)(msf))` is
# C-cast style but in Python it is simply the call BYTE(msf).
def MCI_MSF_MINUTE(msf): return ((BYTE)(msf))
def MCI_MSF_SECOND(msf): return ((BYTE)(((WORD)(msf)) >> 8))
def MCI_MSF_FRAME(msf): return ((BYTE)((msf)>>16))
def MCI_TMSF_TRACK(tmsf): return ((BYTE)(tmsf))
def MCI_TMSF_MINUTE(tmsf): return ((BYTE)(((WORD)(tmsf)) >> 8))
def MCI_TMSF_SECOND(tmsf): return ((BYTE)((tmsf)>>16))
def MCI_TMSF_FRAME(tmsf): return ((BYTE)((tmsf)>>24))
def MCI_HMS_HOUR(hms): return ((BYTE)(hms))
def MCI_HMS_MINUTE(hms): return ((BYTE)(((WORD)(hms)) >> 8))
def MCI_HMS_SECOND(hms): return ((BYTE)((hms)>>16))
MCI_NOTIFY_SUCCESSFUL = 0x0001
MCI_NOTIFY_SUPERSEDED = 0x0002
MCI_NOTIFY_ABORTED = 0x0004
MCI_NOTIFY_FAILURE = 0x0008
MCI_NOTIFY = 0x00000001
MCI_WAIT = 0x00000002
MCI_FROM = 0x00000004
MCI_TO = 0x00000008
MCI_TRACK = 0x00000010
MCI_OPEN_SHAREABLE = 0x00000100
MCI_OPEN_ELEMENT = 0x00000200
MCI_OPEN_ALIAS = 0x00000400
MCI_OPEN_ELEMENT_ID = 0x00000800
MCI_OPEN_TYPE_ID = 0x00001000
MCI_OPEN_TYPE = 0x00002000
MCI_SEEK_TO_START = 0x00000100
MCI_SEEK_TO_END = 0x00000200
MCI_STATUS_ITEM = 0x00000100
MCI_STATUS_START = 0x00000200
MCI_STATUS_LENGTH = 0x00000001
MCI_STATUS_POSITION = 0x00000002
MCI_STATUS_NUMBER_OF_TRACKS = 0x00000003
MCI_STATUS_MODE = 0x00000004
MCI_STATUS_MEDIA_PRESENT = 0x00000005
MCI_STATUS_TIME_FORMAT = 0x00000006
MCI_STATUS_READY = 0x00000007
MCI_STATUS_CURRENT_TRACK = 0x00000008
MCI_INFO_PRODUCT = 0x00000100
MCI_INFO_FILE = 0x00000200
MCI_INFO_MEDIA_UPC = 0x00000400
MCI_INFO_MEDIA_IDENTITY = 0x00000800
MCI_INFO_NAME = 0x00001000
MCI_INFO_COPYRIGHT = 0x00002000
MCI_GETDEVCAPS_ITEM = 0x00000100
MCI_GETDEVCAPS_CAN_RECORD = 0x00000001
MCI_GETDEVCAPS_HAS_AUDIO = 0x00000002
MCI_GETDEVCAPS_HAS_VIDEO = 0x00000003
MCI_GETDEVCAPS_DEVICE_TYPE = 0x00000004
MCI_GETDEVCAPS_USES_FILES = 0x00000005
MCI_GETDEVCAPS_COMPOUND_DEVICE = 0x00000006
MCI_GETDEVCAPS_CAN_EJECT = 0x00000007
MCI_GETDEVCAPS_CAN_PLAY = 0x00000008
MCI_GETDEVCAPS_CAN_SAVE = 0x00000009
MCI_SYSINFO_QUANTITY = 0x00000100
MCI_SYSINFO_OPEN = 0x00000200
MCI_SYSINFO_NAME = 0x00000400
MCI_SYSINFO_INSTALLNAME = 0x00000800
MCI_SET_DOOR_OPEN = 0x00000100
MCI_SET_DOOR_CLOSED = 0x00000200
MCI_SET_TIME_FORMAT = 0x00000400
MCI_SET_AUDIO = 0x00000800
MCI_SET_VIDEO = 0x00001000
MCI_SET_ON = 0x00002000
MCI_SET_OFF = 0x00004000
MCI_SET_AUDIO_ALL = 0x00000000
MCI_SET_AUDIO_LEFT = 0x00000001
MCI_SET_AUDIO_RIGHT = 0x00000002
MCI_BREAK_KEY = 0x00000100
MCI_BREAK_HWND = 0x00000200
MCI_BREAK_OFF = 0x00000400
MCI_RECORD_INSERT = 0x00000100
MCI_RECORD_OVERWRITE = 0x00000200
MCI_SAVE_FILE = 0x00000100
MCI_LOAD_FILE = 0x00000100
MCI_VD_MODE_PARK = (MCI_VD_OFFSET + 1)
MCI_VD_MEDIA_CLV = (MCI_VD_OFFSET + 2)
MCI_VD_MEDIA_CAV = (MCI_VD_OFFSET + 3)
MCI_VD_MEDIA_OTHER = (MCI_VD_OFFSET + 4)
MCI_VD_FORMAT_TRACK = 0x4001
MCI_VD_PLAY_REVERSE = 0x00010000
MCI_VD_PLAY_FAST = 0x00020000
MCI_VD_PLAY_SPEED = 0x00040000
MCI_VD_PLAY_SCAN = 0x00080000
MCI_VD_PLAY_SLOW = 0x00100000
MCI_VD_SEEK_REVERSE = 0x00010000
MCI_VD_STATUS_SPEED = 0x00004002
MCI_VD_STATUS_FORWARD = 0x00004003
MCI_VD_STATUS_MEDIA_TYPE = 0x00004004
MCI_VD_STATUS_SIDE = 0x00004005
MCI_VD_STATUS_DISC_SIZE = 0x00004006
MCI_VD_GETDEVCAPS_CLV = 0x00010000
MCI_VD_GETDEVCAPS_CAV = 0x00020000
MCI_VD_SPIN_UP = 0x00010000
MCI_VD_SPIN_DOWN = 0x00020000
MCI_VD_GETDEVCAPS_CAN_REVERSE = 0x00004002
MCI_VD_GETDEVCAPS_FAST_RATE = 0x00004003
MCI_VD_GETDEVCAPS_SLOW_RATE = 0x00004004
MCI_VD_GETDEVCAPS_NORMAL_RATE = 0x00004005
MCI_VD_STEP_FRAMES = 0x00010000
MCI_VD_STEP_REVERSE = 0x00020000
MCI_VD_ESCAPE_STRING = 0x00000100
MCI_CDA_STATUS_TYPE_TRACK = 0x00004001
MCI_CDA_TRACK_AUDIO = (MCI_CD_OFFSET + 0)
MCI_CDA_TRACK_OTHER = (MCI_CD_OFFSET + 1)
MCI_WAVE_PCM = (MCI_WAVE_OFFSET + 0)
MCI_WAVE_MAPPER = (MCI_WAVE_OFFSET + 1)
MCI_WAVE_OPEN_BUFFER = 0x00010000
MCI_WAVE_SET_FORMATTAG = 0x00010000
MCI_WAVE_SET_CHANNELS = 0x00020000
MCI_WAVE_SET_SAMPLESPERSEC = 0x00040000
MCI_WAVE_SET_AVGBYTESPERSEC = 0x00080000
MCI_WAVE_SET_BLOCKALIGN = 0x00100000
MCI_WAVE_SET_BITSPERSAMPLE = 0x00200000
MCI_WAVE_INPUT = 0x00400000
MCI_WAVE_OUTPUT = 0x00800000
MCI_WAVE_STATUS_FORMATTAG = 0x00004001
MCI_WAVE_STATUS_CHANNELS = 0x00004002
MCI_WAVE_STATUS_SAMPLESPERSEC = 0x00004003
MCI_WAVE_STATUS_AVGBYTESPERSEC = 0x00004004
MCI_WAVE_STATUS_BLOCKALIGN = 0x00004005
MCI_WAVE_STATUS_BITSPERSAMPLE = 0x00004006
MCI_WAVE_STATUS_LEVEL = 0x00004007
MCI_WAVE_SET_ANYINPUT = 0x04000000
MCI_WAVE_SET_ANYOUTPUT = 0x08000000
MCI_WAVE_GETDEVCAPS_INPUTS = 0x00004001
MCI_WAVE_GETDEVCAPS_OUTPUTS = 0x00004002
MCI_SEQ_DIV_PPQN = (0 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_24 = (1 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_25 = (2 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_30DROP = (3 + MCI_SEQ_OFFSET)
MCI_SEQ_DIV_SMPTE_30 = (4 + MCI_SEQ_OFFSET)
MCI_SEQ_FORMAT_SONGPTR = 0x4001
MCI_SEQ_FILE = 0x4002
MCI_SEQ_MIDI = 0x4003
MCI_SEQ_SMPTE = 0x4004
MCI_SEQ_NONE = 65533
MCI_SEQ_MAPPER = 65535
MCI_SEQ_STATUS_TEMPO = 0x00004002
MCI_SEQ_STATUS_PORT = 0x00004003
MCI_SEQ_STATUS_SLAVE = 0x00004007
MCI_SEQ_STATUS_MASTER = 0x00004008
MCI_SEQ_STATUS_OFFSET = 0x00004009
MCI_SEQ_STATUS_DIVTYPE = 0x0000400A
MCI_SEQ_STATUS_NAME = 0x0000400B
MCI_SEQ_STATUS_COPYRIGHT = 0x0000400C
MCI_SEQ_SET_TEMPO = 0x00010000
MCI_SEQ_SET_PORT = 0x00020000
MCI_SEQ_SET_SLAVE = 0x00040000
MCI_SEQ_SET_MASTER = 0x00080000
MCI_SEQ_SET_OFFSET = 0x01000000
MCI_ANIM_OPEN_WS = 0x00010000
MCI_ANIM_OPEN_PARENT = 0x00020000
MCI_ANIM_OPEN_NOSTATIC = 0x00040000
MCI_ANIM_PLAY_SPEED = 0x00010000
MCI_ANIM_PLAY_REVERSE = 0x00020000
MCI_ANIM_PLAY_FAST = 0x00040000
MCI_ANIM_PLAY_SLOW = 0x00080000
MCI_ANIM_PLAY_SCAN = 0x00100000
MCI_ANIM_STEP_REVERSE = 0x00010000
MCI_ANIM_STEP_FRAMES = 0x00020000
MCI_ANIM_STATUS_SPEED = 0x00004001
MCI_ANIM_STATUS_FORWARD = 0x00004002
MCI_ANIM_STATUS_HWND = 0x00004003
MCI_ANIM_STATUS_HPAL = 0x00004004
MCI_ANIM_STATUS_STRETCH = 0x00004005
MCI_ANIM_INFO_TEXT = 0x00010000
MCI_ANIM_GETDEVCAPS_CAN_REVERSE = 0x00004001
MCI_ANIM_GETDEVCAPS_FAST_RATE = 0x00004002
MCI_ANIM_GETDEVCAPS_SLOW_RATE = 0x00004003
MCI_ANIM_GETDEVCAPS_NORMAL_RATE = 0x00004004
MCI_ANIM_GETDEVCAPS_PALETTES = 0x00004006
MCI_ANIM_GETDEVCAPS_CAN_STRETCH = 0x00004007
MCI_ANIM_GETDEVCAPS_MAX_WINDOWS = 0x00004008
MCI_ANIM_REALIZE_NORM = 0x00010000
MCI_ANIM_REALIZE_BKGD = 0x00020000
MCI_ANIM_WINDOW_HWND = 0x00010000
MCI_ANIM_WINDOW_STATE = 0x00040000
MCI_ANIM_WINDOW_TEXT = 0x00080000
MCI_ANIM_WINDOW_ENABLE_STRETCH = 0x00100000
MCI_ANIM_WINDOW_DISABLE_STRETCH = 0x00200000
MCI_ANIM_WINDOW_DEFAULT = 0x00000000
MCI_ANIM_RECT = 0x00010000
MCI_ANIM_PUT_SOURCE = 0x00020000
MCI_ANIM_PUT_DESTINATION = 0x00040000
MCI_ANIM_WHERE_SOURCE = 0x00020000
MCI_ANIM_WHERE_DESTINATION = 0x00040000
MCI_ANIM_UPDATE_HDC = 0x00020000
MCI_OVLY_OPEN_WS = 0x00010000
MCI_OVLY_OPEN_PARENT = 0x00020000
MCI_OVLY_STATUS_HWND = 0x00004001
MCI_OVLY_STATUS_STRETCH = 0x00004002
MCI_OVLY_INFO_TEXT = 0x00010000
MCI_OVLY_GETDEVCAPS_CAN_STRETCH = 0x00004001
MCI_OVLY_GETDEVCAPS_CAN_FREEZE = 0x00004002
MCI_OVLY_GETDEVCAPS_MAX_WINDOWS = 0x00004003
MCI_OVLY_WINDOW_HWND = 0x00010000
MCI_OVLY_WINDOW_STATE = 0x00040000
MCI_OVLY_WINDOW_TEXT = 0x00080000
MCI_OVLY_WINDOW_ENABLE_STRETCH = 0x00100000
MCI_OVLY_WINDOW_DISABLE_STRETCH = 0x00200000
MCI_OVLY_WINDOW_DEFAULT = 0x00000000
MCI_OVLY_RECT = 0x00010000
MCI_OVLY_PUT_SOURCE = 0x00020000
MCI_OVLY_PUT_DESTINATION = 0x00040000
MCI_OVLY_PUT_FRAME = 0x00080000
MCI_OVLY_PUT_VIDEO = 0x00100000
MCI_OVLY_WHERE_SOURCE = 0x00020000
MCI_OVLY_WHERE_DESTINATION = 0x00040000
MCI_OVLY_WHERE_FRAME = 0x00080000
MCI_OVLY_WHERE_VIDEO = 0x00100000
# DIB driver escape code (from Windows headers).
SELECTDIB = 41
# Build a packed DIB index: the low word is the index `n`, the high word the
# 0x10FF marker value. NOTE(review): MAKELONG is assumed to be defined earlier
# in this module as the usual low-word/high-word combiner -- confirm.
def DIBINDEX(n): return MAKELONG((n),0x10FF)
| gpl-3.0 |
NightKhaos/accustom | accustom/decorators.py | 1 | 13333 | """ Decorators for the accustom library.
This includes two decorators, one for the handler function, and one to apply to any resource handling
functions.
"""
#Exceptions
from .Exceptions import FailedToSendResponseException
from .Exceptions import DataIsNotDictException
from .Exceptions import InvalidResponseStatusException
#Constants
from .constants import RequestType
from .constants import Status
# Required Libraries
import logging
import json
from functools import wraps
from uuid import uuid4
from .response import ResponseObject
import six
logger = logging.getLogger(__name__)
def decorator(enforceUseOfClass=False, hideResourceDeleteFailure=False):
    """Decorate a function to add exception handling and emit CloudFormation responses.

    Usage with Lambda:
        >>> import accustom
        >>> @accustom.decorator()
        ... def function_handler(event, context)
        ...     sum = (float(event['ResourceProperties']['key1']) +
        ...            float(event['ResourceProperties']['key2']))
        ...     return { 'sum' : sum }

    Usage outside Lambda:
        >>> import accustom
        >>> @accustom.decorator()
        ... def function_handler(event)
        ...     sum = (float(event['ResourceProperties']['key1']) +
        ...            float(event['ResourceProperties']['key2']))
        ...     r = accustom.ResponseObject(data={'sum':sum},physicalResourceId='abc')
        ...     return r

    Args:
        enforceUseOfClass: When true send a FAILED signal if a ResponseObject class is not
            utilised. This is implicitly set to true if no Lambda Context is provided.
        hideResourceDeleteFailure: When true will return SUCCESS even on getting an
            Exception for DELETE requests.

    Returns:
        The response object sent to CloudFormation

    Raises:
        FailedToSendResponseException
    """
    def inner_decorator(func):
        @wraps(func)
        def handler_wrapper(event, lambdaContext=None):
            # (typo fix: "recieved" -> "received")
            logger.info('Request received, processing...')
            logger.debug('Request Body:\n' + json.dumps(event))

            try:
                # Run the decorated function, forwarding the Lambda context only
                # when one was supplied (supports use outside of Lambda).
                if lambdaContext is not None:
                    result = func(event, lambdaContext)
                else:
                    result = func(event)
            except Exception as e:
                # If there was an exception thrown by the function, send a failure response.
                # Outside Lambda we must invent a physical resource id ourselves.
                result = ResponseObject(
                    physicalResourceId=uuid4().hex if lambdaContext is None else None,
                    reason='Function %s failed due to exception "%s"' % (func.__name__, str(e)),
                    responseStatus=Status.FAILED)
                logger.error(result.reason)

            if not isinstance(result, ResponseObject):
                # If a ResponseObject is not provided, work out what kind of response object
                # to pass, or return a failure if it is an invalid response type, or if the
                # enforceUseOfClass is explicitly or implicitly set.
                if lambdaContext is None:
                    result = ResponseObject(
                        reason='Response Object of type %s was not a ResponseObject and there is no Lambda Context' % result.__class__,
                        responseStatus=Status.FAILED)
                    logger.error(result.reason)
                elif enforceUseOfClass:
                    result = ResponseObject(
                        reason='Response Object of type %s was not a ResponseObject instance and enforceUseOfClass set to true' % result.__class__,
                        responseStatus=Status.FAILED)
                    logger.error(result.reason)
                elif result is False:
                    # An explicit False return value is treated as a failure signal.
                    result = ResponseObject(
                        reason='Function %s returned False.' % func.__name__,
                        responseStatus=Status.FAILED)
                    logger.debug(result.reason)
                elif isinstance(result, dict):
                    # A plain dict becomes the Data payload of a SUCCESS response.
                    result = ResponseObject(data=result)
                elif isinstance(result, six.string_types):
                    result = ResponseObject(data={'Return' : result})
                elif result is None or result is True:
                    result = ResponseObject()
                else:
                    result = ResponseObject(
                        reason='Return value from Function %s is of unsupported type %s' % (func.__name__, result.__class__),
                        responseStatus=Status.FAILED)
                    logger.error(result.reason)

            # This block will hide resources on delete failure if the flag is set to true
            if event['RequestType'] == RequestType.DELETE and result.responseStatus == Status.FAILED and hideResourceDeleteFailure:
                # logger.warn is deprecated; logger.warning is the supported spelling.
                logger.warning('Hiding Resource DELETE request failure')
                if result.data is not None: logger.debug('Data:\n' + json.dumps(result.data))
                if result.reason is not None: logger.debug('Reason: %s' % result.reason)
                if result.physicalResourceId is not None: logger.debug('PhysicalResourceId: %s' % result.physicalResourceId)
                result = ResponseObject(
                    reason='There may be resources created by this Custom Resource that have not been cleaned up despite the fact this resource is in DELETE_COMPLETE',
                    physicalResourceId=result.physicalResourceId,
                    responseStatus=Status.SUCCESS)

            try:
                returnValue = result.send(event, lambdaContext)
            except Exception as e:
                if isinstance(e, FailedToSendResponseException):
                    # Nothing more we can do -- surface the send failure to the caller.
                    raise e
                # The response itself was malformed; log what is safe to log and
                # fall back to sending an explicit FAILED response instead.
                logger.error('Malformed request, Exception: %s' % str(e))
                if result.data is not None and not isinstance(e, DataIsNotDictException): logger.debug('Data:\n' + json.dumps(result.data))
                if result.reason is not None: logger.debug('Reason: %s' % result.reason)
                if result.physicalResourceId is not None: logger.debug('PhysicalResourceId: %s' % result.physicalResourceId)
                if not isinstance(e, InvalidResponseStatusException): logger.debug('Status: %s' % result.responseStatus)
                result = ResponseObject(
                    reason='Malformed request, Exception: %s' % str(e),
                    physicalResourceId=result.physicalResourceId,
                    responseStatus=Status.FAILED)
                returnValue = result.send(event, lambdaContext)
            return returnValue
        return handler_wrapper
    return inner_decorator
def rdecorator(decoratorHandleDelete=False, expectedProperties=None, genUUID=True):
    """Decorate a function to add input validation for resource handler functions.

    Usage with Lambda:
        >>> import accustom
        >>> @accustom.rdecorator(expectedProperties=['key1','key2'],genUUID=False)
        ... def resource_function(event, context):
        ...     sum = (float(event['ResourceProperties']['key1']) +
        ...            float(event['ResourceProperties']['key2']))
        ...     return { 'sum' : sum }
        >>> @accustom.decorator()
        ... def function_handler(event, context)
        ...     return resource_function(event,context)

    Usage outside Lambda:
        >>> import accustom
        >>> @accustom.rdecorator(expectedProperties=['key1','key2'])
        ... def resource_function(event, context=None)
        ...     sum = (float(event['ResourceProperties']['key1']) +
        ...            float(event['ResourceProperties']['key2']))
        ...     r = accustom.ResponseObject(data={'sum':sum},physicalResourceId=event['PhysicalResourceId'])
        ...     return r
        >>> @accustom.decorator()
        ... def function_handler(event)
        ...     return resource_function(event)

    Args:
        decoratorHandleDelete: When set to true, if a delete request is made in event the
            decorator will return a ResponseObject with SUCCESS without actually executing
            the decorated function.
        genUUID: When set to true, if the PhysicalResourceId in the event is not set,
            automatically generate a UUID4 and put it in the PhysicalResourceId field.
        expectedProperties: Pass in a list or tuple of properties that you want to check for
            before running the decorated function.

    Returns:
        The result of the decorated function, or a ResponseObject with SUCCESS depending on
        the event and flags.

    Raises:
        Any exception raised by the decorated function.
    """
    def resource_decorator_inner(func):
        @wraps(func)
        def resource_decorator_handler(event, context=None):
            logger.info('Supported resource %s' % event['ResourceType'])

            # Set the Physical Resource ID to a randomly generated UUID if it is not present
            if genUUID and 'PhysicalResourceId' not in event:
                event['PhysicalResourceId'] = uuid4().hex
                logger.info('Set PhysicalResourceId to %s' % event['PhysicalResourceId'])

            # Handle Delete when decoratorHandleDelete is set to True
            if decoratorHandleDelete and event['RequestType'] == RequestType.DELETE:
                logger.info('Request type %s detected, returning success without calling function' % RequestType.DELETE)
                return ResponseObject(physicalResourceId=event['PhysicalResourceId'])

            # Validate important properties exist; the enumerate() index in the
            # original loop was unused, so iterate the items directly.
            if expectedProperties is not None and isinstance(expectedProperties, (list, tuple)):
                for item in expectedProperties:
                    if item not in event['ResourceProperties']:
                        errMsg = 'Property %s missing, sending failure signal' % item
                        logger.info(errMsg)
                        return ResponseObject(reason=errMsg, responseStatus=Status.FAILED,
                                              physicalResourceId=event['PhysicalResourceId'])
            # If a list or tuple was not provided then log a warning
            # (logger.warn is deprecated in favour of logger.warning).
            elif expectedProperties is not None:
                logger.warning('expectedProperties passed to decorator is not a list, properties were not validated.')

            # Pre-validation complete, calling function
            return func(event, context)
        return resource_decorator_handler
    return resource_decorator_inner
def sdecorator(decoratorHandleDelete=False, expectedProperties=None, genUUID=True,
               enforceUseOfClass=False, hideResourceDeleteFailure=False):
    """Standalone decorator combining input validation, exception handling and
    CloudFormation response sending.

    Equivalent to stacking :func:`decorator` on top of :func:`rdecorator`.

    Usage with Lambda:
        >>> import accustom
        >>> @accustom.sdecorator(expectedProperties=['key1','key2'],genUUID=False)
        ... def resource_handler(event, context):
        ...     sum = (float(event['ResourceProperties']['key1']) +
        ...            float(event['ResourceProperties']['key2']))
        ...     return { 'sum' : sum }

    Usage outside Lambda:
        >>> import accustom
        >>> @accustom.sdecorator(expectedProperties=['key1','key2'])
        ... def resource_handler(event, context=None)
        ...     sum = (float(event['ResourceProperties']['key1']) +
        ...            float(event['ResourceProperties']['key2']))
        ...     r = accustom.ResponseObject(data={'sum':sum},physicalResourceId=event['PhysicalResourceId'])
        ...     return r

    Args:
        decoratorHandleDelete: When true, DELETE requests return SUCCESS to
            CloudFormation without executing the decorated function.
        genUUID: When true, a UUID4 is generated for any event missing a
            PhysicalResourceId.
        expectedProperties: List or tuple of properties to require before
            running the decorated function.
        enforceUseOfClass: When true send a FAILED signal if a ResponseObject
            class is not utilised; implicitly true without a Lambda Context.
        hideResourceDeleteFailure: When true, report SUCCESS even when a DELETE
            request raises. Redundant if decoratorHandleDelete is true.

    Returns:
        The response object sent to CloudFormation

    Raises:
        FailedToSendResponseException
    """
    def _outer(func):
        @wraps(func)
        @decorator(enforceUseOfClass=enforceUseOfClass,
                   hideResourceDeleteFailure=hideResourceDeleteFailure)
        @rdecorator(decoratorHandleDelete=decoratorHandleDelete,
                    expectedProperties=expectedProperties, genUUID=genUUID)
        def _handler(event, lambdaContext=None):
            return func(event, lambdaContext)
        return _handler
    return _outer
| mit |
orvi2014/kitsune | kitsune/products/tests/test_models.py | 13 | 1356 | from nose.tools import eq_
from kitsune.products.tests import product, topic
from kitsune.sumo.tests import TestCase
class TopicModelTests(TestCase):
    def test_path(self):
        """Verify that the path property works."""
        p = product(slug='p', save=True)
        t1 = topic(product=p, slug='t1', save=True)
        t2 = topic(product=p, slug='t2', parent=t1, save=True)
        t3 = topic(product=p, slug='t3', parent=t2, save=True)

        # path should list the slugs from the root topic down to this one.
        eq_(t1.path, [t1.slug])
        eq_(t2.path, [t1.slug, t2.slug])
        eq_(t3.path, [t1.slug, t2.slug, t3.slug])

    def test_absolute_url(self):
        # A top-level topic URL has the form /products/<product>/<topic>.
        p = product(save=True)
        t = topic(product=p, save=True)
        expected = '/products/{p}/{t}'.format(p=p.slug, t=t.slug)
        actual = t.get_absolute_url()
        eq_(actual, expected)

    def test_absolute_url_subtopic(self):
        # A subtopic URL includes its parent: /products/<product>/<t1>/<t2>.
        p = product(save=True)
        t1 = topic(product=p, save=True)
        t2 = topic(parent=t1, product=p, save=True)
        expected = '/products/{p}/{t1}/{t2}'.format(p=p.slug, t1=t1.slug, t2=t2.slug)
        actual = t2.get_absolute_url()
        eq_(actual, expected)
class ProductModelTests(TestCase):
    def test_absolute_url(self):
        # A product URL is simply /products/<slug>.
        p = product(save=True)
        expected = '/products/{p}'.format(p=p.slug)
        actual = p.get_absolute_url()
        eq_(actual, expected)
HummingbirdTeam/ardupilot | Tools/LogAnalyzer/tests/TestVCC.py | 218 | 1278 | from LogAnalyzer import Test,TestResult
import DataflashLog
import collections
class TestVCC(Test):
    '''test for VCC within recommendations, or abrupt end to log in flight'''

    def __init__(self):
        Test.__init__(self)
        self.name = "VCC"

    def run(self, logdata, verbose):
        """Populate self.result from the CURR channel's Vcc samples."""
        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        if "CURR" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No CURR log data"
            return

        # just a naive min/max test for now
        vccMin = logdata.channels["CURR"]["Vcc"].min()
        vccMax = logdata.channels["CURR"]["Vcc"].max()
        vccDiff = vccMax - vccMin
        # Thresholds expressed in the channel's units.
        # NOTE(review): the "* 1000" / "/ 1000.0" pairs suggest Vcc is logged
        # in millivolts -- confirm against the dataflash log format.
        vccMinThreshold = 4.6 * 1000
        vccMaxDiff = 0.3 * 1000

        if vccDiff > vccMaxDiff:
            self.result.status = TestResult.StatusType.WARN
            self.result.statusMessage = "VCC min/max diff %sv, should be <%sv" % (vccDiff / 1000.0, vccMaxDiff / 1000.0)
        elif vccMin < vccMinThreshold:
            self.result.status = TestResult.StatusType.FAIL
            # The original used Python 2-only backtick repr() syntax here,
            # which is a SyntaxError on Python 3; plain % formatting renders
            # the same values and is also consistent with the WARN branch.
            self.result.statusMessage = "VCC below minimum of %sv (%sv)" % (vccMinThreshold / 1000.0, vccMin / 1000.0)
gerv/bedrock | bedrock/security/tests/test_views.py | 5 | 5061 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from mock import patch
from nose.tools import eq_, ok_
from product_details import product_details
from product_details.version_compare import Version
from bedrock.mozorg.tests import TestCase
from bedrock.security.models import Product
from bedrock.security.views import ProductView, ProductVersionView, product_is_obsolete
# Pin the product_details version data so the obsolescence checks below are
# deterministic and do not depend on the live product-details content.
@patch.object(product_details, 'firefox_versions', {'LATEST_FIREFOX_VERSION': '33.0',
                                                    'FIREFOX_ESR': '31.2.0'})
@patch.object(product_details, 'thunderbird_versions', {'LATEST_THUNDERBIRD_VERSION': '31.2.0'})
def test_product_is_obsolete():
    # Versions older than the supported releases patched above are obsolete.
    ok_(product_is_obsolete('firefox', '3.6'))
    ok_(product_is_obsolete('firefox', '32'))
    ok_(product_is_obsolete('firefox-esr', '17.0'))
    ok_(product_is_obsolete('thunderbird', '30'))
    ok_(product_is_obsolete('seamonkey', '2.0'))
    ok_(product_is_obsolete('seamonkey', '2.19'))
    # NOTE(review): an unrecognized product apparently defaults to obsolete
    # regardless of version -- confirm against product_is_obsolete's fallback.
    ok_(product_is_obsolete('other-things', '3000'))
    # Current / still-supported versions are not obsolete.
    ok_(not product_is_obsolete('firefox', '33.0.2'))
    ok_(not product_is_obsolete('firefox', '34.0'))
    ok_(not product_is_obsolete('firefox-esr', '31.0'))
    ok_(not product_is_obsolete('thunderbird', '31'))
    ok_(not product_is_obsolete('seamonkey', '2.30'))
class TestViews(TestCase):
    def setUp(self):
        # A spread of Firefox versions (including point releases) used by both
        # the minimum-version test and the major/minor filtering tests below.
        pvnames = [
            'Firefox 3.6',
            'Firefox 4.0',
            'Firefox 4.0.1',
            'Firefox 4.2',
            'Firefox 4.2.3',
            'Firefox 24.0',
        ]
        self.pvs = [Product.objects.create(name=pv) for pv in pvnames]

    def test_product_view_min_version(self):
        """Should not include versions below minimum."""
        pview = ProductView()
        pview.kwargs = {'slug': 'firefox'}
        # Results are expected newest-first, excluding anything older than
        # the patched minimum version.
        with patch.dict(pview.minimum_versions, {'firefox': Version('4.2')}):
            self.assertListEqual(pview.get_queryset(),
                                 [self.pvs[5], self.pvs[4], self.pvs[3]])
        with patch.dict(pview.minimum_versions, {'firefox': Version('22.0')}):
            self.assertListEqual(pview.get_queryset(), [self.pvs[5]])

    def test_product_version_view_filter_major(self):
        """Given a major version should return all minor versions."""
        pview = ProductVersionView()
        pview.kwargs = {'product': 'firefox', 'version': '4'}
        # All 4.x releases, newest first; 3.6 and 24.0 are filtered out.
        self.assertListEqual(pview.get_queryset(),
                             [self.pvs[4], self.pvs[3], self.pvs[2], self.pvs[1]])

    def test_product_version_view_filter_minor(self):
        """Given a minor version should return all point versions."""
        pview = ProductVersionView()
        pview.kwargs = {'product': 'firefox', 'version': '4.2'}
        self.assertListEqual(pview.get_queryset(), [self.pvs[4], self.pvs[3]])
class TestKVRedirects(TestCase):
    def _test_names(self, url_component, expected):
        # old urls lack '/en-US' prefix, but that will be the first redirect.
        path = '/en-US/security/known-vulnerabilities/{0}.html'.format(url_component)
        resp = self.client.get(path)
        eq_(resp.status_code, 301)
        # Compare only the slug segment of the redirect target URL.
        eq_(expected, resp['Location'].split('/')[-2])

    def test_correct_redirects(self):
        # Old-style concatenated names map to hyphenated/dotted slugs.
        self._test_names('firefox', 'firefox')
        self._test_names('firefoxESR', 'firefox-esr')
        self._test_names('firefox20', 'firefox-2.0')
        self._test_names('thunderbird15', 'thunderbird-1.5')
        self._test_names('suite17', 'mozilla-suite')

    def test_spaces_removed(self):
        """Should succeed even if accidental spaces are in the URL.

        Bug 1171181.
        """
        # '%20%20' encodes two stray spaces inside the old-style name.
        self._test_names('firefox3%20%200', 'firefox-3.0')

    def test_unknown_is_404(self):
        """Should 410 instead of 500 if an unknown url matches the redirector.

        Bug 1171181.
        """
        # NOTE(review): the method name says 404 but the asserted (and
        # documented) behavior is 410 Gone -- consider renaming the method.
        path = '/en-US/security/known-vulnerabilities/the-dude-abides-15.html'
        resp = self.client.get(path)
        eq_(resp.status_code, 410)
class TestOldAdvisories(TestCase):
    def _test_redirect(self, path, expected):
        # old urls lack '/en-US' prefix, but that will be the first redirect.
        resp = self.client.get('/en-US' + path)
        eq_(resp.status_code, 301)
        ok_(resp['Location'].endswith(expected))

    def test_old_urls(self):
        """Should redirect old URLs properly."""
        # Several historical URL layouts all redirect to the canonical
        # /security/advisories/mfsaYYYY-NN/ form.
        self._test_redirect('/security/announce/mfsa2005-31.html',
                            '/security/advisories/mfsa2005-31/')
        self._test_redirect('/security/announce/2005/mfsa2005-40.html',
                            '/security/advisories/mfsa2005-40/')
        self._test_redirect('/security/advisories/2008/mfsa2008-47.html',
                            '/security/advisories/mfsa2008-47/')
        self._test_redirect('/security/advisories/mfsa2008-66/mfsa2008-37.html',
                            '/security/advisories/mfsa2008-37/')
| mpl-2.0 |
mahendra-r/edx-platform | lms/djangoapps/instructor_task/api.py | 27 | 19795 | """
API for submitting background tasks by an instructor for a course.
Also includes methods for getting information about tasks that have
already been submitted, filtered either by running state or input
arguments.
"""
import hashlib
from celery.states import READY_STATES
from xmodule.modulestore.django import modulestore
from instructor_task.models import InstructorTask
from instructor_task.tasks import (
rescore_problem,
reset_problem_attempts,
delete_problem_state,
send_bulk_course_email,
calculate_problem_responses_csv,
calculate_grades_csv,
calculate_problem_grade_report,
calculate_students_features_csv,
cohort_students,
enrollment_report_features_csv,
calculate_may_enroll_csv,
exec_summary_report_csv,
generate_certificates,
proctored_exam_results_csv
)
from instructor_task.api_helper import (
check_arguments_for_rescoring,
encode_problem_and_student_input,
encode_entrance_exam_and_student_input,
check_entrance_exam_problems_for_rescoring,
submit_task,
)
from bulk_email.models import CourseEmail
def get_running_instructor_tasks(course_id):
    """
    Returns a query of InstructorTask objects of running tasks for a given course.

    Used to generate a list of tasks to display on the instructor dashboard.
    """
    # A task is "running" if its state is not one of celery's READY_STATES
    # (e.g. success, failure, revoked).  A single task_state__in exclusion is
    # equivalent to the original loop of chained per-state exclude() calls.
    return InstructorTask.objects.filter(course_id=course_id).exclude(
        task_state__in=READY_STATES,
    ).order_by('-id')
def get_instructor_task_history(course_id, usage_key=None, student=None, task_type=None):
    """
    Return historical InstructorTask objects for a course, newest first.

    Optionally narrows the result to tasks matching a particular problem
    (`usage_key`), a particular `student`, and/or a particular `task_type`.
    """
    query = InstructorTask.objects.filter(course_id=course_id)
    if usage_key is not None or student is not None:
        # The task_key encodes the problem/student pair used for filtering.
        _, task_key = encode_problem_and_student_input(usage_key, student)
        query = query.filter(task_key=task_key)
    if task_type is not None:
        query = query.filter(task_type=task_type)
    return query.order_by('-id')
def get_entrance_exam_instructor_task_history(course_id, usage_key=None, student=None):  # pylint: disable=invalid-name
    """
    Return historical InstructorTask objects for a course, newest first,
    optionally narrowed to a particular entrance exam and/or student.
    """
    query = InstructorTask.objects.filter(course_id=course_id)
    if usage_key is not None or student is not None:
        # The task_key encodes the entrance-exam/student pair for filtering.
        _, task_key = encode_entrance_exam_and_student_input(usage_key, student)
        query = query.filter(task_key=task_key)
    return query.order_by('-id')
# Disabling invalid-name because this fn name is longer than 30 chars.
def submit_rescore_problem_for_student(request, usage_key, student):  # pylint: disable=invalid-name
    """
    Request a background rescore of a single problem for a single student.

    Parameters are the `request`, the `usage_key` locating the problem
    (i4x-type notation), and the `student` as a User object.

    ItemNotFoundException is raised if the problem doesn't exist,
    AlreadyRunningError if the problem is already being rescored for this
    student, and NotImplementedError if the problem doesn't support rescoring.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view that is wrapped by TransactionMiddleware, and thus in a
    "commit-on-success" transaction, an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here,
    so future database operations take place in a separate transaction.
    """
    # Validate the problem and its rescoring support; any exception
    # propagates to the caller.
    check_arguments_for_rescoring(usage_key)

    task_input, task_key = encode_problem_and_student_input(usage_key, student)
    return submit_task(
        request, 'rescore_problem', rescore_problem,
        usage_key.course_key, task_input, task_key)
def submit_rescore_problem_for_all_students(request, usage_key):  # pylint: disable=invalid-name
    """
    Request a background rescore of a problem for all students.

    The problem is rescored for every student who has accessed it in the
    course and has provided and checked an answer.  The `usage_key` locates
    the problem using i4x-type notation.

    ItemNotFoundException is raised if the problem doesn't exist,
    AlreadyRunningError if the problem is already being rescored, and
    NotImplementedError if the problem doesn't support rescoring.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view that is wrapped by TransactionMiddleware, and thus in a
    "commit-on-success" transaction, an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here,
    so future database operations take place in a separate transaction.
    """
    # Validate the problem and its rescoring support; any exception
    # propagates to the caller.
    check_arguments_for_rescoring(usage_key)

    # Encoding without a student reserves the task for the whole course.
    task_input, task_key = encode_problem_and_student_input(usage_key)
    return submit_task(
        request, 'rescore_problem', rescore_problem,
        usage_key.course_key, task_input, task_key)
def submit_rescore_entrance_exam_for_student(request, usage_key, student=None):  # pylint: disable=invalid-name
    """
    Request a background rescore of entrance-exam problems.

    The problems are rescored for the given `student`, or — when `student` is
    None — for every student who has accessed the entrance exam.  `usage_key`
    must be a :class:`Location` representing the entrance exam section.

    ItemNotFoundError is raised if no entrance exam exists for the given
    usage_key, AlreadyRunningError if the entrance exam is already being
    rescored, and NotImplementedError if a problem doesn't support rescoring.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view that is wrapped by TransactionMiddleware, and thus in a
    "commit-on-success" transaction, an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here,
    so future database operations take place in a separate transaction.
    """
    # Validate all the exam's problems for rescoring; any exception
    # propagates to the caller.
    check_entrance_exam_problems_for_rescoring(usage_key)

    task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
    return submit_task(
        request, 'rescore_problem', rescore_problem,
        usage_key.course_key, task_input, task_key)
def submit_reset_problem_attempts_for_all_students(request, usage_key):  # pylint: disable=invalid-name
    """
    Request to have attempts reset for a problem as a background task.

    The problem's attempts will be reset for all students who have accessed
    the particular problem in a course.  `usage_key` must be a
    :class:`Location`.

    Raises ItemNotFoundException if the problem doesn't exist, or
    AlreadyRunningError if the problem is already being reset.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view wrapped by TransactionMiddleware (and thus in a
    "commit-on-success" transaction), an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here;
    any future database operations will take place in a separate transaction.
    """
    # Make sure the usage_key refers to an existing module descriptor (it is
    # currently typed in by the user); get_item raises if it does not, and
    # the exception passes up to the caller.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_problem_and_student_input(usage_key)
    return submit_task(
        request,
        'reset_problem_attempts',
        reset_problem_attempts,
        usage_key.course_key,
        task_input,
        task_key,
    )
def submit_reset_problem_attempts_in_entrance_exam(request, usage_key, student):  # pylint: disable=invalid-name
    """
    Request to have attempts reset for an entrance exam as a background task.

    Problem attempts for all problems in the entrance exam will be reset for
    the specified student; when `student` is None, attempts are reset for all
    students.  `usage_key` must be a :class:`Location` representing the
    entrance exam section, and `student` is a User object.

    Raises ItemNotFoundError if no entrance exam exists for the given
    usage_key, and AlreadyRunningError if the entrance exam is already being
    reset.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view wrapped by TransactionMiddleware (and thus in a
    "commit-on-success" transaction), an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here;
    any future database operations will take place in a separate transaction.
    """
    # Make sure an entrance exam section exists at the given usage_key;
    # get_item raises otherwise and the exception passes up to the caller.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
    return submit_task(
        request,
        'reset_problem_attempts',
        reset_problem_attempts,
        usage_key.course_key,
        task_input,
        task_key,
    )
def submit_delete_problem_state_for_all_students(request, usage_key):  # pylint: disable=invalid-name
    """
    Request to have state deleted for a problem as a background task.

    The problem's state will be deleted for all students who have accessed
    the particular problem in a course.  `usage_key` must be a
    :class:`Location`.

    Raises ItemNotFoundException if the problem doesn't exist, or
    AlreadyRunningError if the particular problem's state is already being
    deleted.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view wrapped by TransactionMiddleware (and thus in a
    "commit-on-success" transaction), an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here;
    any future database operations will take place in a separate transaction.
    """
    # Make sure the usage_key refers to an existing module descriptor;
    # get_item raises otherwise and the exception passes up to the caller.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_problem_and_student_input(usage_key)
    return submit_task(
        request,
        'delete_problem_state',
        delete_problem_state,
        usage_key.course_key,
        task_input,
        task_key,
    )
def submit_delete_entrance_exam_state_for_student(request, usage_key, student):  # pylint: disable=invalid-name
    """
    Requests reset of state for an entrance exam as a background task.

    Module state for all problems in the entrance exam will be deleted for
    the specified student.  `usage_key` must be a :class:`Location`
    representing the entrance exam section, and `student` is a User object.

    Raises ItemNotFoundError if no entrance exam exists for the given
    usage_key, and AlreadyRunningError if the entrance exam is already being
    reset.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view wrapped by TransactionMiddleware (and thus in a
    "commit-on-success" transaction), an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here;
    any future database operations will take place in a separate transaction.
    """
    # Make sure an entrance exam section exists at the given usage_key;
    # get_item raises otherwise and the exception passes up to the caller.
    modulestore().get_item(usage_key)
    task_input, task_key = encode_entrance_exam_and_student_input(usage_key, student)
    return submit_task(
        request,
        'delete_problem_state',
        delete_problem_state,
        usage_key.course_key,
        task_input,
        task_key,
    )
def submit_bulk_course_email(request, course_key, email_id):
    """
    Request to have bulk email sent as a background task.

    The specified CourseEmail object will be sent to all students who have
    enrolled in the course.  Parameters are the `course_key` and the
    `email_id`, the id of the CourseEmail object.

    Raises AlreadyRunningError if the same recipients are already being
    emailed with the same CourseEmail object.

    This method makes sure the InstructorTask entry is committed.  When called
    from any view wrapped by TransactionMiddleware (and thus in a
    "commit-on-success" transaction), an autocommit buried within here will
    cause any pending transaction to be committed by a successful save here;
    any future database operations will take place in a separate transaction.
    """
    # Assume that the course is defined, and that the user has already been
    # verified to have appropriate access to the course.  But make sure that
    # the email exists.  We also pull out the To argument here, so that it is
    # displayed in the InstructorTask status.
    email_obj = CourseEmail.objects.get(id=email_id)
    to_option = email_obj.to_option
    task_type = 'bulk_course_email'
    task_class = send_bulk_course_email
    # Pass in the to_option as a separate argument, even though it's (currently)
    # in the CourseEmail.  That way it's visible in the progress status.
    # (At some point in the future, we might take the recipient out of the
    # CourseEmail, so that the same saved email can be sent to different
    # recipients, as it is tested.)
    task_input = {'email_id': email_id, 'to_option': to_option}
    task_key_stub = "{email_id}_{to_option}".format(email_id=email_id, to_option=to_option)
    # Create the key value by using an MD5 hash.  hashlib requires a
    # bytes-like object: passing a str raises TypeError on Python 3, so
    # encode explicitly (explicit encoding is also correct on Python 2).
    task_key = hashlib.md5(task_key_stub.encode('utf-8')).hexdigest()
    return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_calculate_problem_responses_csv(request, course_key, problem_location):  # pylint: disable=invalid-name
    """
    Submits a task to generate a CSV file containing all student
    answers to a given problem.

    Raises AlreadyRunningError if said file is already being updated.
    """
    # Empty task_key: at most one of these tasks runs per course at a time.
    return submit_task(
        request,
        'problem_responses_csv',
        calculate_problem_responses_csv,
        course_key,
        {'problem_location': problem_location},
        "",
    )
def submit_calculate_grades_csv(request, course_key):
    """
    Submits a task to generate a CSV grade report for the course.

    Raises AlreadyRunningError if the course's grades are already being
    updated.
    """
    # Empty task_key: at most one grade report runs per course at a time.
    return submit_task(
        request,
        'grade_course',
        calculate_grades_csv,
        course_key,
        {},
        "",
    )
def submit_problem_grade_report(request, course_key):
    """
    Submits a task to generate a CSV grade report containing problem
    values.
    """
    # Empty task_key: at most one problem grade report per course at a time.
    return submit_task(
        request,
        'grade_problems',
        calculate_problem_grade_report,
        course_key,
        {},
        "",
    )
def submit_calculate_students_features_csv(request, course_key, features):
    """
    Submits a task to generate a CSV containing student profile info.

    Raises AlreadyRunningError if said CSV is already being updated.
    """
    # Empty task_key: at most one profile-info export per course at a time.
    return submit_task(
        request,
        'profile_info_csv',
        calculate_students_features_csv,
        course_key,
        {'features': features},
        "",
    )
def submit_detailed_enrollment_features_csv(request, course_key):  # pylint: disable=invalid-name
    """
    Submits a task to generate a CSV containing detailed enrollment info.

    Raises AlreadyRunningError if said CSV is already being updated.
    """
    # Empty task_key: at most one enrollment report per course at a time.
    return submit_task(
        request,
        'detailed_enrollment_report',
        enrollment_report_features_csv,
        course_key,
        {},
        "",
    )
def submit_calculate_may_enroll_csv(request, course_key, features):
    """
    Submits a task to generate a CSV file containing information about
    invited students who have not enrolled in a given course yet.

    Raises AlreadyRunningError if said file is already being updated.
    """
    # Empty task_key: at most one may-enroll export per course at a time.
    return submit_task(
        request,
        'may_enroll_info_csv',
        calculate_may_enroll_csv,
        course_key,
        {'features': features},
        "",
    )
def submit_executive_summary_report(request, course_key):  # pylint: disable=invalid-name
    """
    Submits a task to generate an HTML file containing the executive summary
    report.

    Raises AlreadyRunningError if the HTML file is already being updated.
    """
    # Empty task_key: at most one executive summary per course at a time.
    return submit_task(
        request,
        'exec_summary_report',
        exec_summary_report_csv,
        course_key,
        {},
        "",
    )
def submit_proctored_exam_results_report(request, course_key, features):  # pylint: disable=invalid-name
    """
    Submits a task to generate a CSV file containing proctored exam results.

    Raises AlreadyRunningError if the report is already being generated.
    """
    task_type = 'proctored_exam_results_report'
    task_class = proctored_exam_results_csv
    task_input = {'features': features}
    # Empty task_key: at most one of these reports runs per course at a time.
    task_key = ""
    return submit_task(request, task_type, task_class, course_key, task_input, task_key)
def submit_cohort_students(request, course_key, file_name):
    """
    Request to have students cohorted in bulk.

    Raises AlreadyRunningError if students are currently being cohorted.
    """
    # Empty task_key: at most one bulk-cohort task per course at a time.
    return submit_task(
        request,
        'cohort_students',
        cohort_students,
        course_key,
        {'file_name': file_name},
        "",
    )
def generate_certificates_for_all_students(request, course_key):  # pylint: disable=invalid-name
    """
    Submits a task to generate certificates for all students enrolled in the
    course.

    Raises AlreadyRunningError if certificates are currently being generated.
    """
    # Empty task_key: at most one certificate-generation task per course.
    return submit_task(
        request,
        'generate_certificates_all_student',
        generate_certificates,
        course_key,
        {},
        "",
    )
| agpl-3.0 |
dongjoon-hyun/tensorflow | tensorflow/contrib/tensorrt/test/biasadd_matmul_test.py | 3 | 4995 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import test
class BiasaddMatMulTest(trt_test.TfTrtIntegrationTestBase):
  """Tests TF-TRT conversion of a graph mixing MatMul and BiasAdd variants."""

  def _ConstOp(self, shape):
    # Random float32 constant used as weights/bias for the ops below.
    return constant_op.constant(np.random.randn(*shape), dtype=dtypes.float32)

  def GetParams(self):
    """Testing conversion of BiasAdd MatMul in TF-TRT conversion."""
    input_name = "input"
    input_matrix_rows = 4
    input_matrix_columns = 144
    input_dims = [input_matrix_rows, input_matrix_columns]
    output_name = "output"
    g = ops.Graph()
    with g.as_default():
      x = array_ops.placeholder(
          dtype=dtypes.float32, shape=input_dims, name=input_name)
      # x1: plain MatMul followed by a broadcast add.
      b = self._ConstOp((input_matrix_columns, 4))
      x1 = math_ops.matmul(x, b)
      b = self._ConstOp((1, 4))
      x1 = x1 + b
      # x2: transpose_a MatMul, fenced off by TRT-incompatible ops.
      b = self._ConstOp((input_matrix_rows, 144))
      x2 = self.trt_incompatible_op(x)
      x2 = math_ops.matmul(x2, b, transpose_a=True)
      x2 = gen_array_ops.reshape(x2, [4, -1])
      x2 = self.trt_incompatible_op(x2)
      # x3: transpose_b MatMul.
      b = self._ConstOp((4, input_matrix_columns))
      x3 = math_ops.matmul(x, b, transpose_b=True)
      # x4: both operands transposed, fenced off by TRT-incompatible ops.
      b = self._ConstOp((16, input_matrix_rows))
      x4 = self.trt_incompatible_op(x)
      x4 = math_ops.matmul(x4, b, transpose_b=True, transpose_a=True)
      x4 = gen_array_ops.reshape(x4, [4, -1])
      x4 = self.trt_incompatible_op(x4)
      # x5: MatMul followed by an explicit BiasAdd.
      b = self._ConstOp((input_matrix_columns, 48))
      x5 = math_ops.matmul(x, b)
      b = self._ConstOp((48,))
      x5 = nn.bias_add(x5, b)
      x5 = gen_array_ops.reshape(x5, [4, -1])
      # x6-x11: BiasAdd on rank-3/4/5 inputs in NHWC and NCHW data formats.
      x6 = gen_array_ops.reshape(x, [4, 12, 12])
      b = self._ConstOp((12,))
      x6 = nn.bias_add(x6, b, data_format="NHWC")
      x6 = gen_array_ops.reshape(x6, [4, -1])
      x7 = gen_array_ops.reshape(x, [4, 12, 3, 4])
      b = self._ConstOp((4,))
      x7 = nn.bias_add(x7, b, data_format="NHWC")
      x7 = gen_array_ops.reshape(x7, [4, -1])
      x8 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
      b = self._ConstOp((2,))
      x8 = nn.bias_add(x8, b, data_format="NHWC")
      x8 = gen_array_ops.reshape(x8, [4, -1])
      x9 = gen_array_ops.reshape(x, [4, 12, 3, 2, 2])
      b = self._ConstOp((12,))
      x9 = nn.bias_add(x9, b, data_format="NCHW")
      x9 = gen_array_ops.reshape(x9, [4, -1])
      x10 = gen_array_ops.reshape(x, [4, 12, 3, 4])
      b = self._ConstOp((12,))
      x10 = nn.bias_add(x10, b, data_format="NCHW")
      x10 = gen_array_ops.reshape(x10, [4, -1])
      x11 = gen_array_ops.reshape(x, [4, 12, 12])
      b = self._ConstOp((12,))
      x11 = nn.bias_add(x11, b, data_format="NCHW")
      x11 = gen_array_ops.reshape(x11, [4, -1])
      # Concatenate every branch into a single flat output tensor.
      out = array_ops.concat([x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11],
                             axis=-1)
      out = array_ops.squeeze(out, name=output_name)
    return trt_test.TfTrtIntegrationTestParams(
        gdef=g.as_graph_def(),
        input_names=[input_name],
        input_dims=[input_dims],
        output_names=[output_name],
        expected_output_dims=[(4, 6680)])

  def GetConversionParams(self, run_params):
    """Return a ConversionParams for test."""
    return super(BiasaddMatMulTest,
                 self).GetConversionParams(run_params)._replace(
                     max_batch_size=4, maximum_cached_engines=1)

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return ["my_trt_op_0"]

  def ShouldRunTest(self, run_params):
    """Whether to run the test."""
    # TODO(aaroey): Trt 4.0 forbids conversion for tensors with rank <3 in int8
    # mode, which is a bug. Re-enable this when trt library is fixed.
    return not trt_test.IsQuantizationMode(run_params.precision_mode)
# Run via the TensorFlow test harness when executed as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
gennad/Django-nonrel-stub-for-Google-App-Engine | django/templatetags/static.py | 233 | 2149 | from django import template
from django.utils.encoding import iri_to_uri
register = template.Library()
class PrefixNode(template.Node):
    """Template node that resolves a settings-based URL prefix.

    Renders the prefix directly, or stores it in the context when the tag
    was written as ``{% tag as varname %}``.
    """

    def __repr__(self):
        return "<PrefixNode for %r>" % self.name

    def __init__(self, varname=None, name=None):
        if name is None:
            raise template.TemplateSyntaxError(
                "Prefix nodes must be given a name to return.")
        self.varname = varname
        self.name = name

    @classmethod
    def handle_token(cls, parser, token, name):
        """
        Class method to parse prefix node and return a Node.

        Accepts ``{% tag %}`` or ``{% tag as varname %}`` and raises
        TemplateSyntaxError for any other form.
        """
        tokens = token.contents.split()
        if len(tokens) > 1 and tokens[1] != 'as':
            raise template.TemplateSyntaxError(
                "First argument in '%s' must be 'as'" % tokens[0])
        if len(tokens) > 1:
            # Guard against '{% tag as %}' with no variable name, which
            # previously escaped as an unhelpful IndexError.
            if len(tokens) < 3:
                raise template.TemplateSyntaxError(
                    "Usage: {%% %s [as varname] %%}" % tokens[0])
            varname = tokens[2]
        else:
            varname = None
        return cls(varname, name)

    @classmethod
    def handle_simple(cls, name):
        # Resolve the named setting to an IRI-encoded prefix; fall back to
        # the empty string when settings are unavailable.
        try:
            from django.conf import settings
        except ImportError:
            prefix = ''
        else:
            prefix = iri_to_uri(getattr(settings, name, ''))
        return prefix

    def render(self, context):
        prefix = self.handle_simple(self.name)
        if self.varname is None:
            return prefix
        # 'as varname' form: stash the prefix and emit nothing.
        context[self.varname] = prefix
        return ''
@register.tag
def get_static_prefix(parser, token):
    """
    Populates a template variable with the static prefix,
    ``settings.STATIC_URL``.

    Usage::

        {% get_static_prefix [as varname] %}

    Examples::

        {% get_static_prefix %}
        {% get_static_prefix as static_prefix %}

    """
    # Parsing and rendering are delegated to the shared PrefixNode.
    return PrefixNode.handle_token(parser, token, "STATIC_URL")
@register.tag
def get_media_prefix(parser, token):
    """
    Populates a template variable with the media prefix,
    ``settings.MEDIA_URL``.

    Usage::

        {% get_media_prefix [as varname] %}

    Examples::

        {% get_media_prefix %}
        {% get_media_prefix as media_prefix %}

    """
    # Parsing and rendering are delegated to the shared PrefixNode.
    return PrefixNode.handle_token(parser, token, "MEDIA_URL")
| bsd-3-clause |
whitergh/brainx | doc/sphinxext/only_directives.py | 15 | 2109 | #
# A pair of directives for inserting content that will only appear in
# either html or latex.
#
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
class only_base(Body, Element):
    # Base class for builder-conditional content nodes.  ``dont_traverse``
    # is assigned over ``traverse`` at build time (see builder_inited) so
    # that writers skip the node's subtree entirely.
    def dont_traverse(self, *args, **kwargs):
        return []
class html_only(only_base):
    # Node whose content should appear only in HTML output.
    pass
class latex_only(only_base):
    # Node whose content should appear only in LaTeX output.
    pass
def run(content, node_class, state, content_offset):
    """Wrap ``content`` in a new ``node_class`` node, parse it, and return [node]."""
    node = node_class('\n'.join(content))
    state.nested_parse(content, content_offset, node)
    return [node]
def html_only_directive(name, arguments, options, content, lineno,
                        content_offset, block_text, state, state_machine):
    # docutils directive entry point: wrap content in an html_only node.
    return run(content, html_only, state, content_offset)
def latex_only_directive(name, arguments, options, content, lineno,
                         content_offset, block_text, state, state_machine):
    # docutils directive entry point: wrap content in a latex_only node.
    return run(content, latex_only, state, content_offset)
def builder_inited(app):
    # Disable traversal of the node type that does NOT match the active
    # builder, so its content is dropped from that builder's output.
    # NOTE(review): currently unused -- see the commented-out app.connect
    # call in setup() below.
    if app.builder.name == 'html':
        latex_only.traverse = only_base.dont_traverse
    else:
        html_only.traverse = only_base.dont_traverse
def setup(app):
    """Sphinx extension entry point: register the two directives and nodes."""
    app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0))
    app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0))
    app.add_node(html_only)
    app.add_node(latex_only)
    # This will *really* never see the light of day As it turns out,
    # this results in "broken" image nodes since they never get
    # processed, so best not to do this.
    # app.connect('builder-inited', builder_inited)
    # Add visit/depart methods to HTML-Translator:
    def visit_perform(self, node):
        # Matching builder: render the node's children normally.
        pass
    def depart_perform(self, node):
        pass
    def visit_ignore(self, node):
        # Non-matching builder: drop the node's children from the output.
        node.children = []
    def depart_ignore(self, node):
        node.children = []
    app.add_node(html_only, html=(visit_perform, depart_perform))
    app.add_node(html_only, latex=(visit_ignore, depart_ignore))
    app.add_node(latex_only, latex=(visit_perform, depart_perform))
    app.add_node(latex_only, html=(visit_ignore, depart_ignore))
| bsd-3-clause |
ericmjl/bokeh | bokeh/core/validation/decorators.py | 1 | 5686 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide decorators help with define Bokeh validation checks.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from functools import partial, wraps
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'error',
'warning',
)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _validator(code_or_name, validator_type):
''' Internal shared implementation to handle both error and warning
validation checks.
Args:
code code_or_name (int or str) : a defined error code or custom message
validator_type (str) : either "error" or "warning"
Returns:
validation decorator
'''
if validator_type == "error":
from .errors import codes
from .errors import EXT
elif validator_type == "warning":
from .warnings import codes
from .warnings import EXT
else:
pass # TODO (bev) ValueError?
def decorator(func):
def wrapper(*args, **kw):
extra = func(*args, **kw)
if extra is None: return []
if isinstance(code_or_name, str):
code = EXT
name = codes[code][0] + ":" + code_or_name
else:
code = code_or_name
name = codes[code][0]
text = codes[code][1]
return [(code, name, text, extra)]
wrapper.validator_type = validator_type
return wrapper
return decorator
# Specializations of _validator for the two supported validator types.
_error = partial(_validator, validator_type="error")
_warning = partial(_validator, validator_type="warning")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
def error(code_or_name):
    ''' Decorator to mark a validator method for a Bokeh error condition

    Args:
        code_or_name (int or str) : a code from ``bokeh.validation.errors`` or a string label for a custom check

    Returns:
        callable : decorator for Bokeh model methods

    The function that is decorated should have a name that starts with
    ``_check``, and return a string message in case a bad condition is
    detected, and ``None`` if no bad condition is detected.

    Examples:

    The first example uses a numeric code for a standard error provided in
    ``bokeh.validation.errors``. This usage is primarily of interest to Bokeh
    core developers.

    .. code-block:: python

        from bokeh.validation.errors import REQUIRED_RANGES

        @error(REQUIRED_RANGES)
        def _check_no_glyph_renderers(self):
            if bad_condition: return "message"

    The second example shows how a custom error check can be implemented by
    passing an arbitrary string label to the decorator. This usage is primarily
    of interest to anyone extending Bokeh with their own custom models.

    .. code-block:: python

        @error("MY_CUSTOM_ERROR")
        def _check_my_custom_error(self):
            if bad_condition: return "message"

    '''
    return _error(code_or_name)
def warning(code_or_name):
    ''' Decorator to mark a validator method for a Bokeh warning condition

    Args:
        code_or_name (int or str) : a code from ``bokeh.validation.warnings`` or a string label for a custom check

    Returns:
        callable : decorator for Bokeh model methods

    The function that is decorated should have a name that starts with
    ``_check``, and return a string message in case a bad condition is
    detected, and ``None`` if no bad condition is detected.

    Examples:

    The first example uses a numeric code for a standard warning provided in
    ``bokeh.validation.warnings``. This usage is primarily of interest to Bokeh
    core developers.

    .. code-block:: python

        from bokeh.validation.warnings import MISSING_RENDERERS

        @warning(MISSING_RENDERERS)
        def _check_no_glyph_renderers(self):
            if bad_condition: return "message"

    The second example shows how a custom warning check can be implemented by
    passing an arbitrary string label to the decorator. This usage is primarily
    of interest to anyone extending Bokeh with their own custom models.

    .. code-block:: python

        @warning("MY_CUSTOM_WARNING")
        def _check_my_custom_warning(self):
            if bad_condition: return "message"

    '''
    return _warning(code_or_name)
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
kitanata/resume | env/lib/python2.7/site-packages/PyPDF2/xmp.py | 4 | 13640 | import re
import datetime
import decimal
from .generic import PdfObject
from xml.dom import getDOMImplementation
from xml.dom.minidom import parseString
from .utils import u_
RDF_NAMESPACE = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
DC_NAMESPACE = "http://purl.org/dc/elements/1.1/"
XMP_NAMESPACE = "http://ns.adobe.com/xap/1.0/"
PDF_NAMESPACE = "http://ns.adobe.com/pdf/1.3/"
XMPMM_NAMESPACE = "http://ns.adobe.com/xap/1.0/mm/"
# What is the PDFX namespace, you might ask? I might ask that too. It's
# a completely undocumented namespace used to place "custom metadata"
# properties, which are arbitrary metadata properties with no semantic or
# documented meaning. Elements in the namespace are key/value-style storage,
# where the element name is the key and the content is the value. The keys
# are transformed into valid XML identifiers by substituting an invalid
# identifier character with \u2182 followed by the unicode hex ID of the
# original character. A key like "my car" is therefore "my\u21820020car".
#
# \u2182, in case you're wondering, is the unicode character
# \u{ROMAN NUMERAL TEN THOUSAND}, a straightforward and obvious choice for
# escaping characters.
#
# Intentional users of the pdfx namespace should be shot on sight. A
# custom data schema and sensical XML elements could be used instead, as is
# suggested by Adobe's own documentation on XMP (under "Extensibility of
# Schemas").
#
# Information presented here on the /pdfx/ schema is a result of limited
# reverse engineering, and does not constitute a full specification.
PDFX_NAMESPACE = "http://ns.adobe.com/pdfx/1.3/"
# Regular expression for the (subset of) ISO 8601 date/time strings used by
# XMP.  The month, day, time and timezone portions are each optional, and
# fractional seconds must be introduced by a literal '.' -- the pattern uses
# a raw string so the backslash escape reaches the regex engine intact.
iso8601 = re.compile(r"""
         (?P<year>[0-9]{4})
         (-
             (?P<month>[0-9]{2})
             (-
                 (?P<day>[0-9]+)
                 (T
                     (?P<hour>[0-9]{2}):
                     (?P<minute>[0-9]{2})
                     (:(?P<second>[0-9]{2}(\.[0-9]+)?))?
                     (?P<tzd>Z|[-+][0-9]{2}:[0-9]{2})
                 )?
             )?
         )?
         """, re.VERBOSE)
class XmpInformation(PdfObject):
"""
An object that represents Adobe XMP metadata.
Usually accessed by :meth:`getXmpMetadata()<PyPDF2.PdfFileReader.getXmpMetadata>`
"""
    def __init__(self, stream):
        """
        Args:
            stream: PDF stream object containing the raw XMP XML packet.
        """
        self.stream = stream
        # Parse the packet and keep the top-level rdf:RDF element; every
        # metadata lookup walks its rdf:Description children.
        docRoot = parseString(self.stream.getData())
        self.rdfRoot = docRoot.getElementsByTagNameNS(RDF_NAMESPACE, "RDF")[0]
        # Per-namespace cache of values already extracted by the getters.
        self.cache = {}
    def writeToStream(self, stream, encryption_key):
        # Serialization is delegated entirely to the wrapped PDF stream.
        self.stream.writeToStream(stream, encryption_key)
def getElement(self, aboutUri, namespace, name):
for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri:
attr = desc.getAttributeNodeNS(namespace, name)
if attr != None:
yield attr
for element in desc.getElementsByTagNameNS(namespace, name):
yield element
    def getNodesInNamespace(self, aboutUri, namespace):
        """
        Yield every attribute and child node belonging to ``namespace``
        under rdf:Description nodes whose rdf:about equals ``aboutUri``.
        """
        for desc in self.rdfRoot.getElementsByTagNameNS(RDF_NAMESPACE, "Description"):
            if desc.getAttributeNS(RDF_NAMESPACE, "about") == aboutUri:
                # Attributes have no namespace-aware iterator, so walk by index.
                for i in range(desc.attributes.length):
                    attr = desc.attributes.item(i)
                    if attr.namespaceURI == namespace:
                        yield attr
                for child in desc.childNodes:
                    if child.namespaceURI == namespace:
                        yield child
def _getText(self, element):
text = ""
for child in element.childNodes:
if child.nodeType == child.TEXT_NODE:
text += child.data
return text
    def _converter_string(value):
        # Identity converter for plain text XMP values.  NOTE: deliberately
        # defined without ``self`` -- it is referenced inside the class body
        # as a plain function and passed to the _getter_* factories.
        return value
def _converter_date(value):
m = iso8601.match(value)
year = int(m.group("year"))
month = int(m.group("month") or "1")
day = int(m.group("day") or "1")
hour = int(m.group("hour") or "0")
minute = int(m.group("minute") or "0")
second = decimal.Decimal(m.group("second") or "0")
seconds = second.to_integral(decimal.ROUND_FLOOR)
milliseconds = (second - seconds) * 1000000
tzd = m.group("tzd") or "Z"
dt = datetime.datetime(year, month, day, hour, minute, seconds, milliseconds)
if tzd != "Z":
tzd_hours, tzd_minutes = [int(x) for x in tzd.split(":")]
tzd_hours *= -1
if tzd_hours < 0:
tzd_minutes *= -1
dt = dt + datetime.timedelta(hours=tzd_hours, minutes=tzd_minutes)
return dt
_test_converter_date = staticmethod(_converter_date)
    def _getter_bag(namespace, name, converter):
        # Factory producing a property getter for rdf:Bag (unordered array)
        # values.  Each rdf:li item is passed through ``converter`` and the
        # resulting list is cached per (namespace, name).
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            retval = []
            for element in self.getElement("", namespace, name):
                bags = element.getElementsByTagNameNS(RDF_NAMESPACE, "Bag")
                if len(bags):
                    for bag in bags:
                        for item in bag.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
                            value = self._getText(item)
                            value = converter(value)
                            retval.append(value)
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = retval
            return retval
        return get
    def _getter_seq(namespace, name, converter):
        # Factory producing a property getter for rdf:Seq (ordered array)
        # values.  Falls back to the element's own text when no rdf:Seq
        # wrapper is present.  Results are cached per (namespace, name).
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            retval = []
            for element in self.getElement("", namespace, name):
                seqs = element.getElementsByTagNameNS(RDF_NAMESPACE, "Seq")
                if len(seqs):
                    for seq in seqs:
                        for item in seq.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
                            value = self._getText(item)
                            value = converter(value)
                            retval.append(value)
                else:
                    value = converter(self._getText(element))
                    retval.append(value)
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = retval
            return retval
        return get
    def _getter_langalt(namespace, name, converter):
        # Factory producing a property getter for rdf:Alt language
        # alternatives: returns a dict keyed by xml:lang.  When no rdf:Alt
        # wrapper is present the bare text is stored under "x-default".
        # Results are cached per (namespace, name).
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            retval = {}
            for element in self.getElement("", namespace, name):
                alts = element.getElementsByTagNameNS(RDF_NAMESPACE, "Alt")
                if len(alts):
                    for alt in alts:
                        for item in alt.getElementsByTagNameNS(RDF_NAMESPACE, "li"):
                            value = self._getText(item)
                            value = converter(value)
                            retval[item.getAttribute("xml:lang")] = value
                else:
                    retval["x-default"] = converter(self._getText(element))
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = retval
            return retval
        return get
    def _getter_single(namespace, name, converter):
        # Factory producing a property getter for single-valued properties.
        # The value may be stored either as an XML attribute or as element
        # text; the first match wins.  Results are cached per (namespace,
        # name).
        def get(self):
            cached = self.cache.get(namespace, {}).get(name)
            if cached:
                return cached
            value = None
            for element in self.getElement("", namespace, name):
                if element.nodeType == element.ATTRIBUTE_NODE:
                    value = element.nodeValue
                else:
                    value = self._getText(element)
                break
            if value != None:
                value = converter(value)
            ns_cache = self.cache.setdefault(namespace, {})
            ns_cache[name] = value
            return value
        return get
dc_contributor = property(_getter_bag(DC_NAMESPACE, "contributor", _converter_string))
"""
Contributors to the resource (other than the authors). An unsorted
array of names.
"""
dc_coverage = property(_getter_single(DC_NAMESPACE, "coverage", _converter_string))
"""
Text describing the extent or scope of the resource.
"""
dc_creator = property(_getter_seq(DC_NAMESPACE, "creator", _converter_string))
"""
A sorted array of names of the authors of the resource, listed in order
of precedence.
"""
dc_date = property(_getter_seq(DC_NAMESPACE, "date", _converter_date))
"""
A sorted array of dates (datetime.datetime instances) of signifigance to
the resource. The dates and times are in UTC.
"""
dc_description = property(_getter_langalt(DC_NAMESPACE, "description", _converter_string))
"""
A language-keyed dictionary of textual descriptions of the content of the
resource.
"""
dc_format = property(_getter_single(DC_NAMESPACE, "format", _converter_string))
"""
The mime-type of the resource.
"""
dc_identifier = property(_getter_single(DC_NAMESPACE, "identifier", _converter_string))
"""
Unique identifier of the resource.
"""
dc_language = property(_getter_bag(DC_NAMESPACE, "language", _converter_string))
"""
An unordered array specifying the languages used in the resource.
"""
dc_publisher = property(_getter_bag(DC_NAMESPACE, "publisher", _converter_string))
"""
An unordered array of publisher names.
"""
dc_relation = property(_getter_bag(DC_NAMESPACE, "relation", _converter_string))
"""
An unordered array of text descriptions of relationships to other
documents.
"""
dc_rights = property(_getter_langalt(DC_NAMESPACE, "rights", _converter_string))
"""
A language-keyed dictionary of textual descriptions of the rights the
user has to this resource.
"""
dc_source = property(_getter_single(DC_NAMESPACE, "source", _converter_string))
"""
Unique identifier of the work from which this resource was derived.
"""
dc_subject = property(_getter_bag(DC_NAMESPACE, "subject", _converter_string))
"""
An unordered array of descriptive phrases or keywrods that specify the
topic of the content of the resource.
"""
dc_title = property(_getter_langalt(DC_NAMESPACE, "title", _converter_string))
"""
A language-keyed dictionary of the title of the resource.
"""
dc_type = property(_getter_bag(DC_NAMESPACE, "type", _converter_string))
"""
An unordered array of textual descriptions of the document type.
"""
pdf_keywords = property(_getter_single(PDF_NAMESPACE, "Keywords", _converter_string))
"""
An unformatted text string representing document keywords.
"""
pdf_pdfversion = property(_getter_single(PDF_NAMESPACE, "PDFVersion", _converter_string))
"""
The PDF file version, for example 1.0, 1.3.
"""
pdf_producer = property(_getter_single(PDF_NAMESPACE, "Producer", _converter_string))
"""
The name of the tool that created the PDF document.
"""
xmp_createDate = property(_getter_single(XMP_NAMESPACE, "CreateDate", _converter_date))
"""
The date and time the resource was originally created. The date and
time are returned as a UTC datetime.datetime object.
"""
xmp_modifyDate = property(_getter_single(XMP_NAMESPACE, "ModifyDate", _converter_date))
"""
The date and time the resource was last modified. The date and time
are returned as a UTC datetime.datetime object.
"""
xmp_metadataDate = property(_getter_single(XMP_NAMESPACE, "MetadataDate", _converter_date))
"""
The date and time that any metadata for this resource was last
changed. The date and time are returned as a UTC datetime.datetime
object.
"""
xmp_creatorTool = property(_getter_single(XMP_NAMESPACE, "CreatorTool", _converter_string))
"""
The name of the first known tool used to create the resource.
"""
xmpmm_documentId = property(_getter_single(XMPMM_NAMESPACE, "DocumentID", _converter_string))
"""
The common identifier for all versions and renditions of this resource.
"""
xmpmm_instanceId = property(_getter_single(XMPMM_NAMESPACE, "InstanceID", _converter_string))
"""
An identifier for a specific incarnation of a document, updated each
time a file is saved.
"""
def custom_properties(self):
    """Collect all properties found in the pdfx custom-metadata namespace.

    The result is computed once and memoized on ``self._custom_properties``.
    """
    if not hasattr(self, "_custom_properties"):
        self._custom_properties = {}
        for node in self.getNodesInNamespace("", PDFX_NAMESPACE):
            key = node.localName
            while True:
                # see documentation about PDFX_NAMESPACE earlier in file
                idx = key.find(u_("\u2182"))
                if idx == -1:
                    break
                # Decode one escaped character: the U+2182 marker is
                # followed by a 4-digit hex code point.
                # NOTE(review): chr() only covers 0-255 under Python 2 --
                # confirm this path is Python 3 only or handled by u_().
                key = key[:idx] + chr(int(key[idx+1:idx+5], base=16)) + key[idx+5:]
            if node.nodeType == node.ATTRIBUTE_NODE:
                value = node.nodeValue
            else:
                value = self._getText(node)
            self._custom_properties[key] = value
    return self._custom_properties

custom_properties = property(custom_properties)
"""
Retrieves custom metadata properties defined in the undocumented pdfx
metadata schema.
:return: a dictionary of key/value items for custom metadata properties.
:rtype: dict
"""
| mit |
log2timeline/dfvfs | dfvfs/analyzer/ntfs_analyzer_helper.py | 2 | 1186 | # -*- coding: utf-8 -*-
"""The NTFS format analyzer helper implementation."""
from dfvfs.analyzer import analyzer
from dfvfs.analyzer import analyzer_helper
from dfvfs.analyzer import specification
from dfvfs.lib import definitions
class NTFSAnalyzerHelper(analyzer_helper.AnalyzerHelper):
  """Analyzer helper that recognizes the NTFS file system format."""

  FORMAT_CATEGORIES = frozenset([
      definitions.FORMAT_CATEGORY_FILE_SYSTEM])

  TYPE_INDICATOR = definitions.TYPE_INDICATOR_NTFS

  def GetFormatSpecification(self):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification or None if the format cannot
          be defined by a specification object.
    """
    spec = specification.FormatSpecification(self.type_indicator)
    # NTFS file system signature in the boot sector (OEM identifier).
    # NOTE(review): the canonical OEM ID is b'NTFS' padded to 8 bytes with
    # spaces -- confirm this literal was not whitespace-mangled.
    spec.AddNewSignature(b'NTFS ', offset=3)
    return spec

  def IsEnabled(self):
    """Determines if the analyzer helper is enabled.

    Returns:
      bool: True if the analyzer helper is enabled.
    """
    # Enabled only when this back-end is the preferred NTFS implementation.
    return self.TYPE_INDICATOR == definitions.PREFERRED_NTFS_BACK_END


analyzer.Analyzer.RegisterHelper(NTFSAnalyzerHelper())
| apache-2.0 |
gmimano/commcaretest | corehq/apps/hqmedia/controller.py | 2 | 3024 | from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_noop
class BaseMultimediaUploadController(object):
    """
    Media type is the user-facing term for the type of media that the uploader is uploading
    """
    media_type = None
    uploader_type = None
    is_multi_file = False
    errors_template = "hqmedia/uploader/errors.html"

    def __init__(self, slug, destination):
        self.slug = slug
        self.destination = destination

    @property
    def licensing_params(self):
        # Form fields collected for licensing/attribution of uploaded media.
        return ['shared', 'license', 'author', 'attribution-notes']

    @property
    def upload_params(self):
        """
        Extra parameters that get sent to the processor once the file is uploaded.
        """
        # Subclasses override this when the processor needs extra data.
        return {}

    @property
    def supported_files(self):
        """
        A list of dicts of supported file extensions by the YUI Uploader widget.
        """
        raise NotImplementedError("You must specify a list of supported files for this uploader.")

    @property
    def processing_url(self):
        # Imported lazily to avoid a circular import with the views module.
        from corehq.apps.hqmedia.views import MultimediaUploadStatusView
        return reverse(MultimediaUploadStatusView.name)
class MultimediaBulkUploadController(BaseMultimediaUploadController):
    """Controller for the bulk uploader: accepts a single .zip file."""
    is_multi_file = True
    uploader_type = "bulk"
    # User-facing term for the uploaded file type.
    media_type = ugettext_noop("zip")

    queue_template = "hqmedia/uploader/queue_multi.html"
    status_template = "hqmedia/uploader/status_multi.html"
    details_template = "hqmedia/uploader/details_multi.html"

    @property
    def supported_files(self):
        # Extension filter handed to the YUI Uploader widget.
        return [
            {
                'description': 'Zip',
                'extensions': '*.zip',
            },
        ]
class BaseMultimediaFileUploadController(BaseMultimediaUploadController):
    """Base class for the single-file upload controllers below."""
    uploader_type = "file"
    queue_template = "hqmedia/uploader/queue_single.html"
class MultimediaImageUploadController(BaseMultimediaFileUploadController):
    """Single-file uploader controller for images (jpg/png/gif)."""
    media_type = ugettext_noop("image")
    existing_file_template = "hqmedia/uploader/preview_image_single.html"

    @property
    def supported_files(self):
        # Extension filter handed to the YUI Uploader widget.
        return [
            {
                'description': 'Images',
                'extensions': '*.jpg;*.png;*.gif',
            },
        ]
class MultimediaAudioUploadController(BaseMultimediaFileUploadController):
    """Single-file uploader controller for audio clips (mp3/wav)."""
    media_type = ugettext_noop("audio")
    existing_file_template = "hqmedia/uploader/preview_audio_single.html"

    @property
    def supported_files(self):
        # Extension filter handed to the YUI Uploader widget.
        return [
            {
                'description': 'Audio',
                'extensions': '*.mp3;*.wav',
            },
        ]
class MultimediaVideoUploadController(BaseMultimediaFileUploadController):
    """Single-file uploader controller for video clips (3gp)."""
    media_type = ugettext_noop("video")
    existing_file_template = "hqmedia/uploader/preview_video_single.html"

    @property
    def supported_files(self):
        # Extension filter handed to the YUI Uploader widget.
        return [
            {
                'description': 'Video',
                'extensions': '*.3gp',
            },
        ]
| bsd-3-clause |
atsaki/ansible-modules-extras | monitoring/librato_annotation.py | 138 | 5556 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) Seth Edwards, 2014
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: librato_annotation
short_description: create an annotation in librato
description:
- Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically
version_added: "1.6"
author: "Seth Edwards (@sedward)"
requirements: []
options:
user:
description:
- Librato account username
required: true
api_key:
description:
- Librato account api key
required: true
name:
description:
- The annotation stream name
- If the annotation stream does not exist, it will be created automatically
required: false
title:
description:
- The title of an annotation is a string and may contain spaces
- The title should be a short, high-level summary of the annotation e.g. v45 Deployment
required: true
source:
description:
- A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population
required: false
description:
description:
- The description contains extra meta-data about a particular annotation
- The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo!
required: false
start_time:
description:
- The unix timestamp indicating the time at which the event referenced by this annotation started
required: false
end_time:
description:
- The unix timestamp indicating the time at which the event referenced by this annotation ended
- For events that have a duration, this is a useful way to annotate the duration of the event
required: false
links:
description:
- See examples
required: true
'''
EXAMPLES = '''
# Create a simple annotation event with a source
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXX
title: 'App Config Change'
source: 'foo.bar'
description: 'This is a detailed description of the config change'
# Create an annotation that includes a link
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXXX
name: 'code.deploy'
title: 'app code deploy'
description: 'this is a detailed description of a deployment'
links:
- { rel: 'example', href: 'http://www.example.com/deploy' }
# Create an annotation with a start_time and end_time
- librato_annotation:
user: user@example.com
api_key: XXXXXXXXXXXXXXXXXX
name: 'maintenance'
title: 'Maintenance window'
description: 'This is a detailed description of maintenance'
start_time: 1395940006
end_time: 1395954406
'''
def post_annotation(module):
    """POST the annotation described by the module parameters to Librato.

    Builds the JSON payload, sends it to
    ``https://metrics-api.librato.com/v1/annotations/<name>`` and calls
    ``module.exit_json`` on success or ``module.fail_json`` on a non-200
    HTTP status.
    """
    user = module.params['user']
    api_key = module.params['api_key']
    name = module.params['name']
    title = module.params['title']

    url = 'https://metrics-api.librato.com/v1/annotations/%s' % name
    params = {'title': title}
    # Forward only the optional parameters the user actually supplied
    # (also replaces the non-idiomatic `!= None` comparisons).
    for key in ('source', 'description', 'start_time', 'end_time', 'links'):
        if module.params[key] is not None:
            params[key] = module.params[key]

    json_body = module.jsonify(params)
    headers = {'Content-Type': 'application/json'}

    # Hack send parameters the way fetch_url wants them
    module.params['url_username'] = user
    module.params['url_password'] = api_key
    response, info = fetch_url(module, url, data=json_body, headers=headers)
    if info['status'] != 200:
        # BUG FIX: the original referenced an undefined name `e` here
        # (`reason=e.reason` raised NameError on every failure); report the
        # failure details that fetch_url returned instead.
        module.fail_json(msg="Request Failed", reason=info.get('msg', ''))
    response = response.read()
    module.exit_json(changed=True, annotation=response)
def main():
    """Module entry point: declare the interface and post the annotation."""
    module = AnsibleModule(
        argument_spec = dict(
            user = dict(required=True),
            api_key = dict(required=True),
            name = dict(required=False),
            title = dict(required=True),
            source = dict(required=False),
            description = dict(required=False),
            start_time = dict(required=False, default=None, type='int'),
            # BUG FIX: was `require=False` (typo) -- AnsibleModule expects
            # the keyword `required`, so the option spec was silently wrong.
            end_time = dict(required=False, default=None, type='int'),
            links = dict(type='list')
        )
    )

    post_annotation(module)

# Ansible convention (1.x): module_utils star-imports live at the bottom of
# module files so the documentation block stays at the top.
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
enapps/enapps-openerp-server | openerp/addons/base/res/wizard/partner_wizard_massmail.py | 14 | 3567 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
import re
import logging
_logger = logging.getLogger(__name__)
class partner_massmail_wizard(osv.osv_memory):
    """ Mass Mailing """
    _name = "partner.massmail.wizard"
    _description = "Mass Mailing"
    _columns = {
        'email_from': fields.char("Sender's email", size=256, required=True),
        'subject': fields.char('Subject', size=256, required=True),
        'text': fields.text('Message', required=True),
    }

    def mass_mail_send(self, cr, uid, ids, context):
        """Send the given mail to all partners whose ids
        are present in ``context['active_ids']``, to
        all addresses with an email set.

        :param dict context: ``context['active_ids']``
                             should contain the list of
                             ids of the partners who should
                             receive the mail.
        """
        nbr = 0  # count of emails successfully handed to the mail server
        partner_pool = self.pool.get('res.partner')
        data = self.browse(cr, uid, ids[0], context=context)
        event_pool = self.pool.get('res.partner.event')
        assert context['active_model'] == 'res.partner', 'This wizard must be started on a list of Partners'
        active_ids = context.get('active_ids', [])
        partners = partner_pool.browse(cr, uid, active_ids, context)
        # Heuristic: switch to an HTML subtype when the body looks like markup.
        subtype = 'plain'
        if re.search('(<(pre)|[pubi].*>)', data.text):
            subtype = 'html'
        ir_mail_server = self.pool.get('ir.mail_server')
        emails_seen = set()  # dedupe: each address receives at most one mail
        for partner in partners:
            for adr in partner.address:
                if adr.email and not adr.email in emails_seen:
                    try:
                        emails_seen.add(adr.email)
                        name = adr.name or partner.name
                        to = '"%s" <%s>' % (name, adr.email)
                        msg = ir_mail_server.build_email(data.email_from, [to], data.subject, data.text, subtype=subtype)
                        if ir_mail_server.send_email(cr, uid, msg):
                            nbr += 1
                    except Exception:
                        # ignore failed deliveries, will be logged anyway
                        pass
            # One partner-level event per partner, even if several addresses
            # were mailed (or none succeeded).
            event_pool.create(cr, uid,
                    {'name': 'Email(s) sent through mass mailing',
                     'partner_id': partner.id,
                     'description': data.text })
        _logger.info('Mass-mailing wizard sent %s emails', nbr)
        return {'email_sent': nbr}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lorenzo-desantis/mne-python | mne/decoding/csp.py | 6 | 21527 | # Authors: Romain Trachel <trachelr@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Alexandre Barachant <alexandre.barachant@gmail.com>
#
# License: BSD (3-clause)
import copy as cp
import warnings
import numpy as np
from scipy import linalg
from .mixin import TransformerMixin
from ..cov import _regularized_covariance
class CSP(TransformerMixin):
"""M/EEG signal decomposition using the Common Spatial Patterns (CSP).
This object can be used as a supervised decomposition to estimate
spatial filters for feature extraction in a 2 class decoding problem.
See [1].
Parameters
----------
n_components : int (default 4)
The number of components to decompose M/EEG signals.
This number should be set by cross-validation.
reg : float | str | None (default None)
if not None, allow regularization for covariance estimation
if float, shrinkage covariance is used (0 <= shrinkage <= 1).
if str, optimal shrinkage using Ledoit-Wolf Shrinkage ('ledoit_wolf')
or Oracle Approximating Shrinkage ('oas').
log : bool (default True)
If true, apply log to standardize the features.
If false, features are just z-scored.
Attributes
----------
filters_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP components used to decompose the data, else None.
patterns_ : ndarray, shape (n_channels, n_channels)
If fit, the CSP patterns used to restore M/EEG signals, else None.
mean_ : ndarray, shape (n_channels,)
If fit, the mean squared power for each component.
std_ : ndarray, shape (n_channels,)
If fit, the std squared power for each component.
References
----------
[1] Zoltan J. Koles. The quantitative extraction and topographic mapping
of the abnormal components in the clinical EEG. Electroencephalography
and Clinical Neurophysiology, 79(6):440--447, December 1991.
"""
def __init__(self, n_components=4, reg=None, log=True):
"""Init of CSP."""
self.n_components = n_components
if reg == 'lws':
warnings.warn('`lws` has been deprecated for the `reg`'
' argument. It will be removed in 0.11.'
' Use `ledoit_wolf` instead.', DeprecationWarning)
reg = 'ledoit_wolf'
self.reg = reg
self.log = log
self.filters_ = None
self.patterns_ = None
self.mean_ = None
self.std_ = None
def fit(self, epochs_data, y):
    """Estimate the CSP decomposition on epochs.

    Parameters
    ----------
    epochs_data : ndarray, shape (n_epochs, n_channels, n_times)
        The data to estimate the CSP on.
    y : array, shape (n_epochs,)
        The class for each epoch.

    Returns
    -------
    self : instance of CSP
        Returns the modified instance.
    """
    if not isinstance(epochs_data, np.ndarray):
        raise ValueError("epochs_data should be of type ndarray (got %s)."
                         % type(epochs_data))
    epochs_data = np.atleast_3d(epochs_data)
    # check number of epochs
    if epochs_data.shape[0] != len(y):
        raise ValueError("n_epochs must be the same for epochs_data and y")
    classes = np.unique(y)
    if len(classes) != 2:
        raise ValueError("More than two different classes in the data.")

    # Collapse each class into one (n_channels, n_class_epochs * n_times)
    # matrix and estimate its (optionally regularized) covariance.
    n_channels = epochs_data.shape[1]
    covs = []
    for klass in classes:
        class_data = np.transpose(epochs_data[y == klass], [1, 0, 2])
        covs.append(_regularized_covariance(
            class_data.reshape(n_channels, -1), reg=self.reg))

    # Solve for the spatial filters given the two class covariances.
    self._fit(covs[0], covs[1])

    # Mean band power of the retained components on the training data,
    # kept for z-scoring features at transform time.
    selected = self.filters_[:self.n_components]
    powers = np.asarray([np.dot(selected, e) for e in epochs_data])
    powers = (powers ** 2).mean(axis=-1)
    self.mean_ = powers.mean(axis=0)
    self.std_ = powers.std(axis=0)
    return self
def _fit(self, cov_a, cov_b):
"""Aux Function (modifies cov_a and cov_b in-place)."""
cov_a /= np.trace(cov_a)
cov_b /= np.trace(cov_b)
# computes the eigen values
lambda_, u = linalg.eigh(cov_a + cov_b)
# sort them
ind = np.argsort(lambda_)[::-1]
lambda2_ = lambda_[ind]
u = u[:, ind]
p = np.dot(np.sqrt(linalg.pinv(np.diag(lambda2_))), u.T)
# Compute the generalized eigen value problem
w_a = np.dot(np.dot(p, cov_a), p.T)
w_b = np.dot(np.dot(p, cov_b), p.T)
# and solve it
vals, vecs = linalg.eigh(w_a, w_b)
# sort vectors by discriminative power using eigen values
ind = np.argsort(np.maximum(vals, 1. / vals))[::-1]
vecs = vecs[:, ind]
# and project
w = np.dot(vecs.T, p)
self.filters_ = w
self.patterns_ = linalg.pinv(w).T
def transform(self, epochs_data, y=None):
"""Estimate epochs sources given the CSP filters.
Parameters
----------
epochs_data : array, shape (n_epochs, n_channels, n_times)
The data.
y : None
Not used.
Returns
-------
X : ndarray of shape (n_epochs, n_sources)
The CSP features averaged over time.
"""
if not isinstance(epochs_data, np.ndarray):
raise ValueError("epochs_data should be of type ndarray (got %s)."
% type(epochs_data))
if self.filters_ is None:
raise RuntimeError('No filters available. Please first fit CSP '
'decomposition.')
pick_filters = self.filters_[:self.n_components]
X = np.asarray([np.dot(pick_filters, e) for e in epochs_data])
# compute features (mean band power)
X = (X ** 2).mean(axis=-1)
if self.log:
X = np.log(X)
else:
X -= self.mean_
X /= self.std_
return X
def plot_patterns(self, info, components=None, ch_type=None, layout=None,
                  vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
                  colorbar=True, scale=None, scale_time=1, unit=None,
                  res=64, size=1, cbar_fmt='%3.1f',
                  name_format='CSP%01d', proj=False, show=True,
                  show_names=False, title=None, mask=None,
                  mask_params=None, outlines='head', contours=6,
                  image_interp='bilinear', average=None, head_pos=None):
    """Plot topographic patterns of CSP components.

    The CSP patterns explain how the measured data was generated
    from the neural sources (a.k.a. the forward model).

    Parameters
    ----------
    info : instance of Info
        Info dictionary of the epochs used to fit CSP.
        If not possible, consider using ``create_info``.
    components : float | array of floats | None.
        The CSP patterns to plot. If None, n_components will be shown.
    ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
        The channel type to plot. For 'grad', the gradiometers are
        collected in pairs and the RMS for each pair is plotted.
        If None, then channels are chosen in the order given above.
    layout : None | Layout
        Layout instance specifying sensor positions (does not need to be
        specified for Neuromag data). If possible, the correct layout file
        is inferred from the data; if no appropriate layout file was found
        the layout is automatically generated from the sensor locations.
    vmin : float | callable
        The value specifying the lower bound of the color range.
        If None, and vmax is None, -vmax is used. Else np.min(data).
        If callable, the output equals vmin(data).
    vmax : float | callable
        The value specifying the upper bound of the color range.
        If None, the maximum absolute value is used. If vmin is None,
        but vmax is not, defaults to np.min(data).
        If callable, the output equals vmax(data).
    cmap : matplotlib colormap
        Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
        'Reds'.
    sensors : bool | str
        Add markers for sensor locations to the plot. Accepts matplotlib
        plot format string (e.g., 'r+' for red plusses). If True,
        a circle will be used (via .add_artist). Defaults to True.
    colorbar : bool
        Plot a colorbar.
    scale : dict | float | None
        Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
        for grad and 1e15 for mag.
    scale_time : float | None
        Scale the time labels. Defaults to 1.
    unit : dict | str | None
        The unit of the channel type used for colorbar label. If
        scale is None the unit is automatically determined.
    res : int
        The resolution of the topomap image (n pixels along each side).
    size : float
        Side length per topomap in inches.
    cbar_fmt : str
        String format for colorbar values.
    name_format : str
        String format for topomap values. Defaults to "CSP%01d"
    proj : bool | 'interactive'
        If true SSP projections are applied before display.
        If 'interactive', a check box for reversible selection
        of SSP projection vectors will be show.
    show : bool
        Show figure if True.
    show_names : bool | callable
        If True, show channel names on top of the map. If a callable is
        passed, channel names will be formatted using the callable; e.g.,
        to delete the prefix 'MEG ' from all channel names, pass the
        function lambda x: x.replace('MEG ', ''). If `mask` is not None,
        only significant sensors will be shown.
    title : str | None
        Title. If None (default), no title is displayed.
    mask : ndarray of bool, shape (n_channels, n_times) | None
        The channels to be marked as significant at a given time point.
        Indices set to `True` will be considered. Defaults to None.
    mask_params : dict | None
        Additional plotting parameters for plotting significant sensors.
        Default (None) equals::

            dict(marker='o', markerfacecolor='w', markeredgecolor='k',
                 linewidth=0, markersize=4)
    outlines : 'head' | 'skirt' | dict | None
        The outlines to be drawn. If 'head', the default head scheme will
        be drawn. If 'skirt' the head scheme will be drawn, but sensors are
        allowed to be plotted outside of the head circle. If dict, each key
        refers to a tuple of x and y positions, the values in 'mask_pos'
        will serve as image mask, and the 'autoshrink' (bool) field will
        trigger automated shrinking of the positions due to points outside
        the outline. Alternatively, a matplotlib patch object can be passed
        for advanced masking options, either directly or as a function that
        returns patches (required for multi-axis plots). If None, nothing
        will be drawn. Defaults to 'head'.
    contours : int | False | None
        The number of contour lines to draw.
        If 0, no contours will be drawn.
    image_interp : str
        The image interpolation to be used.
        All matplotlib options are accepted.
    average : float | None
        The time window around a given time to be used for averaging
        (seconds). For example, 0.01 would translate into window that
        starts 5 ms before and ends 5 ms after a given time point.
        Defaults to None, which means no averaging.
    head_pos : dict | None
        If None (default), the sensors are positioned such that they span
        the head circle. If dict, can have entries 'center' (tuple) and
        'scale' (tuple) for what the center and scale of the head
        should be relative to the electrode locations.

    Returns
    -------
    fig : instance of matplotlib.figure.Figure
        The figure.
    """
    from .. import EvokedArray
    if components is None:
        components = np.arange(self.n_components)

    # Trick: with sfreq set to 1 Hz, each "time point" of the EvokedArray
    # corresponds to one CSP component, so Evoked.plot_topomap can be
    # reused to plot the spatial patterns.
    # set sampling frequency to have 1 component per time point
    info = cp.deepcopy(info)
    info['sfreq'] = 1.
    # create an evoked
    patterns = EvokedArray(self.patterns_.T, info, tmin=0)
    # the call plot_topomap
    # NOTE(review): scale/scale_time/unit are hard-coded to 1/1/'a.u.'
    # below, and `title`, `proj` and `average` are accepted but not
    # forwarded -- confirm this is intentional.
    return patterns.plot_topomap(times=components, ch_type=ch_type,
                                 layout=layout, vmin=vmin, vmax=vmax,
                                 cmap=cmap, colorbar=colorbar, res=res,
                                 cbar_fmt=cbar_fmt, sensors=sensors,
                                 scale=1, scale_time=1, unit='a.u.',
                                 time_format=name_format, size=size,
                                 show_names=show_names,
                                 mask_params=mask_params,
                                 mask=mask, outlines=outlines,
                                 contours=contours,
                                 image_interp=image_interp, show=show,
                                 head_pos=head_pos)
def plot_filters(self, info, components=None, ch_type=None, layout=None,
vmin=None, vmax=None, cmap='RdBu_r', sensors=True,
colorbar=True, scale=None, scale_time=1, unit=None,
res=64, size=1, cbar_fmt='%3.1f',
name_format='CSP%01d', proj=False, show=True,
show_names=False, title=None, mask=None,
mask_params=None, outlines='head', contours=6,
image_interp='bilinear', average=None, head_pos=None):
"""Plot topographic filters of CSP components.
The CSP filters are used to extract discriminant neural sources from
the measured data (a.k.a. the backward model).
Parameters
----------
info : instance of Info
Info dictionary of the epochs used to fit CSP.
If not possible, consider using ``create_info``.
components : float | array of floats | None.
The CSP patterns to plot. If None, n_components will be shown.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
layout : None | Layout
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If possible, the correct layout file
is inferred from the data; if no appropriate layout file was found
the layout is automatically generated from the sensor locations.
vmin : float | callable
The value specfying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
The value specfying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True,
a circle will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
scale : dict | float | None
Scale the data for plotting. If None, defaults to 1e6 for eeg, 1e13
for grad and 1e15 for mag.
scale_time : float | None
Scale the time labels. Defaults to 1.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
name_format : str
String format for topomap values. Defaults to "CSP%01d"
proj : bool | 'interactive'
If true SSP projections are applied before display.
If 'interactive', a check box for reversible selection
of SSP projection vectors will be show.
show : bool
Show figure if True.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
mask : ndarray of bool, shape (n_channels, n_times) | None
The channels to be marked as significant at a given time point.
Indices set to `True` will be considered. Defaults to None.
mask_params : dict | None
Additional plotting parameters for plotting significant sensors.
Default (None) equals::
dict(marker='o', markerfacecolor='w', markeredgecolor='k',
linewidth=0, markersize=4)
outlines : 'head' | 'skirt' | dict | None
The outlines to be drawn. If 'head', the default head scheme will
be drawn. If 'skirt' the head scheme will be drawn, but sensors are
allowed to be plotted outside of the head circle. If dict, each key
refers to a tuple of x and y positions, the values in 'mask_pos'
will serve as image mask, and the 'autoshrink' (bool) field will
trigger automated shrinking of the positions due to points outside
the outline. Alternatively, a matplotlib patch object can be passed
for advanced masking options, either directly or as a function that
returns patches (required for multi-axis plots). If None, nothing
will be drawn. Defaults to 'head'.
contours : int | False | None
The number of contour lines to draw.
If 0, no contours will be drawn.
image_interp : str
The image interpolation to be used.
All matplotlib options are accepted.
average : float | None
The time window around a given time to be used for averaging
(seconds). For example, 0.01 would translate into window that
starts 5 ms before and ends 5 ms after a given time point.
Defaults to None, which means no averaging.
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head
should be relative to the electrode locations.
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure.
"""
from .. import EvokedArray
if components is None:
components = np.arange(self.n_components)
# set sampling frequency to have 1 component per time point
info = cp.deepcopy(info)
info['sfreq'] = 1.
# create an evoked
filters = EvokedArray(self.filters_, info, tmin=0)
    # then call plot_topomap
return filters.plot_topomap(times=components, ch_type=ch_type,
layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, colorbar=colorbar, res=res,
cbar_fmt=cbar_fmt, sensors=sensors,
scale=1, scale_time=1, unit='a.u.',
time_format=name_format, size=size,
show_names=show_names,
mask_params=mask_params,
mask=mask, outlines=outlines,
contours=contours,
image_interp=image_interp, show=show,
head_pos=head_pos)
| bsd-3-clause |
DavidParkin/pomodoro-indicator | setup.py | 2 | 3340 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
#
# Copyright 2011 malev.com.ar
#
# Author: Marcos Vanetta <marcosvanetta@gmail.com>
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of either or both of the following licenses:
#
# 1) the GNU Lesser General Public License version 3, as published by the
# Free Software Foundation; and/or
# 2) the GNU Lesser General Public License version 2.1, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranties of
# MERCHANTABILITY, SATISFACTORY QUALITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the applicable version of the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of both the GNU Lesser General Public
# License version 3 and version 2.1 along with this program. If not, see
# <http://www.gnu.org/licenses/>
#
"""Build tar.gz for pomodoro-indicator.
Needed packages to run (using Debian/Ubuntu package names):
python-appindicator 0.3.0-0ubuntu1
python-gobject 2.28.3-1ubuntu1.1
python-notify 0.1.1-2build4
python-gtk2-dev 0.1.1-2build4
"""
import os
from distutils.command.install import install
from distutils.core import setup
class CustomInstall(install):
    """Custom installation class on package files.

    It copies all the files into the "PREFIX/share/PROJECTNAME" dir.
    """

    def run(self):
        """Run parent install, and then save the install dir in the script."""
        install.run(self)

        for script in self.distribution.scripts:
            script_path = os.path.join(self.install_scripts,
                                       os.path.basename(script))
            # Patch the installed launcher so it can locate its data files.
            # NOTE(review): the file is opened in binary mode but replace()
            # is called with str arguments -- fine on Python 2, would raise
            # TypeError on Python 3; confirm the targeted interpreter.
            with open(script_path, 'rb') as fh:
                content = fh.read()
            content = content.replace('@ INSTALLED_BASE_DIR @',
                                      self._custom_data_dir)
            with open(script_path, 'wb') as fh:
                fh.write(content)

    def finalize_options(self):
        """Alter the installation path."""
        install.finalize_options(self)

        # the data path is under 'prefix'
        data_dir = os.path.join(self.prefix, "share",
                                self.distribution.get_name())
        # if we have 'root', put the building path also under it (used normally
        # by pbuilder)
        if self.root is None:
            build_dir = data_dir
        else:
            # Strip the leading '/' from data_dir so os.path.join keeps root.
            build_dir = os.path.join(self.root, data_dir[1:])

        # change the lib install directory so all package files go inside here
        self.install_lib = build_dir
        # save this custom data dir to later change the scripts
        self._custom_data_dir = data_dir
# Package metadata; CustomInstall (above) rewrites the data path placeholder
# inside the installed launcher script.
setup(
    name = 'pomodoro-indicator',
    version = '0.0.1',
    license = 'GPL-3',
    author = 'Marcos Vanetta',
    author_email = 'marcosvanetta@gmail.com',
    description = 'Pomodoro technique app indicator.',
    long_description = 'Pomodoro technique app indicator',
    url = 'https://github.com/malev/pomodoro-indicator',
    packages = ["pomodoro"],
    package_data = {"pomodoro": ["images/*.png"]},
    scripts = ["bin/pomodoro-indicator"],
    cmdclass = {
        'install': CustomInstall,
    }
)
| gpl-3.0 |
grimmjow8/ansible | lib/ansible/modules/storage/netapp/na_cdot_user.py | 14 | 10605 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible module maturity/support metadata consumed by the docs tooling.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
# Documentation consumed by ansible-doc. The module name must match the file
# name (na_cdot_user.py); the original said "na_cdot_users", which pointed at
# a non-existent module and disagreed with the EXAMPLES block.
DOCUMENTATION = '''

module: na_cdot_user

short_description: useradmin configuration and management
extends_documentation_fragment:
    - netapp.ontap
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)

description:
- Create or destroy users.

options:

  state:
    description:
    - Whether the specified user should exist or not.
    required: true
    choices: ['present', 'absent']

  name:
    description:
    - The name of the user to manage.
    required: true

  application:
    description:
    - Applications to grant access to.
    required: true
    choices: ['console', 'http','ontapi','rsh','snmp','sp','ssh','telnet']

  authentication_method:
    description:
    - Authentication method for the application.
    - Not all authentication methods are valid for an application.
    - Valid authentication methods for each application are as denoted in I(authentication_choices_description).
    authentication_choices_description:
    - password for console application
    - password, domain, nsswitch, cert for http application.
    - password, domain, nsswitch, cert for ontapi application.
    - community for snmp application (when creating SNMPv1 and SNMPv2 users).
    - usm and community for snmp application (when creating SNMPv3 users).
    - password for sp application.
    - password for rsh application.
    - password for telnet application.
    - password, publickey, domain, nsswitch for ssh application.
    required: true
    choices: ['community', 'password', 'publickey', 'domain', 'nsswitch', 'usm']

  set_password:
    description:
    - Password for the user account.
    - It is ignored for creating snmp users, but is required for creating non-snmp users.
    - For an existing user, this value will be used as the new password.
    default: None

  role_name:
    description:
    - The name of the role.
    note: required when C(state=present)

  vserver:
    description:
    - The name of the vserver to use.
    required: true

'''
EXAMPLES = """
- name: Create User
na_cdot_user:
state: present
name: SampleUser
application: ssh
authentication_method: password
set_password: apn1242183u1298u41
role_name: vsadmin
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
# True when the NetApp-Lib python package is importable on the controller.
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTUser(object):
    """
    Common operations to manage users and roles.

    All operations go through the ONTAP ZAPI (security-login-* calls).
    """

    def __init__(self):
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            application=dict(required=True, type='str', choices=[
                'console', 'http', 'ontapi', 'rsh',
                'snmp', 'sp', 'ssh', 'telnet']),
            authentication_method=dict(required=True, type='str',
                                       choices=['community', 'password',
                                                'publickey', 'domain',
                                                'nsswitch', 'usm']),
            set_password=dict(required=False, type='str', default=None),
            role_name=dict(required=False, type='str'),
            vserver=dict(required=True, type='str'),
        ))

        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                # role_name is only meaningful (and mandatory) when creating.
                ('state', 'present', ['role_name'])
            ],
            supports_check_mode=True
        )

        p = self.module.params

        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.application = p['application']
        self.authentication_method = p['authentication_method']
        self.set_password = p['set_password']
        self.role_name = p['role_name']
        self.vserver = p['vserver']

        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_ontap_zapi(module=self.module)

    def get_user(self):
        """
        Checks if the user exists.

        :return:
            True if user found
            False if user is not found
        :rtype: bool
        """
        security_login_get_iter = netapp_utils.zapi.NaElement('security-login-get-iter')
        query_details = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-account-info', **{'vserver': self.vserver,
                                              'user-name': self.name,
                                              'application': self.application,
                                              'authentication-method':
                                                  self.authentication_method})

        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(query_details)
        security_login_get_iter.add_child_elem(query)

        try:
            result = self.server.invoke_successfully(security_login_get_iter,
                                                     enable_tunneling=False)
            if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
                return True
            else:
                return False
        except netapp_utils.zapi.NaApiError:
            e = get_exception()
            # Error 16034 denotes a user not being found.
            if str(e.code) == "16034":
                return False
            else:
                self.module.fail_json(msg='Error getting user %s' % self.name, exception=str(e))

    def create_user(self):
        """Create the user with the requested application/auth/role."""
        user_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-create', **{'vserver': self.vserver,
                                        'user-name': self.name,
                                        'application': self.application,
                                        'authentication-method':
                                            self.authentication_method,
                                        'role-name': self.role_name})
        # SNMP users have no password; only attach one when supplied.
        if self.set_password is not None:
            user_create.add_new_child('password', self.set_password)

        try:
            self.server.invoke_successfully(user_create,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError:
            err = get_exception()
            self.module.fail_json(msg='Error creating user %s' % self.name, exception=str(err))

    def delete_user(self):
        """Delete the user/application/auth-method combination."""
        user_delete = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-delete', **{'vserver': self.vserver,
                                        'user-name': self.name,
                                        'application': self.application,
                                        'authentication-method':
                                            self.authentication_method})

        try:
            self.server.invoke_successfully(user_delete,
                                            enable_tunneling=False)
        except netapp_utils.zapi.NaApiError:
            err = get_exception()
            self.module.fail_json(msg='Error removing user %s' % self.name, exception=str(err))

    def change_password(self):
        """
        Changes the password

        :return:
            True if password updated
            False if password is not updated
        :rtype: bool
        """
        self.server.set_vserver(self.vserver)
        modify_password = netapp_utils.zapi.NaElement.create_node_with_children(
            'security-login-modify-password', **{
                'new-password': str(self.set_password),
                'user-name': self.name})
        try:
            self.server.invoke_successfully(modify_password,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError:
            e = get_exception()
            # Error 13114 denotes the new password matching the old one,
            # i.e. nothing changed.
            if str(e.code) == '13114':
                return False
            self.module.fail_json(msg='Error setting password for user %s' % self.name, exception=str(e))
        finally:
            # Always drop the vserver tunnel -- the original only reset it on
            # the success path, leaving it set after the early return above.
            self.server.set_vserver(None)
        return True

    def apply(self):
        """Reconcile the requested state against the system and exit."""
        property_changed = False
        password_changed = False
        user_exists = self.get_user()

        if user_exists:
            if self.state == 'absent':
                property_changed = True
            elif self.state == 'present':
                if self.set_password is not None:
                    password_changed = self.change_password()
        else:
            if self.state == 'present':
                # Check if anything needs to be updated
                property_changed = True

        if property_changed:
            if self.module.check_mode:
                # Report the pending change without touching the system.
                pass
            else:
                if self.state == 'present':
                    if not user_exists:
                        self.create_user()
                    # Add ability to update parameters.
                elif self.state == 'absent':
                    self.delete_user()

        changed = property_changed or password_changed
        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the manager and apply the requested state."""
    user_manager = NetAppCDOTUser()
    user_manager.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
hkawasaki/kawasaki-aio8-0 | lms/djangoapps/course_wiki/plugins/markdownedx/mdx_image.py | 151 | 2077 | #!/usr/bin/env python
'''
Image Embedding Extension for Python-Markdown
======================================
Converts lone links to embedded images, provided the file extension is allowed.
Ex:
http://www.ericfehse.net/media/img/ef/blog/django-pony.jpg
becomes
<img src="http://www.ericfehse.net/media/img/ef/blog/django-pony.jpg">
mypic.jpg becomes <img src="/MEDIA_PATH/mypic.jpg">
Requires Python-Markdown 1.6+
'''
import simplewiki.settings as settings
import markdown
try:
# Markdown 2.1.0 changed from 2.0.3. We try importing the new version first,
# but import the 2.0.3 version if it fails
from markdown.util import etree
except:
from markdown import etree
class ImageExtension(markdown.Extension):
    """Markdown extension that turns lone image links into <img> elements."""

    def __init__(self, configs):
        # NOTE(review): markdown.Extension.__init__ is never called; this
        # relies on setConfig() working without it -- confirm against the
        # installed Markdown version.
        for key, value in configs:
            self.setConfig(key, value)

    def add_inline(self, md, name, klass, re):
        """Register inline pattern klass(re) ahead of reference handling."""
        pattern = klass(re)
        pattern.md = md
        pattern.ext = self
        md.inlinePatterns.add(name, pattern, "<reference")

    def extendMarkdown(self, md, md_globals):
        # The regex captures protocol, domain, path and file extension of a
        # lone URL/path; ImageLink.handleMatch decides if it is an image.
        self.add_inline(md, 'image', ImageLink,
                        r'^(?P<proto>([^:/?#])+://)?(?P<domain>([^/?#]*)/)?(?P<path>[^?#]*\.(?P<ext>[^?#]{3,4}))(?:\?([^#]*))?(?:#(.*))?$')
class ImageLink(markdown.inlinepatterns.Pattern):
    """Inline pattern rendering a matched image URL/path as an <img>."""

    def handleMatch(self, m):
        img = etree.Element('img')
        # Absolute links keep their protocol; bare paths get http:// later.
        proto = m.group('proto') or "http://"
        domain = m.group('domain')
        path = m.group('path')
        ext = m.group('ext')

        # A fixer upper
        # Only whitelisted extensions are treated as images.
        if ext.lower() in settings.WIKI_IMAGE_EXTENSIONS:
            if domain:
                src = proto + domain + path
            elif path:
                # We need a nice way to source local attachments...
                src = "/wiki/media/" + path + ".upload"
            else:
                src = ''
            img.set('src', src)
        # NOTE(review): when the extension is not allowed, an <img> with no
        # src attribute is still returned -- confirm this is intended.
        return img
def makeExtension(configs=None):
    """Entry point used by markdown.Markdown(extensions=[...])."""
    return ImageExtension(configs=configs)
if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
| agpl-3.0 |
MattsFleaMarket/python-for-android | python3-alpha/extra_modules/gdata/codesearch/__init__.py | 274 | 4934 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2007 Benoit Chesneau <benoitc@metavers.net>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Contains extensions to Atom objects used by Google Codesearch"""
__author__ = 'Benoit Chesneau'
import atom
import gdata
# XML namespace for Google Code Search elements.
CODESEARCH_NAMESPACE = 'http://schemas.google.com/codesearch/2006'
# Clark-notation template for qualified tag names. The URI must be identical
# to CODESEARCH_NAMESPACE; the original used a typo'd "shema.google.com"
# host that could never match elements in the real namespace.
CODESEARCH_TEMPLATE = '{http://schemas.google.com/codesearch/2006}%s'
class Match(atom.AtomBase):
    """ The Google Codesearch match element """

    _tag = 'match'
    _namespace = CODESEARCH_NAMESPACE
    # Inherit the base converters, then register match-specific attributes.
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['lineNumber'] = 'line_number'
    _attributes['type'] = 'type'

    def __init__(self, line_number=None, type=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.text = text
        self.type = type
        self.line_number = line_number
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
class File(atom.AtomBase):
    """ The Google Codesearch file element"""

    _tag = 'file'
    _namespace = CODESEARCH_NAMESPACE
    # Inherit base converters; <codesearch:file> carries only a name attr.
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['name'] = 'name'

    def __init__(self, name=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.text = text
        self.name = name
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
class Package(atom.AtomBase):
    """ The Google Codesearch package element"""

    _tag = 'package'
    _namespace = CODESEARCH_NAMESPACE
    # Inherit base converters; <codesearch:package> carries name and uri.
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['name'] = 'name'
    _attributes['uri'] = 'uri'

    def __init__(self, name=None, uri=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.text = text
        self.name = name
        self.uri = uri
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
class CodesearchEntry(gdata.GDataEntry):
    """ Google codesearch atom entry"""

    _tag = gdata.GDataEntry._tag
    _namespace = gdata.GDataEntry._namespace
    _children = gdata.GDataEntry._children.copy()
    _attributes = gdata.GDataEntry._attributes.copy()

    # Codesearch-specific child elements; match is repeatable (list).
    _children['{%s}file' % CODESEARCH_NAMESPACE] = ('file', File)
    _children['{%s}package' % CODESEARCH_NAMESPACE] = ('package', Package)
    _children['{%s}match' % CODESEARCH_NAMESPACE] = ('match', [Match])

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 match=None,
                 extension_elements=None, extension_attributes=None, text=None):
        # NOTE(review): `text` is accepted but the base __init__ is called
        # with text=None, so the argument is effectively discarded -- looks
        # like an upstream oversight; confirm before relying on it.
        gdata.GDataEntry.__init__(self, author=author, category=category,
                                  content=content, atom_id=atom_id, link=link,
                                  published=published, title=title,
                                  updated=updated, text=None)

        self.match = match or []
def CodesearchEntryFromString(xml_string):
    """Converts an XML string into a CodesearchEntry object.

    Args:
      xml_string: string The XML describing a Codesearch feed entry.

    Returns:
      A CodesearchEntry object corresponding to the given XML.
    """
    return atom.CreateClassFromXMLString(CodesearchEntry, xml_string)
class CodesearchFeed(gdata.GDataFeed):
    """feed containing list of Google codesearch Items"""

    _tag = gdata.GDataFeed._tag
    _namespace = gdata.GDataFeed._namespace
    _children = gdata.GDataFeed._children.copy()
    _attributes = gdata.GDataFeed._attributes.copy()
    # Entries in this feed deserialize as CodesearchEntry, not GDataEntry.
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CodesearchEntry])
def CodesearchFeedFromString(xml_string):
    """Converts an XML string into a CodesearchFeed object.

    Args:
      xml_string: string The XML describing a Codesearch feed.

    Returns:
      A CodesearchFeed object corresponding to the given XML.
    """
    return atom.CreateClassFromXMLString(CodesearchFeed, xml_string)
| apache-2.0 |
hkaj/birdy_server | core/db.py | 1 | 4104 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
import psycopg2
from flask import g
class Retriever(object):
    """
    Retrieves data from the DB.
    The 'fields' param accepts wildcards.
    They still need to be in a list though

    WARNING: fields/table/condition are interpolated directly into the SQL
    text, so they must never come from untrusted input (use parameterized
    queries if that ever changes).
    """
    def __init__(self, fields, table, cond=None):
        super(Retriever, self).__init__()
        self.fields = fields
        self.table = table
        self.condition = cond

    def build_req(self):
        """Return the SELECT statement, omitting WHERE when no condition is
        set (the original formatted 'WHERE None' and then stripped it)."""
        req = "SELECT %s FROM %s" % (', '.join(self.fields), self.table)
        if self.condition:
            req = "%s WHERE %s" % (req, self.condition)
        return "%s;" % req

    def jsonify(self, result):
        """Map DB rows (sequences aligned with self.fields) to a JSON string.

        A single row serializes to an object, several rows to an array.
        datetime 'last_update' values become ISO strings; binary 'avatar'
        values are stringified.
        """
        json_result = []
        for line in result:
            dict_res = {self.fields[pos]: line[pos] for pos in range(len(self.fields))}
            if 'last_update' in dict_res:
                dict_res['last_update'] = dict_res['last_update'].isoformat()
            if 'avatar' in dict_res:
                dict_res['avatar'] = str(dict_res['avatar'])
            json_result.append(dict_res)
        json_result = json_result[0] if len(json_result) == 1 else json_result
        return json.dumps(json_result)

    def fetch(self):
        """Run the query on the request-bound connection and return JSON."""
        cur = g.db.cursor()
        req = self.build_req()
        cur.execute(req)
        res = self.jsonify(cur.fetchall())
        cur.close()
        return res
class Updater(object):
    """Updates DB data.

    WARNING: table/values/condition are interpolated directly into the SQL
    text, so they must never come from untrusted input.
    """
    def __init__(self, table, data_dict, cond=None):
        super(Updater, self).__init__()
        self.table = table
        self.data_dict = data_dict
        self.condition = cond

    def build_req(self):
        """Return the UPDATE statement, omitting WHERE when no condition is
        set.

        Fixes the original fallback `re.split('WHERE')[0]`, which raised a
        TypeError (and referenced the never-imported `re` module) whenever
        no condition was given.  Also uses items() instead of the
        Python-2-only iteritems().
        """
        if 'avatar' in self.data_dict:
            # Binary image data must be wrapped for the DB driver.
            self.data_dict['avatar'] = psycopg2.Binary(self.data_dict['avatar'])
        modified_fields = ', '.join(['"%s"=\'%s\'' % (k, v)
                                     for k, v in self.data_dict.items()])
        req = "UPDATE %s SET %s" % (self.table, modified_fields)
        if self.condition:
            req = "%s WHERE %s" % (req, self.condition)
        return "%s;" % req

    def update(self):
        """Execute the UPDATE on the request-bound connection; JSON status."""
        req = self.build_req()
        cur = g.db.cursor()
        try:
            cur.execute(req)
            g.db.commit()
            # we could use it to return the modified data.
            # resp = Retriever(['*'], self.table, self.condition).fetch()
            cur.close()
            return '''{"resp": "OK"}'''
        except:
            return '''{"resp": "ERROR - Failed to update the values."}'''
class Deleter(object):
    """Removes from the DB every row matching a condition."""

    def __init__(self, table, cond):
        super(Deleter, self).__init__()
        self.table = table
        self.condition = cond

    def build_req(self):
        """Return the DELETE statement (no trailing semicolon, as before)."""
        return "DELETE FROM {0} WHERE {1}".format(self.table, self.condition)

    def delete(self):
        """Execute the DELETE and report the outcome as a JSON string."""
        cursor = g.db.cursor()
        try:
            cursor.execute(self.build_req())
            g.db.commit()
            cursor.close()
            return '{"resp": "OK"}'
        except:
            return '{"resp": "ERROR - Failed to remove the data from the database."}'
class Inserter(object):
    """Adds new rows to the DB."""

    def __init__(self, table, data_dict):
        super(Inserter, self).__init__()
        self.table = table
        self.data_dict = data_dict

    def build_req(self):
        """Return the INSERT statement for the stored column/value mapping."""
        if 'avatar' in self.data_dict:
            # Binary image data must be wrapped for the DB driver.
            self.data_dict['avatar'] = psycopg2.Binary(self.data_dict['avatar'])
        columns = []
        values = []
        for key in self.data_dict:
            columns.append('"%s"' % key)
            values.append("'%s'" % self.data_dict[key])
        return "INSERT INTO %s(%s) VALUES (%s);" % (
            self.table, ', '.join(columns), ', '.join(values))

    def insert(self):
        """Execute the INSERT and report the outcome as a JSON string."""
        cursor = g.db.cursor()
        try:
            cursor.execute(self.build_req())
            g.db.commit()
            cursor.close()
            return '{"resp": "OK"}'
        except:
            return '{"resp": "ERROR - Failed to save the new data."}'
| gpl-3.0 |
iancze/Pysplotter | test_label.py | 1 | 6392 | import matplotlib
matplotlib.use("Qt4Agg")
from matplotlib.pyplot import figure, show
from matplotlib.patches import Ellipse
import numpy as np
if 1:
    # Demo figure 1: one annotation per arrow/connection style that
    # matplotlib's annotate() supports.
    fig = figure(1, figsize=(8, 5))
    ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-4, 3))

    t = np.arange(0.0, 5.0, 0.01)
    s = np.cos(2 * np.pi * t)
    line, = ax.plot(t, s, lw=3, color='purple')

    ax.annotate('arrowstyle', xy=(0, 1), xycoords='data',
                xytext=(-50, 30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->")
                )

    ax.annotate('arc3', xy=(0.5, -1), xycoords='data',
                xytext=(-30, -30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->",
                                connectionstyle="arc3,rad=.2")
                )

    ax.annotate('arc', xy=(1., 1), xycoords='data',
                xytext=(-40, 30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->",
                                connectionstyle="arc,angleA=0,armA=30,rad=10"),
                )

    ax.annotate('arc', xy=(1.5, -1), xycoords='data',
                xytext=(-40, -30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->",
                                connectionstyle="arc,angleA=0,armA=20,angleB=-90,armB=15,rad=7"),
                )

    ax.annotate('angle1', xy=(2., 1), xycoords='data',
                xytext=(-50, 30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->",
                                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
                )

    ax.annotate('angle2(3)', xy=(2.5, -1), xycoords='data',
                xytext=(-50, -30), textcoords='offset points',
                arrowprops=dict(arrowstyle="->",
                                connectionstyle="angle3,angleA=0,angleB=-90"),
                )

    # The remaining 'angleN' variants also exercise bbox styles around the
    # annotation text.
    ax.annotate('angle3', xy=(3., 1), xycoords='data',
                xytext=(-50, 30), textcoords='offset points',
                bbox=dict(boxstyle="round,rounding_size=0.2", fc="white"),
                arrowprops=dict(arrowstyle="->",
                                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
                )

    ax.annotate('angle4', xy=(3.5, -1), xycoords='data',
                xytext=(-70, -60), textcoords='offset points',
                size=20,
                bbox=dict(boxstyle="round4,pad=.5", fc="0.8"),
                arrowprops=dict(arrowstyle="->",
                                connectionstyle="angle,angleA=0,angleB=-90,rad=10"),
                )

    ax.annotate('angle5', xy=(4., 1), xycoords='data',
                xytext=(-50, 30), textcoords='offset points',
                bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="->",
                                shrinkA=0, shrinkB=10,
                                connectionstyle="angle,angleA=0,angleB=90,rad=10"),
                )

    # A text-less double-headed "bar" connector between two data points.
    ann = ax.annotate('', xy=(4., 1.), xycoords='data',
                      xytext=(4.5, -1), textcoords='data',
                      arrowprops=dict(arrowstyle="<->",
                                      connectionstyle="bar",
                                      ec="k",
                                      shrinkA=5, shrinkB=5,
                                      )
                      )
def plot_more():
    """Draw demo figure 2: patch-aware arrows and 'fancy' arrow styles."""
    fig = figure(2)
    fig.clf()
    ax = fig.add_subplot(111, autoscale_on=False, xlim=(-1, 5), ylim=(-5, 3))

    # Target patch: arrows with patchB=el stop at the ellipse boundary.
    el = Ellipse((2, -1), 0.5, 0.5)
    ax.add_patch(el)

    ax.annotate('$->$', xy=(2., -1), xycoords='data',
                xytext=(-150, -140), textcoords='offset points',
                bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="->",
                                patchB=el,
                                connectionstyle="angle,angleA=90,angleB=0,rad=10"),
                )

    ax.annotate('fancy', xy=(2., -1), xycoords='data',
                xytext=(-100, 60), textcoords='offset points',
                size=20,
                # bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="fancy",
                                fc="0.6", ec="none",
                                patchB=el,
                                connectionstyle="angle3,angleA=0,angleB=-90"),
                )

    ax.annotate('simple', xy=(2., -1), xycoords='data',
                xytext=(100, 60), textcoords='offset points',
                size=20,
                # bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="simple",
                                fc="0.6", ec="none",
                                patchB=el,
                                connectionstyle="arc3,rad=0.3"),
                )

    ax.annotate('wedge1', xy=(2., -1), xycoords='data',
                xytext=(-100, -100), textcoords='offset points',
                size=20,
                # bbox=dict(boxstyle="round", fc="0.8"),
                arrowprops=dict(arrowstyle="wedge,tail_width=0.7",
                                fc="0.6", ec="none",
                                patchB=el,
                                connectionstyle="arc3,rad=-0.3"),
                )

    # The wedge2/wedge3 variants tint the bbox to match the arrow body.
    ann = ax.annotate('wedge2', xy=(2., -1), xycoords='data',
                      xytext=(0, -45), textcoords='offset points',
                      size=20,
                      bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec=(1., .5, .5)),
                      arrowprops=dict(arrowstyle="wedge,tail_width=1.",
                                      fc=(1.0, 0.7, 0.7), ec=(1., .5, .5),
                                      patchA=None,
                                      patchB=el,
                                      relpos=(0.2, 0.8),
                                      connectionstyle="arc3,rad=-0.1"),
                      )

    ann = ax.annotate('wedge3', xy=(2., -1), xycoords='data',
                      xytext=(35, 0), textcoords='offset points',
                      size=20, va="center",
                      bbox=dict(boxstyle="round", fc=(1.0, 0.7, 0.7), ec="none"),
                      arrowprops=dict(arrowstyle="wedge,tail_width=1.",
                                      fc=(1.0, 0.7, 0.7), ec="none",
                                      patchA=None,
                                      patchB=el,
                                      relpos=(0.2, 0.5),
                                      )
                      )
# Display the demo figures (blocks until the windows are closed).
show()
rockerbox/kazoo | kazoo/tests/util.py | 18 | 3657 | ##############################################################################
#
# Copyright Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import logging
import os
import time
TRAVIS = os.environ.get('TRAVIS', False)
TRAVIS_ZK_VERSION = TRAVIS and os.environ.get('ZOOKEEPER_VERSION', None)
if TRAVIS_ZK_VERSION:
if '-' in TRAVIS_ZK_VERSION:
# Ignore pre-release markers like -alpha
TRAVIS_ZK_VERSION = TRAVIS_ZK_VERSION.split('-')[0]
TRAVIS_ZK_VERSION = tuple([int(n) for n in TRAVIS_ZK_VERSION.split('.')])
class Handler(logging.Handler):
    """Log handler that records every emitted record for later inspection."""

    def __init__(self, *names, **kw):
        logging.Handler.__init__(self)
        self.names = names
        self.records = []
        self.setLoggerLevel(**kw)

    def setLoggerLevel(self, level=1):
        """Remember the level to force on installed loggers (default 1)."""
        self.level = level
        self.oldlevels = {}

    def emit(self, record):
        """Capture the record instead of writing it anywhere."""
        self.records.append(record)

    def clear(self):
        """Drop all captured records in place (existing aliases stay valid)."""
        del self.records[:]

    def install(self):
        """Attach to each named logger, saving its previous level."""
        for logger_name in self.names:
            target = logging.getLogger(logger_name)
            self.oldlevels[logger_name] = target.level
            target.setLevel(self.level)
            target.addHandler(self)

    def uninstall(self):
        """Detach from the named loggers and restore their saved levels."""
        for logger_name in self.names:
            target = logging.getLogger(logger_name)
            target.setLevel(self.oldlevels[logger_name])
            target.removeHandler(self)

    def __str__(self):
        # One "name LEVEL" header per record, followed by the message with
        # blank lines stripped.
        formatted = []
        for record in self.records:
            body = '\n'.join(line
                             for line in record.getMessage().split('\n')
                             if line.strip())
            formatted.append("%s %s\n  %s" %
                             (record.name, record.levelname, body))
        return '\n'.join(formatted)
class InstalledHandler(Handler):
    """A Handler that attaches itself to its loggers on construction."""

    def __init__(self, *names, **kw):
        super(InstalledHandler, self).__init__(*names, **kw)
        self.install()
class Wait(object):
    """Poll a condition until it holds or a timeout expires.

    Callable directly with a predicate, or as a decorator factory when the
    predicate is omitted.  The clock and sleep functions are injectable for
    testing: getnow/getsleep are zero-arg callables *returning* the actual
    time/sleep functions.
    """

    class TimeOutWaitingFor(Exception):
        "A test condition timed out"

    # Class-level defaults, overridable per instance or per call.
    timeout = 9
    wait = .01

    def __init__(self, timeout=None, wait=None, exception=None,
                 getnow=(lambda: time.time), getsleep=(lambda: time.sleep)):
        if timeout is not None:
            self.timeout = timeout
        if wait is not None:
            self.wait = wait
        if exception is not None:
            self.TimeOutWaitingFor = exception
        self.getnow = getnow
        self.getsleep = getsleep

    def __call__(self, func=None, timeout=None, wait=None, message=None):
        """Block until func() is truthy; raise TimeOutWaitingFor on timeout."""
        if func is None:
            # Decorator-factory form: capture the options, wait for func later.
            return lambda func: self(func, timeout, wait, message)

        if func():
            return

        clock = self.getnow()
        pause = self.getsleep()
        timeout = self.timeout if timeout is None else timeout
        delay = float(self.wait if wait is None else wait)
        deadline = clock() + timeout

        while True:
            pause(delay)
            if func():
                return
            if clock() > deadline:
                raise self.TimeOutWaitingFor(
                    message or
                    getattr(func, '__doc__') or
                    getattr(func, '__name__'))
# Module-level convenience instance with the default timeout/poll interval.
wait = Wait()
| apache-2.0 |
dataxu/ansible | test/units/modules/network/junos/test_junos_command.py | 56 | 6207 | # (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
from lxml.etree import fromstring
except ImportError:
from xml.etree.ElementTree import fromstring
from ansible.compat.tests.mock import patch
from ansible.modules.network.junos import junos_command
from units.modules.utils import set_module_args
from .junos_module import TestJunosModule, load_fixture
# Maps a Junos RPC tag name to the CLI command whose fixture file backs it
# (used by load_fixtures below to pick the right canned response).
RPC_CLI_MAP = {
    'get-software-information': 'show version'
}
class TestJunosCommandModule(TestJunosModule):
    """Unit tests for the junos_command Ansible module (netconf transport).

    All device interaction is patched out in setUp(); RPC replies are served
    from on-disk fixture files resolved by load_fixtures() below.
    """
    module = junos_command
    def setUp(self):
        super(TestJunosCommandModule, self).setUp()
        # Patch every connection/RPC entry point so no real device is needed.
        self.mock_conn = patch('ansible.module_utils.network.junos.junos.Connection')
        self.conn = self.mock_conn.start()
        self.mock_netconf = patch('ansible.module_utils.network.junos.junos.NetconfConnection')
        self.netconf_conn = self.mock_netconf.start()
        self.mock_exec_rpc = patch('ansible.modules.network.junos.junos_command.exec_rpc')
        self.exec_rpc = self.mock_exec_rpc.start()
        self.mock_netconf_rpc = patch('ansible.module_utils.network.common.netconf.NetconfConnection')
        self.netconf_rpc = self.mock_netconf_rpc.start()
        self.mock_get_connection = patch('ansible.modules.network.junos.junos_command.get_connection')
        self.get_connection = self.mock_get_connection.start()
        self.mock_get_capabilities = patch('ansible.modules.network.junos.junos_command.get_capabilities')
        self.get_capabilities = self.mock_get_capabilities.start()
        # Force the module down the netconf code path for every test.
        self.get_capabilities.return_value = {'network_api': 'netconf'}
    def tearDown(self):
        super(TestJunosCommandModule, self).tearDown()
        # Stop every patcher started in setUp so mock state does not leak.
        self.mock_conn.stop()
        self.mock_netconf.stop()
        self.mock_get_capabilities.stop()
        self.mock_netconf_rpc.stop()
        self.mock_exec_rpc.stop()
        self.mock_get_connection.stop()
    def load_fixtures(self, commands=None, format='text', changed=False):
        # Serve each RPC from a fixture named "<command>_<format>.txt"; RPCs
        # given by tag (no text) are first mapped to a CLI command.
        def load_from_file(*args, **kwargs):
            element = fromstring(args[1])
            if element.text:
                path = str(element.text)
            else:
                path = RPC_CLI_MAP[str(element.tag)]
            filename = path.replace(' ', '_')
            filename = '%s_%s.txt' % (filename, format)
            return load_fixture(filename)
        self.exec_rpc.side_effect = load_from_file
    def test_junos_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Hostname:'))
    def test_junos_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Hostname:'))
    def test_junos_command_wait_for(self):
        wait_for = 'result[0] contains "Junos:"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()
    def test_junos_command_wait_for_fails(self):
        # Default retry count is 10, so a never-matching condition should
        # issue the RPC exactly 10 times before failing.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.exec_rpc.call_count, 10)
    def test_junos_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.exec_rpc.call_count, 2)
    def test_junos_command_match_any(self):
        wait_for = ['result[0] contains "Junos:"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()
    def test_junos_command_match_all(self):
        wait_for = ['result[0] contains "Junos:"',
                    'result[0] contains "JUNOS Software Release"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()
    def test_junos_command_match_all_failure(self):
        wait_for = ['result[0] contains "Junos:"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
    def test_junos_command_simple_json(self):
        set_module_args(dict(commands=['show version'], display='json'))
        result = self.execute_module(format='json')
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue("software-information" in result['stdout'][0])
    def test_junos_command_simple_rpc_text(self):
        set_module_args(dict(rpcs=['get-software-information'], display='text'))
        result = self.execute_module(format='text')
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Hostname:'))
    def test_junos_command_simple_rpc_json(self):
        set_module_args(dict(rpcs=['get-software-information'], display='json'))
        result = self.execute_module(format='json')
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue("software-information" in result['stdout'][0])
| gpl-3.0 |
igor-rangel7l/novoigorrangel.repository | plugin.video.SportsDevil/service/oscrypto/_openssl/symmetric.py | 7 | 22431 | # coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import math
from .._errors import pretty_message
from .._ffi import new, null, is_null, buffer_from_bytes, bytes_from_buffer, deref
from ._libcrypto import libcrypto, LibcryptoConst, handle_openssl_error
from ..util import rand_bytes
from .._types import type_name, byte_cls
# Explicit public API of this module; the leading-underscore helpers defined
# below are internal implementation details.
__all__ = [
    'aes_cbc_no_padding_decrypt',
    'aes_cbc_no_padding_encrypt',
    'aes_cbc_pkcs7_decrypt',
    'aes_cbc_pkcs7_encrypt',
    'des_cbc_pkcs5_decrypt',
    'des_cbc_pkcs5_encrypt',
    'rc2_cbc_pkcs5_decrypt',
    'rc2_cbc_pkcs5_encrypt',
    'rc4_decrypt',
    'rc4_encrypt',
    'tripledes_cbc_pkcs5_decrypt',
    'tripledes_cbc_pkcs5_encrypt',
]
def aes_cbc_no_padding_encrypt(key, data, iv):
    """
    Encrypts plaintext using AES in CBC mode with a 128, 192 or 256 bit key and
    no padding. Because no padding is applied, the plaintext must already be an
    exact multiple of 16 bytes long (and the ciphertext will be too).

    :param key:
        The encryption key - a byte string either 16, 24 or 32 bytes long

    :param data:
        The plaintext - a byte string whose length is a multiple of 16

    :param iv:
        The initialization vector - either a byte string 16-bytes long or None
        to generate an IV

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by OpenSSL

    :return:
        A tuple of two byte strings (iv, ciphertext)
    """
    cipher = _calculate_aes_cipher(key)
    if not iv:
        iv = rand_bytes(16)
    elif len(iv) != 16:
        raise ValueError(pretty_message(
            '''
            iv must be 16 bytes long - is %s
            ''',
            len(iv)
        ))
    if len(data) % 16 != 0:
        raise ValueError(pretty_message(
            '''
            data must be a multiple of 16 bytes long - is %s
            ''',
            len(data)
        ))
    return (iv, _encrypt(cipher, key, data, iv, False))
def aes_cbc_no_padding_decrypt(key, data, iv):
    """Decrypt AES-CBC ciphertext that was produced without padding.

    The key length (16, 24 or 32 bytes) selects AES-128/192/256; the IV must
    be exactly 16 bytes. Returns the plaintext as a byte string. Raises
    ValueError for invalid parameter values, TypeError for wrong parameter
    types and OSError when OpenSSL reports an error.
    """
    aes_variant = _calculate_aes_cipher(key)
    if len(iv) != 16:
        raise ValueError(pretty_message(
            '''
            iv must be 16 bytes long - is %s
            ''',
            len(iv)
        ))
    plaintext = _decrypt(aes_variant, key, data, iv, False)
    return plaintext
def aes_cbc_pkcs7_encrypt(key, data, iv):
    """Encrypt plaintext with AES in CBC mode, applying PKCS#7 padding.

    The key length (16, 24 or 32 bytes) selects AES-128/192/256. When *iv*
    is falsy, a random 16-byte IV is generated. Returns an (iv, ciphertext)
    tuple of byte strings. Raises ValueError for invalid parameter values,
    TypeError for wrong parameter types and OSError when OpenSSL reports an
    error.
    """
    aes_variant = _calculate_aes_cipher(key)
    if not iv:
        iv = rand_bytes(16)
    elif len(iv) != 16:
        raise ValueError(pretty_message(
            '''
            iv must be 16 bytes long - is %s
            ''',
            len(iv)
        ))
    ciphertext = _encrypt(aes_variant, key, data, iv, True)
    return (iv, ciphertext)
def aes_cbc_pkcs7_decrypt(key, data, iv):
    """Decrypt AES-CBC ciphertext, removing PKCS#7 padding.

    The key length (16, 24 or 32 bytes) selects AES-128/192/256; the IV must
    be exactly 16 bytes. Returns the plaintext as a byte string. Raises
    ValueError for invalid parameter values, TypeError for wrong parameter
    types and OSError when OpenSSL reports an error.
    """
    aes_variant = _calculate_aes_cipher(key)
    if len(iv) != 16:
        raise ValueError(pretty_message(
            '''
            iv must be 16 bytes long - is %s
            ''',
            len(iv)
        ))
    plaintext = _decrypt(aes_variant, key, data, iv, True)
    return plaintext
def _calculate_aes_cipher(key):
"""
Determines if the key is a valid AES 128, 192 or 256 key
:param key:
A byte string of the key to use
:raises:
ValueError - when an invalid key is provided
:return:
A unicode string of the AES variation - "aes128", "aes192" or "aes256"
"""
if len(key) not in [16, 24, 32]:
raise ValueError(pretty_message(
'''
key must be either 16, 24 or 32 bytes (128, 192 or 256 bits)
long - is %s
''',
len(key)
))
if len(key) == 16:
cipher = 'aes128'
elif len(key) == 24:
cipher = 'aes192'
elif len(key) == 32:
cipher = 'aes256'
return cipher
def rc4_encrypt(key, data):
    """Encrypt plaintext with the RC4 stream cipher.

    The key must be 5-16 bytes (40-128 bits) long. Returns the ciphertext as
    a byte string. Raises ValueError for an invalid key length, TypeError
    for wrong parameter types and OSError when OpenSSL reports an error.
    """
    if not 5 <= len(key) <= 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))
    # RC4 is a stream cipher: no IV and no padding flag apply.
    return _encrypt('rc4', key, data, None, None)
def rc4_decrypt(key, data):
    """Decrypt RC4 ciphertext.

    The key must be 5-16 bytes (40-128 bits) long. Returns the plaintext as
    a byte string. Raises ValueError for an invalid key length, TypeError
    for wrong parameter types and OSError when OpenSSL reports an error.
    """
    if not 5 <= len(key) <= 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))
    # RC4 is a stream cipher: no IV and no padding flag apply.
    return _decrypt('rc4', key, data, None, None)
def rc2_cbc_pkcs5_encrypt(key, data, iv):
    """
    Encrypts plaintext using RC2 in CBC mode with a 40-128 bit key and PKCS#5
    padding.

    :param key:
        The encryption key - a byte string 5 to 16 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The initialization vector - a byte string 8-bytes long or None
        to generate an IV

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by OpenSSL

    :return:
        A tuple of two byte strings (iv, ciphertext)
    """
    if len(key) < 5 or len(key) > 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))
    if not iv:
        iv = rand_bytes(8)
    elif len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))
    return (iv, _encrypt('rc2', key, data, iv, True))
def rc2_cbc_pkcs5_decrypt(key, data, iv):
    """
    Decrypts RC2 ciphertext in CBC mode using a 40-128 bit key and PKCS#5
    padding.

    :param key:
        The encryption key - a byte string 5 to 16 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string 8 bytes long

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by OpenSSL

    :return:
        A byte string of the plaintext
    """
    if len(key) < 5 or len(key) > 16:
        raise ValueError(pretty_message(
            '''
            key must be 5 to 16 bytes (40 to 128 bits) long - is %s
            ''',
            len(key)
        ))
    if len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))
    return _decrypt('rc2', key, data, iv, True)
def tripledes_cbc_pkcs5_encrypt(key, data, iv):
    """Encrypt plaintext with 3DES in CBC mode using PKCS#5 padding.

    Accepts a 16-byte (2-key) or 24-byte (3-key) key. When *iv* is falsy, a
    random 8-byte IV is generated. Returns an (iv, ciphertext) tuple of byte
    strings. Raises ValueError for invalid parameter values, TypeError for
    wrong parameter types and OSError when OpenSSL reports an error.
    """
    if len(key) not in (16, 24):
        raise ValueError(pretty_message(
            '''
            key must be 16 bytes (2 key) or 24 bytes (3 key) long - %s
            ''',
            len(key)
        ))
    if not iv:
        iv = rand_bytes(8)
    elif len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - %s
            ''',
            len(iv)
        ))
    if len(key) == 16:
        # OpenSSL's 2-key EDE cipher still consumes 24 key bytes: K1 K2 K1.
        key += key[0:8]
        cipher = 'tripledes_2key'
    else:
        cipher = 'tripledes_3key'
    return (iv, _encrypt(cipher, key, data, iv, True))
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
    """Decrypt 3DES-CBC ciphertext (2-key or 3-key) with PKCS#5 padding.

    Accepts a 16-byte (2-key) or 24-byte (3-key) key and an 8-byte IV.
    Returns the plaintext as a byte string. Raises ValueError for invalid
    parameter values, TypeError for wrong parameter types and OSError when
    OpenSSL reports an error.
    """
    if len(key) not in (16, 24):
        raise ValueError(pretty_message(
            '''
            key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
            ''',
            len(key)
        ))
    if len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))
    if len(key) == 16:
        # OpenSSL's 2-key EDE cipher still consumes 24 key bytes: K1 K2 K1.
        key += key[0:8]
        cipher = 'tripledes_2key'
    else:
        cipher = 'tripledes_3key'
    return _decrypt(cipher, key, data, iv, True)
def des_cbc_pkcs5_encrypt(key, data, iv):
    """Encrypt plaintext with single DES in CBC mode using PKCS#5 padding.

    The key is 8 bytes long (56 key bits plus parity bits). When *iv* is
    falsy, a random 8-byte IV is generated. Returns an (iv, ciphertext)
    tuple of byte strings. Raises ValueError for invalid parameter values,
    TypeError for wrong parameter types and OSError when OpenSSL reports an
    error.
    """
    if len(key) != 8:
        raise ValueError(pretty_message(
            '''
            key must be 8 bytes (56 bits + 8 parity bits) long - is %s
            ''',
            len(key)
        ))
    if not iv:
        iv = rand_bytes(8)
    elif len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))
    ciphertext = _encrypt('des', key, data, iv, True)
    return (iv, ciphertext)
def des_cbc_pkcs5_decrypt(key, data, iv):
    """Decrypt single-DES CBC ciphertext with PKCS#5 padding.

    The key is 8 bytes long (56 key bits plus parity bits) and the IV is 8
    bytes. Returns the plaintext as a byte string. Raises ValueError for
    invalid parameter values, TypeError for wrong parameter types and
    OSError when OpenSSL reports an error.
    """
    if len(key) != 8:
        raise ValueError(pretty_message(
            '''
            key must be 8 bytes (56 bits + 8 parity bits) long - is %s
            ''',
            len(key)
        ))
    if len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))
    plaintext = _decrypt('des', key, data, iv, True)
    return plaintext
def _encrypt(cipher, key, data, iv, padding):
    """
    Encrypts plaintext via OpenSSL's EVP interface

    :param cipher:
        A unicode string of "aes128", "aes192", "aes256", "des",
        "tripledes_2key", "tripledes_3key", "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-32 bytes long

    :param data:
        The plaintext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by OpenSSL

    :return:
        A byte string of the ciphertext
    """
    if not isinstance(key, byte_cls):
        raise TypeError(pretty_message(
            '''
            key must be a byte string, not %s
            ''',
            type_name(key)
        ))
    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))
    if cipher != 'rc4' and not isinstance(iv, byte_cls):
        raise TypeError(pretty_message(
            '''
            iv must be a byte string, not %s
            ''',
            type_name(iv)
        ))
    # BUGFIX: this previously read "and not padding", which rejected an
    # explicit padding=False and made aes_cbc_no_padding_encrypt() always
    # raise. Mirror _decrypt(): only a missing (None) padding flag is an
    # error for block ciphers; RC4 has no padding concept at all.
    if cipher != 'rc4' and padding is None:
        raise ValueError('padding must be specified')
    evp_cipher_ctx = None
    try:
        evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
        if is_null(evp_cipher_ctx):
            handle_openssl_error(0)
        evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
        if iv is None:
            iv = null()
        if cipher in set(['rc2', 'rc4']):
            # Variable-key-length ciphers: initialize with a NULL key first
            # so the key length (and RC2 effective bits) can be configured,
            # then perform a second init with the real key below.
            res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
            handle_openssl_error(res)
            res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
            handle_openssl_error(res)
            if cipher == 'rc2':
                res = libcrypto.EVP_CIPHER_CTX_ctrl(
                    evp_cipher_ctx,
                    LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
                    len(key) * 8,
                    null()
                )
                handle_openssl_error(res)
            evp_cipher = null()
        res = libcrypto.EVP_EncryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
        handle_openssl_error(res)
        if padding is not None:
            res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
            handle_openssl_error(res)
        buffer = buffer_from_bytes(buffer_size)
        output_length = new(libcrypto, 'int *')
        res = libcrypto.EVP_EncryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
        handle_openssl_error(res)
        output = bytes_from_buffer(buffer, deref(output_length))
        # The final (possibly padding) block is written into the same buffer
        # and appended; output_length holds the byte count of each call.
        res = libcrypto.EVP_EncryptFinal_ex(evp_cipher_ctx, buffer, output_length)
        handle_openssl_error(res)
        output += bytes_from_buffer(buffer, deref(output_length))
        return output
    finally:
        # Always release the cipher context, even when an error was raised.
        if evp_cipher_ctx:
            libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)
def _decrypt(cipher, key, data, iv, padding):
    """
    Decrypts AES/RC4/RC2/3DES/DES ciphertext via OpenSSL's EVP interface

    :param cipher:
        A unicode string of "aes128", "aes192", "aes256", "des",
        "tripledes_2key", "tripledes_3key", "rc2", "rc4"

    :param key:
        The encryption key - a byte string 5-32 bytes long

    :param data:
        The ciphertext - a byte string

    :param iv:
        The initialization vector - a byte string - unused for RC4

    :param padding:
        Boolean, if padding should be used - unused for RC4

    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by OpenSSL

    :return:
        A byte string of the plaintext
    """
    if not isinstance(key, byte_cls):
        raise TypeError(pretty_message(
            '''
            key must be a byte string, not %s
            ''',
            type_name(key)
        ))
    if not isinstance(data, byte_cls):
        raise TypeError(pretty_message(
            '''
            data must be a byte string, not %s
            ''',
            type_name(data)
        ))
    if cipher != 'rc4' and not isinstance(iv, byte_cls):
        raise TypeError(pretty_message(
            '''
            iv must be a byte string, not %s
            ''',
            type_name(iv)
        ))
    # Block ciphers require an explicit padding decision (True or False);
    # RC4 is a stream cipher and has no padding concept.
    if cipher != 'rc4' and padding is None:
        raise ValueError('padding must be specified')
    evp_cipher_ctx = None
    try:
        evp_cipher_ctx = libcrypto.EVP_CIPHER_CTX_new()
        if is_null(evp_cipher_ctx):
            handle_openssl_error(0)
        evp_cipher, buffer_size = _setup_evp_encrypt_decrypt(cipher, data)
        if iv is None:
            iv = null()
        if cipher in set(['rc2', 'rc4']):
            # Variable-key-length ciphers: initialize with a NULL key first
            # so the key length (and RC2 effective bits) can be configured,
            # then perform a second init with the real key below.
            res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), null(), null())
            handle_openssl_error(res)
            res = libcrypto.EVP_CIPHER_CTX_set_key_length(evp_cipher_ctx, len(key))
            handle_openssl_error(res)
            if cipher == 'rc2':
                res = libcrypto.EVP_CIPHER_CTX_ctrl(
                    evp_cipher_ctx,
                    LibcryptoConst.EVP_CTRL_SET_RC2_KEY_BITS,
                    len(key) * 8,
                    null()
                )
                handle_openssl_error(res)
            evp_cipher = null()
        res = libcrypto.EVP_DecryptInit_ex(evp_cipher_ctx, evp_cipher, null(), key, iv)
        handle_openssl_error(res)
        if padding is not None:
            res = libcrypto.EVP_CIPHER_CTX_set_padding(evp_cipher_ctx, int(padding))
            handle_openssl_error(res)
        buffer = buffer_from_bytes(buffer_size)
        output_length = new(libcrypto, 'int *')
        res = libcrypto.EVP_DecryptUpdate(evp_cipher_ctx, buffer, output_length, data, len(data))
        handle_openssl_error(res)
        output = bytes_from_buffer(buffer, deref(output_length))
        # Final() verifies/strips any padding and may emit the last block
        # into the same buffer; output_length holds each call's byte count.
        res = libcrypto.EVP_DecryptFinal_ex(evp_cipher_ctx, buffer, output_length)
        handle_openssl_error(res)
        output += bytes_from_buffer(buffer, deref(output_length))
        return output
    finally:
        # Always release the cipher context, even when an error was raised.
        if evp_cipher_ctx:
            libcrypto.EVP_CIPHER_CTX_free(evp_cipher_ctx)
def _setup_evp_encrypt_decrypt(cipher, data):
    """
    Creates an EVP_CIPHER pointer object and determines the buffer size
    necessary for the operation.

    :param cipher:
        A unicode string of "aes128", "aes192", "aes256", "des",
        "tripledes_2key", "tripledes_3key", "rc2", "rc4"

    :param data:
        The plaintext or ciphertext as a byte string

    :return:
        A 2-element tuple with the first element being an EVP_CIPHER pointer
        and the second being an integer that is the required buffer size
    """
    evp_cipher = {
        'aes128': libcrypto.EVP_aes_128_cbc,
        'aes192': libcrypto.EVP_aes_192_cbc,
        'aes256': libcrypto.EVP_aes_256_cbc,
        'rc2': libcrypto.EVP_rc2_cbc,
        'rc4': libcrypto.EVP_rc4,
        'des': libcrypto.EVP_des_cbc,
        'tripledes_2key': libcrypto.EVP_des_ede_cbc,
        'tripledes_3key': libcrypto.EVP_des_ede3_cbc,
    }[cipher]()
    if cipher == 'rc4':
        # Stream cipher: output length always equals input length.
        buffer_size = len(data)
    else:
        block_size = {
            'aes128': 16,
            'aes192': 16,
            'aes256': 16,
            'rc2': 8,
            'des': 8,
            'tripledes_2key': 8,
            'tripledes_3key': 8,
        }[cipher]
        # Round up to whole blocks plus one extra block. The extra block is
        # needed because EVP_EncryptFinal_ex() emits a full padding block
        # when the input is empty or an exact block multiple; the previous
        # ceil-only sizing produced a zero-length buffer for empty input,
        # which the final padding block would then overflow.
        buffer_size = block_size * (len(data) // block_size + 1)
    return (evp_cipher, buffer_size)
| gpl-2.0 |
rldhont/Quantum-GIS | python/PyQt/PyQt5/Qt.py | 45 | 1036 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Qt.py
---------------------
Date : September 2016
Copyright : (C) 2015 by Marco Bernasocchi
Email : marco at opengis dot ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
# Attribution metadata for this compatibility shim module.
__author__ = 'Marco Bernasocchi'
__date__ = 'September 2016'
__copyright__ = '(C) 2016, Marco Bernasocchi'
# Re-export the entire PyQt5.Qt namespace through this module.
from PyQt5.Qt import *
| gpl-2.0 |
moijes12/oh-mainline | vendor/packages/Django/tests/modeltests/order_with_respect_to/tests.py | 150 | 2855 | from __future__ import absolute_import
from operator import attrgetter
from django.test import TestCase
from .models import Post, Question, Answer
class OrderWithRespectToTests(TestCase):
    """Tests for Meta.order_with_respect_to: per-parent insertion ordering
    and the generated get_X_order()/set_X_order()/get_next_in_order() APIs.
    """
    def test_basic(self):
        q1 = Question.objects.create(text="Which Beatle starts with the letter 'R'?")
        q2 = Question.objects.create(text="What is your name?")
        # Interleave answers for the two questions to show ordering is kept
        # per-question, not globally.
        Answer.objects.create(text="John", question=q1)
        Answer.objects.create(text="Jonno", question=q2)
        Answer.objects.create(text="Paul", question=q1)
        Answer.objects.create(text="Paulo", question=q2)
        Answer.objects.create(text="George", question=q1)
        Answer.objects.create(text="Ringo", question=q1)
        # The answers will always be ordered in the order they were inserted.
        self.assertQuerysetEqual(
            q1.answer_set.all(), [
                "John", "Paul", "George", "Ringo",
            ],
            attrgetter("text"),
        )
        # We can retrieve the answers related to a particular object, in the
        # order they were created, once we have a particular object.
        a1 = Answer.objects.filter(question=q1)[0]
        self.assertEqual(a1.text, "John")
        a2 = a1.get_next_in_order()
        self.assertEqual(a2.text, "Paul")
        a4 = list(Answer.objects.filter(question=q1))[-1]
        self.assertEqual(a4.text, "Ringo")
        self.assertEqual(a4.get_previous_in_order().text, "George")
        # Determining (and setting) the ordering for a particular item is also
        # possible.
        id_list = [o.pk for o in q1.answer_set.all()]
        self.assertEqual(a2.question.get_answer_order(), id_list)
        a5 = Answer.objects.create(text="Number five", question=q1)
        # It doesn't matter which answer we use to check the order, it will
        # always be the same.
        self.assertEqual(
            a2.question.get_answer_order(), a5.question.get_answer_order()
        )
        # The ordering can be altered:
        id_list = [o.pk for o in q1.answer_set.all()]
        x = id_list.pop()
        id_list.insert(-1, x)
        self.assertNotEqual(a5.question.get_answer_order(), id_list)
        a5.question.set_answer_order(id_list)
        self.assertQuerysetEqual(
            q1.answer_set.all(), [
                "John", "Paul", "George", "Number five", "Ringo"
            ],
            attrgetter("text")
        )
    def test_recursive_ordering(self):
        # order_with_respect_to pointing at a self-referential FK ("parent");
        # only children of the same parent share an ordering sequence.
        p1 = Post.objects.create(title='1')
        p2 = Post.objects.create(title='2')
        p1_1 = Post.objects.create(title="1.1", parent=p1)
        p1_2 = Post.objects.create(title="1.2", parent=p1)
        p2_1 = Post.objects.create(title="2.1", parent=p2)
        p1_3 = Post.objects.create(title="1.3", parent=p1)
        self.assertEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])
| agpl-3.0 |
hypnotika/namebench | setup.py | 173 | 5268 | #!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""distutils configuration: python setup.py install"""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import os
from libnamebench import version
from distutils.core import setup
# py2exe is optional: it is only needed when building a Windows executable,
# so a missing import is silently ignored on other platforms.
try:
  import py2exe
except ImportError:
  pass
# If you don't want 3rd party libraries included, set this in your environment.
if os.getenv('NO_THIRD_PARTY', None):
  packages=['libnamebench']
else:
  # Bundle the vendored third-party libraries alongside libnamebench.
  packages = [
    'libnamebench',
    'nb_third_party',
    'nb_third_party/dns',
    'nb_third_party/dns/rdtypes',
    'nb_third_party/dns/rdtypes/ANY',
    'nb_third_party/dns/rdtypes/IN',
    'nb_third_party/graphy',
    'nb_third_party/jinja2',
    'nb_third_party/httplib2',
    'nb_third_party/simplejson',
    'nb_third_party/graphy/backends',
    'nb_third_party/graphy/backends/google_chart_api'
  ]
# Windows resource type id for an embedded application manifest (RT_MANIFEST).
RT_MANIFEST = 24
# Manifest enabling Common Controls 6 (XP-style themed widgets); %(prog)s is
# substituted with the program name when the resource is embedded by py2exe.
manifest_template = '''
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="x86"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s Program</description>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="X86"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
# Manifest binding the executable to the VC++ 2008 (VC90) CRT side-by-side
# assembly so it runs without MSVCR90.dll being bundled.
rt90_manifest = """<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity type="win32" name="Microsoft.VC90.CRT" version="9.0.21022.8" processorArchitecture="x86" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
</dependentAssembly>
</dependency>
</assembly>
"""
setup(name='namebench',
      version=version.VERSION,
      py_modules=['namebench'],
      description='DNS service benchmarking tool',
      author='Thomas Stromberg',
      author_email='tstromberg@google.com',
      url='http://namebench.googlecode.com/',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Environment :: Console',
          'Intended Audience :: End Users/Desktop',
          'Intended Audience :: System Administrators',
          'License :: OSI Approved :: Apache 2.0',
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX',
          'Programming Language :: Python',
          'Topic :: Networking',
      ],
      packages=packages,
      platforms=['Any'],
      license='Apache 2.0',
      scripts=['namebench.py'],
      # Configuration, template and benchmark data installed alongside code.
      data_files=[
        ('namebench/config',
         ['config/namebench.cfg',
          'config/hostname_reference.cfg',
          'config/data_sources.cfg']
        ),
        ('namebench/templates',
         ['templates/ascii.tmpl',
          'templates/html.tmpl',
          'templates/resolv.conf.tmpl',
          'templates/style.css'
         ]
        ),
        ('namebench/data',
         ['data/alexa-top-2000-domains.txt',
          'data/cache-hit.txt',
          'data/cache-miss.txt',
          'data/cache-mix.txt'
         ]
        )
      ],
      # py2exe specific garbage below.
      options={
        'py2exe': {
          'bundle_files': 3, # 1 nor 2 does not work
          'ascii': False,
          'packages': ['nb_third_party'],
          'excludes': ['dns', 'jinja2', 'graphy', 'httplib2', 'tcl', 'simplejson'],
          'dll_excludes': ["w9xpopen.exe","MSVCP90.dll", "MSVCR90.DLL"],
        }
      },
      zipfile = "namebench.zip", # None - when bundle_files 1 or 2 can work.
      # Windows GUI executable; the manifests above are embedded as resources.
      windows=[{
        'script': "namebench.py",
        'dest_base': "namebench",
        'name': "namebench",
        'copyright': "(c) 2009 Google, Inc.",
        'comments': "http://namebench.googlecode.com/",
        'other_resources': [
          # Windows Common Controls, XP Look
          (RT_MANIFEST, 1, manifest_template % dict(prog="namebench")),
          # VCRT 2008
          (RT_MANIFEST, 1, rt90_manifest), # 1 - EXE CRT Manifest, 2 - DLL
        ],
      }],
      # console=['namebench.py']
      )
| apache-2.0 |
sumanthha/fundafriend | django/contrib/admin/sites.py | 88 | 18297 | from functools import update_wrapper
from django.http import Http404, HttpResponseRedirect
from django.contrib.admin import ModelAdmin, actions
from django.contrib.admin.forms import AdminAuthenticationForm
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.contenttypes import views as contenttype_views
from django.views.decorators.csrf import csrf_protect
from django.db.models.base import ModelBase
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse, NoReverseMatch
from django.template.response import TemplateResponse
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.conf import settings
LOGIN_FORM_KEY = 'this_is_the_login_form'
class AlreadyRegistered(Exception):
    """Raised by AdminSite.register() when a model is registered twice."""
    pass
class NotRegistered(Exception):
    """Raised by AdminSite.unregister() for a model that was never registered."""
    pass
class AdminSite(object):
    """
    An AdminSite object encapsulates an instance of the Django admin application, ready
    to be hooked in to your URLconf. Models are registered with the AdminSite using the
    register() method, and the get_urls() method can then be used to access Django view
    functions that present a full admin interface for the collection of registered
    models.
    """
    # Hook points that subclasses may override to supply a custom login form
    # or the template used by each built-in view; None selects the default.
    login_form = None
    index_template = None
    app_index_template = None
    login_template = None
    logout_template = None
    password_change_template = None
    password_change_done_template = None
    def __init__(self, name='admin', app_name='admin'):
        """Initialize an empty admin site.

        :param name: instance namespace used when reversing admin URLs.
        :param app_name: application namespace for the admin URLconf.
        """
        self._registry = {} # model_class class -> admin_class instance
        self.name = name
        self.app_name = app_name
        # Start with the built-in bulk-delete action; _global_actions keeps a
        # copy so disabled actions can still be looked up via get_action().
        self._actions = {'delete_selected': actions.delete_selected}
        self._global_actions = self._actions.copy()
    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Registers the given model(s) with the given admin class.

        The model(s) should be Model classes, not instances.

        If an admin class isn't given, it will use ModelAdmin (the default
        admin options). If keyword arguments are given -- e.g., list_display --
        they'll be applied as options to the admin class.

        If a model is already registered, this will raise AlreadyRegistered.

        If a model is abstract, this will raise ImproperlyConfigured.
        """
        if not admin_class:
            admin_class = ModelAdmin
        # Don't import the humongous validation code unless required
        if admin_class and settings.DEBUG:
            from django.contrib.admin.validation import validate
        else:
            validate = lambda model, adminclass: None
        # Accept either a single model class or an iterable of them.
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured('The model %s is abstract, so it '
                      'cannot be registered with admin.' % model.__name__)
            if model in self._registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)
            # If we got **options then dynamically construct a subclass of
            # admin_class with those **options.
            if options:
                # For reasons I don't quite understand, without a __module__
                # the created class appears to "live" in the wrong place,
                # which causes issues later on.
                options['__module__'] = __name__
                admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)
            # Validate (which might be a no-op)
            validate(admin_class, model)
            # Instantiate the admin class to save in the registry
            self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
def get_action(self, name):
"""
Explicitally get a registered global action wheather it's enabled or
not. Raises KeyError for invalid names.
"""
return self._global_actions[name]
@property
def actions(self):
"""
Get all the enabled actions as an iterable of (name, func).
"""
return self._actions.iteritems()
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
def check_dependencies(self):
"""
Check that all things needed to run the admin have been correctly installed.
The default implementation checks that LogEntry, ContentType and the
auth context processor are installed.
"""
from django.contrib.admin.models import LogEntry
from django.contrib.contenttypes.models import ContentType
if not LogEntry._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.admin' in your "
"INSTALLED_APPS setting in order to use the admin application.")
if not ContentType._meta.installed:
raise ImproperlyConfigured("Put 'django.contrib.contenttypes' in "
"your INSTALLED_APPS setting in order to use the admin application.")
if not ('django.contrib.auth.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS or
'django.core.context_processors.auth' in settings.TEMPLATE_CONTEXT_PROCESSORS):
raise ImproperlyConfigured("Put 'django.contrib.auth.context_processors.auth' "
"in your TEMPLATE_CONTEXT_PROCESSORS setting in order to use the admin application.")
def admin_view(self, view, cacheable=False):
"""
Decorator to create an admin view attached to this ``AdminSite``. This
wraps the view and provides permission checking by calling
``self.has_permission``.
You'll want to use this from within ``AdminSite.get_urls()``:
class MyAdminSite(AdminSite):
def get_urls(self):
from django.conf.urls import patterns, url
urls = super(MyAdminSite, self).get_urls()
urls += patterns('',
url(r'^my_view/$', self.admin_view(some_view))
)
return urls
By default, admin_views are marked non-cacheable using the
``never_cache`` decorator. If the view can be safely cached, set
cacheable=True.
"""
def inner(request, *args, **kwargs):
if not self.has_permission(request):
if request.path == reverse('admin:logout',
current_app=self.name):
index_path = reverse('admin:index', current_app=self.name)
return HttpResponseRedirect(index_path)
return self.login(request)
return view(request, *args, **kwargs)
if not cacheable:
inner = never_cache(inner)
# We add csrf_protect here so this function can be used as a utility
# function for any view, without having to repeat 'csrf_protect'.
if not getattr(view, 'csrf_exempt', False):
inner = csrf_protect(inner)
return update_wrapper(inner, view)
def get_urls(self):
from django.conf.urls import patterns, url, include
if settings.DEBUG:
self.check_dependencies()
def wrap(view, cacheable=False):
def wrapper(*args, **kwargs):
return self.admin_view(view, cacheable)(*args, **kwargs)
return update_wrapper(wrapper, view)
# Admin-site-wide views.
urlpatterns = patterns('',
url(r'^$',
wrap(self.index),
name='index'),
url(r'^logout/$',
wrap(self.logout),
name='logout'),
url(r'^password_change/$',
wrap(self.password_change, cacheable=True),
name='password_change'),
url(r'^password_change/done/$',
wrap(self.password_change_done, cacheable=True),
name='password_change_done'),
url(r'^jsi18n/$',
wrap(self.i18n_javascript, cacheable=True),
name='jsi18n'),
url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$',
wrap(contenttype_views.shortcut)),
url(r'^(?P<app_label>\w+)/$',
wrap(self.app_index),
name='app_list')
)
# Add in each model's views.
for model, model_admin in self._registry.iteritems():
urlpatterns += patterns('',
url(r'^%s/%s/' % (model._meta.app_label, model._meta.module_name),
include(model_admin.urls))
)
return urlpatterns
@property
def urls(self):
return self.get_urls(), self.app_name, self.name
def password_change(self, request):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'post_change_redirect': url
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': extra_context or {},
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
@never_cache
def login(self, request, extra_context=None):
"""
Displays the login form for the given HttpRequest.
"""
from django.contrib.auth.views import login
context = {
'title': _('Log in'),
'app_path': request.get_full_path(),
REDIRECT_FIELD_NAME: request.get_full_path(),
}
context.update(extra_context or {})
defaults = {
'extra_context': context,
'current_app': self.name,
'authentication_form': self.login_form or AdminAuthenticationForm,
'template_name': self.login_template or 'admin/login.html',
}
return login(request, **defaults)
@never_cache
def index(self, request, extra_context=None):
"""
Displays the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
app_dict = {}
user = request.user
for model, model_admin in self._registry.items():
app_label = model._meta.app_label
has_module_perms = user.has_module_perms(app_label)
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.module_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': app_label.title(),
'app_url': reverse('admin:app_list', kwargs={'app_label': app_label}, current_app=self.name),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
# Sort the apps alphabetically.
app_list = app_dict.values()
app_list.sort(key=lambda x: x['name'])
# Sort the models alphabetically within each app.
for app in app_list:
app['models'].sort(key=lambda x: x['name'])
context = {
'title': _('Site administration'),
'app_list': app_list,
}
context.update(extra_context or {})
return TemplateResponse(request, [
self.index_template or 'admin/index.html',
], context, current_app=self.name)
def app_index(self, request, app_label, extra_context=None):
user = request.user
has_module_perms = user.has_module_perms(app_label)
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
if has_module_perms:
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.module_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'perms': perms,
}
if perms.get('change', False):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add', False):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_dict:
app_dict['models'].append(model_dict),
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_label.title(),
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = {
'title': _('%s administration') % capfirst(app_label),
'app_list': [app_dict],
}
context.update(extra_context or {})
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context, current_app=self.name)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
# Registered via django.contrib.admin.site elsewhere in the framework.
site = AdminSite()
| bsd-3-clause |
jbassen/edx-platform | common/lib/xmodule/xmodule/exceptions.py | 2 | 1385 | class InvalidDefinitionError(Exception):
pass
class NotFoundError(Exception):
    """Raised when a requested object cannot be found.

    NOTE(review): no call sites are visible in this chunk; the exact
    "not found" subject is determined by the raising modulestore/runtime.
    """
    pass
class ProcessingError(Exception):
    """
    An error occurred while processing a request to the XModule.

    For example: if an exception occurs while checking a capa problem.
    """
    pass
class InvalidVersionError(Exception):
    """
    Signals an attempt to save an item at a location the store cannot
    support (for example, a draft version for a non-leaf node).

    The offending location is kept on ``self.location`` for callers.
    """
    def __init__(self, location):
        # Record the location first, then finish standard Exception setup.
        self.location = location
        super(InvalidVersionError, self).__init__()
class SerializationError(Exception):
    """
    Signals that a module could not be exported to XML.

    ``self.location`` identifies the module that failed; the message is
    carried by the base Exception.
    """
    def __init__(self, location, msg):
        # Stash the failing location, then delegate the message upward.
        self.location = location
        super(SerializationError, self).__init__(msg)
class UndefinedContext(Exception):
    """
    Tried to access an xmodule field which needs a different context (runtime) to have a value.
    """
    pass
class HeartbeatFailure(Exception):
    """Signals that a service heartbeat check did not succeed."""
    def __init__(self, msg, service):
        """
        In addition to a msg, provide the name of the service.
        """
        self.service = service
        super(HeartbeatFailure, self).__init__(msg)
    def __unicode__(self, *args, **kwargs):
        # Python 2 text protocol: expose just the stored message.
        return self.message
class TimeExpiredError(Exception):
    """Raised when an operation's allotted time has run out.

    NOTE(review): no raise sites visible in this chunk -- semantics are
    defined by callers.
    """
    pass
| agpl-3.0 |
ianastewart/cwltc-admin | venv/Lib/site-packages/waitress/channel.py | 1 | 16123 | ##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import socket
import threading
import time
import traceback
from waitress.buffers import (
OverflowableBuffer,
ReadOnlyFileBasedBuffer,
)
from waitress.parser import HTTPRequestParser
from waitress.task import (
ErrorTask,
WSGITask,
)
from waitress.utilities import InternalServerError
from . import wasyncore
class ClientDisconnected(Exception):
    """Raised when attempting to write to a closed socket (see write_soon)."""
class HTTPChannel(wasyncore.dispatcher, object):
    """
    Setting self.requests = [somerequest] prevents more requests from being
    received until the out buffers have been flushed.
    Setting self.requests = [] allows more requests to be received.
    """
    task_class = WSGITask
    error_task_class = ErrorTask
    parser_class = HTTPRequestParser
    request = None # A request parser instance
    last_activity = 0 # Time of last activity
    will_close = False # set to True to close the socket.
    close_when_flushed = False # set to True to close the socket when flushed
    requests = () # currently pending requests
    sent_continue = False # used as a latch after sending 100 continue
    total_outbufs_len = 0 # total bytes ready to send
    current_outbuf_count = 0 # total bytes written to current outbuf
    #
    # ASYNCHRONOUS METHODS (including __init__)
    #
    def __init__(
            self,
            server,
            sock,
            addr,
            adj,
            map=None,
            ):
        self.server = server
        self.adj = adj
        self.outbufs = [OverflowableBuffer(adj.outbuf_overflow)]
        self.creation_time = self.last_activity = time.time()
        # Size the send chunks to the socket's kernel send buffer.
        self.sendbuf_len = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
        # task_lock used to push/pop requests
        self.task_lock = threading.Lock()
        # outbuf_lock used to access any outbuf (expected to use an RLock)
        self.outbuf_lock = threading.Condition()
        wasyncore.dispatcher.__init__(self, sock, map=map)
        # Don't let wasyncore.dispatcher throttle self.addr on us.
        self.addr = addr
    def writable(self):
        # if there's data in the out buffer or we've been instructed to close
        # the channel (possibly by our server maintenance logic), run
        # handle_write
        return (
            self.total_outbufs_len
            or self.will_close
            or self.close_when_flushed
        )
    def handle_write(self):
        # Precondition: there's data in the out buffer to be sent, or
        # there's a pending will_close request
        if not self.connected:
            # we dont want to close the channel twice
            return
        # try to flush any pending output
        if not self.requests:
            # 1. There are no running tasks, so we don't need to try to lock
            #    the outbuf before sending
            # 2. The data in the out buffer should be sent as soon as possible
            #    because it's either data left over from task output
            #    or a 100 Continue line sent within "received".
            flush = self._flush_some
        elif self.total_outbufs_len >= self.adj.send_bytes:
            # 1. There's a running task, so we need to try to lock
            #    the outbuf before sending
            # 2. Only try to send if the data in the out buffer is larger
            #    than self.adj_bytes to avoid TCP fragmentation
            flush = self._flush_some_if_lockable
        else:
            # 1. There's not enough data in the out buffer to bother to send
            #    right now.
            flush = None
        if flush:
            try:
                flush()
            except socket.error:
                if self.adj.log_socket_errors:
                    self.logger.exception('Socket error')
                self.will_close = True
            except Exception:
                self.logger.exception('Unexpected exception when flushing')
                self.will_close = True
        if self.close_when_flushed and not self.total_outbufs_len:
            self.close_when_flushed = False
            self.will_close = True
        if self.will_close:
            self.handle_close()
    def readable(self):
        # We might want to create a new task. We can only do this if:
        # 1. We're not already about to close the connection.
        # 2. There's no already currently running task(s).
        # 3. There's no data in the output buffer that needs to be sent
        #    before we potentially create a new task.
        return not (self.will_close or self.requests or self.total_outbufs_len)
    def handle_read(self):
        # Read available bytes from the socket and hand them to received().
        try:
            data = self.recv(self.adj.recv_bytes)
        except socket.error:
            if self.adj.log_socket_errors:
                self.logger.exception('Socket error')
            self.handle_close()
            return
        if data:
            self.last_activity = time.time()
            self.received(data)
    def received(self, data):
        """
        Receives input asynchronously and assigns one or more requests to the
        channel.
        """
        # Preconditions: there's no task(s) already running
        request = self.request
        requests = []
        if not data:
            return False
        while data:
            if request is None:
                request = self.parser_class(self.adj)
            n = request.received(data)
            if request.expect_continue and request.headers_finished:
                # guaranteed by parser to be a 1.1 request
                request.expect_continue = False
                if not self.sent_continue:
                    # there's no current task, so we don't need to try to
                    # lock the outbuf to append to it.
                    outbuf_payload = b'HTTP/1.1 100 Continue\r\n\r\n'
                    self.outbufs[-1].append(outbuf_payload)
                    self.current_outbuf_count += len(outbuf_payload)
                    self.total_outbufs_len += len(outbuf_payload)
                    self.sent_continue = True
                    self._flush_some()
                    request.completed = False
            if request.completed:
                # The request (with the body) is ready to use.
                self.request = None
                if not request.empty:
                    requests.append(request)
                request = None
            else:
                self.request = request
            if n >= len(data):
                break
            data = data[n:]
        if requests:
            self.requests = requests
            self.server.add_task(self)
        return True
    def _flush_some_if_lockable(self):
        # Since our task may be appending to the outbuf, we try to acquire
        # the lock, but we don't block if we can't.
        if self.outbuf_lock.acquire(False):
            try:
                self._flush_some()
                if self.total_outbufs_len < self.adj.outbuf_high_watermark:
                    # Wake a task thread blocked in
                    # _flush_outbufs_below_high_watermark, if any.
                    self.outbuf_lock.notify()
            finally:
                self.outbuf_lock.release()
    def _flush_some(self):
        # Send as much data as possible to our client
        sent = 0
        dobreak = False
        while True:
            outbuf = self.outbufs[0]
            # use outbuf.__len__ rather than len(outbuf) FBO of not getting
            # OverflowError on 32-bit Python
            outbuflen = outbuf.__len__()
            while outbuflen > 0:
                chunk = outbuf.get(self.sendbuf_len)
                num_sent = self.send(chunk)
                if num_sent:
                    outbuf.skip(num_sent, True)
                    outbuflen -= num_sent
                    sent += num_sent
                    self.total_outbufs_len -= num_sent
                else:
                    # failed to write anything, break out entirely
                    dobreak = True
                    break
            else:
                # self.outbufs[-1] must always be a writable outbuf
                if len(self.outbufs) > 1:
                    toclose = self.outbufs.pop(0)
                    try:
                        toclose.close()
                    except Exception:
                        self.logger.exception(
                            'Unexpected error when closing an outbuf')
                else:
                    # caught up, done flushing for now
                    dobreak = True
            if dobreak:
                break
        if sent:
            self.last_activity = time.time()
            return True
        return False
    def handle_close(self):
        # Tear down all out buffers under the lock, then close the socket.
        with self.outbuf_lock:
            for outbuf in self.outbufs:
                try:
                    outbuf.close()
                except Exception:
                    self.logger.exception(
                        'Unknown exception while trying to close outbuf')
            self.total_outbufs_len = 0
            self.connected = False
            # Wake any task thread blocked waiting for outbuf drain so it can
            # observe connected == False and bail out.
            self.outbuf_lock.notify()
        wasyncore.dispatcher.close(self)
    def add_channel(self, map=None):
        """See wasyncore.dispatcher
        This hook keeps track of opened channels.
        """
        wasyncore.dispatcher.add_channel(self, map)
        self.server.active_channels[self._fileno] = self
    def del_channel(self, map=None):
        """See wasyncore.dispatcher
        This hook keeps track of closed channels.
        """
        fd = self._fileno # next line sets this to None
        wasyncore.dispatcher.del_channel(self, map)
        ac = self.server.active_channels
        if fd in ac:
            del ac[fd]
    #
    # SYNCHRONOUS METHODS
    #
    def write_soon(self, data):
        # Queue response data for the async mainloop to send; called from a
        # task (worker) thread. Returns the number of bytes buffered.
        if not self.connected:
            # if the socket is closed then interrupt the task so that it
            # can cleanup possibly before the app_iter is exhausted
            raise ClientDisconnected
        if data:
            # the async mainloop might be popping data off outbuf; we can
            # block here waiting for it because we're in a task thread
            with self.outbuf_lock:
                self._flush_outbufs_below_high_watermark()
                if not self.connected:
                    raise ClientDisconnected
                num_bytes = len(data)
                if data.__class__ is ReadOnlyFileBasedBuffer:
                    # they used wsgi.file_wrapper
                    self.outbufs.append(data)
                    nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                    self.outbufs.append(nextbuf)
                    self.current_outbuf_count = 0
                else:
                    if self.current_outbuf_count > self.adj.outbuf_high_watermark:
                        # rotate to a new buffer if the current buffer has hit
                        # the watermark to avoid it growing unbounded
                        nextbuf = OverflowableBuffer(self.adj.outbuf_overflow)
                        self.outbufs.append(nextbuf)
                        self.current_outbuf_count = 0
                    self.outbufs[-1].append(data)
                    self.current_outbuf_count += num_bytes
                self.total_outbufs_len += num_bytes
                if self.total_outbufs_len >= self.adj.send_bytes:
                    self.server.pull_trigger()
            return num_bytes
        return 0
    def _flush_outbufs_below_high_watermark(self):
        # Block the calling task thread until the mainloop has drained the
        # out buffers below the high watermark (or the client disconnects).
        # check first to avoid locking if possible
        if self.total_outbufs_len > self.adj.outbuf_high_watermark:
            with self.outbuf_lock:
                while (
                    self.connected and
                    self.total_outbufs_len > self.adj.outbuf_high_watermark
                ):
                    self.server.pull_trigger()
                    self.outbuf_lock.wait()
    def service(self):
        """Execute all pending requests """
        with self.task_lock:
            while self.requests:
                request = self.requests[0]
                if request.error:
                    task = self.error_task_class(self, request)
                else:
                    task = self.task_class(self, request)
                try:
                    task.service()
                except ClientDisconnected:
                    self.logger.info('Client disconnected while serving %s' %
                                     task.request.path)
                    task.close_on_finish = True
                except Exception:
                    self.logger.exception('Exception while serving %s' %
                                          task.request.path)
                    if not task.wrote_header:
                        if self.adj.expose_tracebacks:
                            body = traceback.format_exc()
                        else:
                            body = ('The server encountered an unexpected '
                                    'internal server error')
                        req_version = request.version
                        req_headers = request.headers
                        request = self.parser_class(self.adj)
                        request.error = InternalServerError(body)
                        # copy some original request attributes to fulfill
                        # HTTP 1.1 requirements
                        request.version = req_version
                        try:
                            request.headers['CONNECTION'] = req_headers[
                                'CONNECTION']
                        except KeyError:
                            pass
                        task = self.error_task_class(self, request)
                        try:
                            task.service() # must not fail
                        except ClientDisconnected:
                            task.close_on_finish = True
                    else:
                        task.close_on_finish = True
                # we cannot allow self.requests to drop to empty til
                # here; otherwise the mainloop gets confused
                if task.close_on_finish:
                    self.close_when_flushed = True
                    for request in self.requests:
                        request.close()
                    self.requests = []
                else:
                    # before processing a new request, ensure there is not too
                    # much data in the outbufs waiting to be flushed
                    # NB: currently readable() returns False while we are
                    # flushing data so we know no new requests will come in
                    # that we need to account for, otherwise it'd be better
                    # to do this check at the start of the request instead of
                    # at the end to account for consecutive service() calls
                    if len(self.requests) > 1:
                        self._flush_outbufs_below_high_watermark()
                    request = self.requests.pop(0)
                    request.close()
        if self.connected:
            self.server.pull_trigger()
        self.last_activity = time.time()
    def cancel(self):
        """ Cancels all pending / active requests """
        self.will_close = True
        self.connected = False
        self.last_activity = time.time()
        self.requests = []
| mit |
suiyuan2009/tensorflow | tensorflow/contrib/losses/python/losses/loss_ops_test.py | 82 | 55012 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.losses.python.losses.loss_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import numpy as np
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.losses.python.losses import loss_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import momentum as momentum_lib
class AbsoluteDifferenceLossTest(test.TestCase):
  """Tests for loss_ops.absolute_difference (tf.contrib.losses)."""
  def setUp(self):
    # 2x3 tensors; elementwise |pred - label| is [[3, 1, 10], [13, 3, 3]],
    # so the unweighted mean loss is 33 / 6 = 5.5 (see testNonZeroLoss).
    self._predictions = constant_op.constant([4, 8, 12, 8, 1, 3], shape=(2, 3))
    self._labels = constant_op.constant([1, 9, 2, -5, -2, 6], shape=(2, 3))
  def testValueErrorThrownWhenWeightIsNone(self):
    # NOTE(review): labels arg intentionally reuses predictions -- only the
    # weights=None validation matters here.
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.absolute_difference(
            self._predictions, self._predictions, weights=None)
  def testAllCorrectNoLossWeight(self):
    loss = loss_ops.absolute_difference(self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
  def testNonZeroLoss(self):
    loss = loss_ops.absolute_difference(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(5.5, loss.eval(), 3)
  def testNonZeroLossWithPythonScalarWeight(self):
    # A scalar weight scales the mean loss linearly.
    weights = 2.3
    loss = loss_ops.absolute_difference(self._predictions, self._labels,
                                        weights)
    with self.test_session():
      self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
  def testNonZeroLossWithScalarTensorWeight(self):
    weights = 2.3
    loss = loss_ops.absolute_difference(self._predictions, self._labels,
                                        constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(5.5 * weights, loss.eval(), 3)
  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Per-batch-row weights: only row 0 contributes.
    weights = constant_op.constant([1.2, 0.0], shape=[2,])
    loss = loss_ops.absolute_difference(self._predictions, self._labels,
                                        weights)
    with self.test_session():
      self.assertAlmostEqual(5.6, loss.eval(), 3)
  def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
    weights = constant_op.constant([1.2, 0.0], shape=[2, 1])
    loss = loss_ops.absolute_difference(self._predictions, self._labels,
                                        weights)
    with self.test_session():
      self.assertAlmostEqual(5.6, loss.eval(), 3)
  def testNonZeroLossWithSampleSpecificWeights(self):
    # Full per-element weight tensor.
    weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
    loss = loss_ops.absolute_difference(self._predictions, self._labels,
                                        weights)
    with self.test_session():
      self.assertAlmostEqual(16.6, loss.eval(), 3)
  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
    loss = loss_ops.absolute_difference(self._predictions, self._labels,
                                        weights)
    with self.test_session():
      self.assertAlmostEqual(6.0, loss.eval(), 3)
  def testLossWithSampleSpecificWeightsAllZero(self):
    # All-zero weights must yield zero loss, not NaN.
    weights = array_ops.zeros((2, 3))
    loss = loss_ops.absolute_difference(self._predictions, self._labels,
                                        weights)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class SoftmaxCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.softmax_cross_entropy (tf.contrib.losses).

  Logits of +/-10 (or +/-100) per class make the softmax nearly one-hot, so
  each fully-wrong example contributes ~10.0 (resp. larger) to the loss and
  each correct example contributes ~0.0.
  """
  def testNoneWeightRaisesValueError(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[1, 0, 0],
                                   [0, 1, 0],
                                   [0, 0, 1]])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.softmax_cross_entropy(logits, labels, weights=None)
  def testAllCorrect(self):
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[1, 0, 0],
                                     [0, 1, 0],
                                     [0, 0, 1]])
      loss = loss_ops.softmax_cross_entropy(logits, labels)
      self.assertEquals('softmax_cross_entropy_loss/value', loss.op.name)
      self.assertAlmostEqual(loss.eval(), 0.0, 3)
  def testAllWrong(self):
    # Labels are a cyclic shift of the argmax, so every example is wrong:
    # mean loss ~= 10.0 per example.
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels)
      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)
  def testNonZeroLossWithPythonScalarWeight(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
  def testNonZeroLossWithScalarTensorWeight(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels,
                                            constant_op.constant(weights))
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)
  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    # Per-example weights: weighted sum of per-example losses / batch size.
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)
  def testAllWrongAllWeightsMissing(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(0.0, loss.eval(), 3)
  def testSomeWeightsMissing(self):
    # Only the first example is weighted; mean is over non-zero weights.
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = constant_op.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(12.0, loss.eval(), 3)
  def testSoftmaxWithMeasurementSpecificWeightsRaisesException(self):
    # Per-logit (2-D) weights are not a valid shape for this loss.
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[1, 0, 0],
                                     [0, 1, 0],
                                     [0, 0, 1]])
      weights = constant_op.constant([[3, 4, 5],
                                      [2, 6, 0],
                                      [8, 0, 1]])
      with self.assertRaises(ValueError):
        loss_ops.softmax_cross_entropy(logits, labels, weights=weights).eval()
  def testSoftmaxLabelSmoothing(self):
    with self.test_session():
      # Softmax Cross Entropy Loss is:
      #   -\sum_i p_i \log q_i
      # where for a softmax activation
      # \log q_i = x_i - \log \sum_j \exp x_j
      #          = x_i - x_max - \log \sum_j \exp (x_j - x_max)
      # For our activations, [100, -100, -100] the log partion function becomes
      # \log ( exp(0) + exp(-200) + exp(-200) ) = 0
      # so our log softmaxes become: [0, -200, -200]
      # so our cross entropy loss is:
      # -(1 - L + L/n) * 0 + 400 * L/n = 400 L/n
      logits = constant_op.constant([[100.0, -100.0, -100.0]])
      labels = constant_op.constant([[1, 0, 0]])
      label_smoothing = 0.1
      loss = loss_ops.softmax_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEquals(loss.op.name, 'softmax_cross_entropy_loss/value')
      expected_value = 400.0 * label_smoothing / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)
  def testLossWithDynamicallyShapedWeights1D(self):
    # Weights fed through a placeholder with unknown static shape.
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = [2.3, 2.4, 2.5]
    weights_placeholder = array_ops.placeholder(dtypes.float32, shape=[None])
    loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
  def testLossWithDynamicallyShapedWeights2D(self):
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0, 0, 1],
                                   [1, 0, 0],
                                   [0, 1, 0]])
    weights = [[2.3], [2.4], [2.5]]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None, None])
    loss = loss_ops.softmax_cross_entropy(logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SparseSoftmaxCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.sparse_softmax_cross_entropy.

  Uses the canonical `assertEqual` instead of the deprecated
  `assertEquals` alias throughout.
  """

  def testNoneWeightRaisesValueError(self):
    """weights=None must raise ValueError."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[0], [1], [2]])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(logits, labels, weights=None)

  def testAllCorrectInt32Labels(self):
    """Perfect int32 column labels yield ~0 loss."""
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int32)
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectInt64Labels(self):
    """Perfect int64 column labels yield ~0 loss."""
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([[0], [1], [2]], dtype=dtypes.int64)
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllCorrectNonColumnLabels(self):
    """Perfect rank-1 labels yield ~0 loss."""
    with self.test_session():
      logits = constant_op.constant([[10.0, 0.0, 0.0],
                                     [0.0, 10.0, 0.0],
                                     [0.0, 0.0, 10.0]])
      labels = constant_op.constant([0, 1, 2])
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testAllWrongInt32Labels(self):
    """Maximally wrong int32 labels yield ~10.0 loss per example."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int32)
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongInt64Labels(self):
    """Maximally wrong int64 labels yield ~10.0 loss per example."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]], dtype=dtypes.int64)
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testAllWrongNonColumnLabels(self):
    """Maximally wrong rank-1 labels yield ~10.0 loss per example."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sparse_softmax_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 10.0, 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    """A Python float weight scales the loss linearly."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    """A 0-D weight tensor scales the loss linearly."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = 2.3
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(
          logits, labels, constant_op.constant(weights))
      self.assertAlmostEqual(weights * 10.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    """Rank-1 per-example weights produce a weighted mean."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([1.2, 3.4, 5.6], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testNonZeroLossWithColumnWeights(self):
    """Column-shaped [3, 1] weights behave like per-example weights."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([[1.2], [3.4], [5.6]])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual((1.2 + 3.4 + 5.6) * 10.0 / 3.0, loss.eval(), 3)

  def testAllWrongAllWeightsMissing(self):
    """All-zero weights yield zero loss even for wrong predictions."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([0, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testSomeWeightsMissing(self):
    """Zero-weighted examples are excluded from the mean."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([[2], [0], [1]])
    weights = constant_op.constant([1.2, 0, 0], shape=[3])
    with self.test_session():
      loss = loss_ops.sparse_softmax_cross_entropy(logits, labels, weights)
      self.assertAlmostEqual(12.0, loss.eval(), 3)

  def testMeasurementSpecificWeightsRaisesException(self):
    """Per-element (2-D, full) weights are rejected with ValueError."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2]])
      weights = constant_op.constant([[3, 4, 5], [2, 6, 0], [8, 0, 1]])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentWeightSizeRaisesException(self):
    """The weight tensor has incorrect number of elements."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2]])
      weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentLabelSizeRaisesException(self):
    """The label tensor has incorrect number of elements."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2], [3]])
      weights = constant_op.constant([1.2, 3.4, 5.6])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentWeightShapeRaisesException(self):
    """The weight tensor has incorrect shape."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0, -100.0],
                                     [-100.0, -100.0, 100.0, -100.0],
                                     [-100.0, -100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0], [1], [2], [3]])
      weights = constant_op.constant([[1.2, 3.4], [5.6, 7.8]])
      with self.assertRaises(ValueError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testInconsistentLabelShapeRaisesException(self):
    """The label tensor has incorrect shape."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0, -100.0],
                                     [-100.0, -100.0, 100.0, -100.0],
                                     [-100.0, -100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 1], [2, 3]])
      weights = constant_op.constant([1.2, 3.4, 5.6, 7.8])
      # Shape mismatch surfaces at run time, not graph-build time.
      with self.assertRaises(errors_impl.InvalidArgumentError):
        loss_ops.sparse_softmax_cross_entropy(
            logits, labels, weights=weights).eval()

  def testLossWithDynamicallyShapedWeights1D(self):
    """Weights fed via a rank-1 placeholder of unknown length work."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    weights = [2.3, 2.4, 2.5]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None])
    loss = loss_ops.sparse_softmax_cross_entropy(
        logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)

  def testLossWithDynamicallyShapedWeights2D(self):
    """Weights fed via a fully dynamic rank-2 placeholder work."""
    logits = constant_op.constant([[10.0, 0.0, 0.0],
                                   [0.0, 10.0, 0.0],
                                   [0.0, 0.0, 10.0]])
    labels = constant_op.constant([2, 0, 1])
    weights = [[2.3], [2.4], [2.5]]
    weights_placeholder = array_ops.placeholder(
        dtypes.float32, shape=[None, None])
    loss = loss_ops.sparse_softmax_cross_entropy(
        logits, labels, weights_placeholder)
    with self.test_session() as sess:
      loss = sess.run(loss, {weights_placeholder: weights})
      self.assertAlmostEqual(np.average(weights) * 10.0, loss, 3)
class SigmoidCrossEntropyLossTest(test.TestCase):
  """Tests for loss_ops.sigmoid_cross_entropy.

  Uses the canonical `assertEqual` instead of the deprecated
  `assertEquals` alias throughout.
  """

  def testAllCorrectSigmoid(self):
    """Confident, correct multi-label logits yield ~0 loss."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights1(self):
    """Single-column placeholders with all-ones inputs give ~0.313."""
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 1))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 1)),
                          labels: np.ones((32, 1)),
                      })
      # log(1 + exp(-1)) ~= 0.313 for logit 1.0 with label 1.0.
      self.assertAlmostEqual(0.313, loss, 3)

  def testLossWithSingleDimPlaceholderForLogitsAndWeights2(self):
    """Two-column placeholders with all-ones inputs give ~0.313."""
    logits = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    labels = array_ops.placeholder(dtypes.float32, shape=(None, 2))
    weights = array_ops.ones_like(logits, dtype=dtypes.float32)
    loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          logits: np.ones((32, 2)),
                          labels: np.ones((32, 2)),
                      })
      self.assertAlmostEqual(0.313, loss, 3)

  def testAllWrongSigmoid(self):
    """Confident, wrong multi-label logits yield ~600/9 loss."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1],
                                     [1, 0, 0],
                                     [0, 1, 0]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(loss.eval(), 600.0 / 9.0, 3)

  def testAllWrongSigmoidWithMeasurementSpecificWeights(self):
    """Per-element weights scale each element's contribution."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0],
                                     [-100.0, 100.0, -100.0],
                                     [-100.0, -100.0, 100.0]])
      labels = constant_op.constant([[0, 0, 1],
                                     [1, 0, 0],
                                     [0, 1, 0]])
      weights = constant_op.constant([[3, 4, 5],
                                      [2, 6, 0],
                                      [8, 0, 1]])
      loss = loss_ops.sigmoid_cross_entropy(logits, labels, weights)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      self.assertAlmostEqual(1700.0 / 7.0, loss.eval(), 3)

  def testMultiCorrectSigmoid(self):
    """Correct predictions over multiple active labels yield ~0 loss."""
    logits = constant_op.constant([[100.0, -100.0, 100.0],
                                   [100.0, 100.0, -100.0],
                                   [-100.0, 100.0, 100.0]])
    labels = constant_op.constant([[1, 0, 1],
                                   [1, 1, 0],
                                   [0, 1, 1]])
    loss = loss_ops.sigmoid_cross_entropy(logits, labels)
    self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
    with self.test_session():
      self.assertAlmostEqual(loss.eval(), 0.0, 3)

  def testSigmoidLabelSmoothingCorrect(self):
    """Label smoothing shifts the sigmoid loss by the expected amount."""
    with self.test_session():
      logits = constant_op.constant([[100.0, -100.0, -100.0]])
      labels = constant_op.constant([[1, 0, 1]])
      # Sigmoid cross entropy loss is:
      #   max(x,0) - x*z + log(1 + exp(-abs(x)))
      # The new labels are:
      #   z' = z * (1 - L) + 0.5 L
      #   1 -> 1 - 0.5 L
      #   0 -> 0.5 L
      # here we expect:
      #   1/3 * (100 - 100 * (1 - 0.5 L) + 0
      #        + 0 + 100 * (0.5 L) + 0
      #        + 0 + 100 * (1 - 0.5 L) + 0)
      #   = 1/3 * (100 + 50 L)
      label_smoothing = 0.1
      loss = loss_ops.sigmoid_cross_entropy(
          logits, labels, label_smoothing=label_smoothing)
      self.assertEqual(loss.op.name, 'sigmoid_cross_entropy_loss/value')
      expected_value = (100.0 + 50.0 * label_smoothing) / 3.0
      self.assertAlmostEqual(loss.eval(), expected_value, 3)

  def testSigmoidLabelSmoothingEqualsSoftmaxTwoLabel(self):
    """Smoothed sigmoid loss matches the equivalent two-class softmax."""
    with self.test_session():
      label_smoothing = 0.1
      sigmoid_logits = constant_op.constant([[100.0, -100.0, -100.0]])
      sigmoid_labels = constant_op.constant([[1, 0, 1]])
      sigmoid_loss = loss_ops.sigmoid_cross_entropy(
          sigmoid_logits, sigmoid_labels, label_smoothing=label_smoothing)
      softmax_logits = constant_op.constant(
          [[0.0, 100.0], [100.0, 0.0], [100.0, 0.0]])
      softmax_labels = constant_op.constant([[0, 1], [1, 0], [0, 1]])
      softmax_loss = loss_ops.softmax_cross_entropy(
          softmax_logits, softmax_labels, label_smoothing=label_smoothing)
      self.assertAlmostEqual(sigmoid_loss.eval(), softmax_loss.eval(), 3)
class LogLossTest(test.TestCase):
  """Tests for loss_ops.log_loss against a numpy-computed expectation."""

  def setUp(self):
    """Builds a 2x3 fixture and its elementwise expected log losses."""
    predictions = np.asarray([.9, .2, .2, .8, .4, .6]).reshape((2, 3))
    labels = np.asarray([1.0, 0.0, 1.0, 1.0, 0.0, 0.0]).reshape((2, 3))
    self._np_predictions = predictions
    self._np_labels = labels
    # Epsilon guards the logs against 0/1 predictions, mirroring the op.
    epsilon = 1e-7
    self._expected_losses = np.multiply(
        labels, np.log(predictions + epsilon)) + np.multiply(
            1 - labels, np.log(1 - predictions + epsilon))
    self._predictions = constant_op.constant(predictions)
    self._labels = constant_op.constant(labels)

  def testValueErrorThrownWhenWeightIsNone(self):
    """weights=None must raise ValueError."""
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.log_loss(self._labels, self._labels, weights=None)

  def testAllCorrectNoLossWeight(self):
    """Predicting the labels exactly yields ~0 loss."""
    loss = loss_ops.log_loss(self._labels, self._labels)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testAllCorrectNoLossWeightWithPlaceholder(self):
    """Same as above but with predictions fed via a placeholder."""
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._np_labels.shape)
    loss = loss_ops.log_loss(tf_predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(
          0.0, loss.eval(feed_dict={tf_predictions: self._np_labels}), 3)

  def testNonZeroLoss(self):
    """Unweighted loss is the mean of the elementwise expected losses."""
    loss = loss_ops.log_loss(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    """A Python float weight scales the mean loss linearly."""
    weights = 2.3
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    """A 0-D tensor weight scales the mean loss linearly."""
    weights = 2.3
    loss = loss_ops.log_loss(self._predictions, self._labels,
                             constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholder(self):
    """Scalar weight with predictions fed via a fully-shaped placeholder."""
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._np_predictions.shape)
    weights = 2.3
    loss = loss_ops.log_loss(tf_predictions, self._labels,
                             constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithScalarTensorWeightAndPlaceholderWithRankOnly(self):
    """Scalar weight with a rank-only (unknown-dims) placeholder."""
    tf_predictions = array_ops.placeholder(dtypes.float32, shape=[None, None])
    weights = 2.3
    loss = loss_ops.log_loss(tf_predictions, self._labels,
                             constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(weights * -np.sum(self._expected_losses) / 6.0,
                             loss, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    """Rank-1 per-row weights broadcast across each row's elements."""
    weights = constant_op.constant([1.2, 3.4], shape=[2])
    expected_losses = np.multiply(
        self._expected_losses,
        np.asarray([1.2, 1.2, 1.2, 3.4, 3.4, 3.4]).reshape((2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 6.0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeightsSomeZero(self):
    """A zero row weight removes that row from the mean (divide by 3)."""
    weights = constant_op.constant([1.2, 0], shape=[2])
    expected_losses = np.multiply(self._expected_losses,
                                  np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
                                      (2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeightsSomeZero(self):
    """Same as above with column-shaped [2, 1] weights."""
    weights = constant_op.constant([1.2, 0], shape=[2, 1])
    expected_losses = np.multiply(self._expected_losses,
                                  np.asarray([1.2, 1.2, 1.2, 0, 0, 0]).reshape(
                                      (2, 3)))
    loss = loss_ops.log_loss(self._predictions, self._labels, weights)
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 3.0, loss.eval(), 3)

  def testWeightsWithSameNumDimsButWrongShapeThrowsException(self):
    """Rank-matching but shape-mismatched weights raise ValueError."""
    weights = constant_op.constant(np.random.normal(size=(2, 4)), shape=[2, 4])
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.log_loss(self._predictions, self._labels, weights)

  def testNonZeroLossWithMeasurementSpecificWeights(self):
    """Full per-element weights; mean divides by the 5 nonzero entries."""
    weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    loss = loss_ops.log_loss(
        self._predictions,
        self._labels,
        constant_op.constant(
            weights, shape=(2, 3)))
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss.eval(), 3)

  def testNonZeroLossWithMeasurementSpecificWeightsWithPlaceholder(self):
    """Per-element weights with predictions fed via a placeholder."""
    weights = np.array([3, 6, 5, 0, 4, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
    loss = loss_ops.log_loss(
        tf_predictions,
        self._labels,
        constant_op.constant(
            weights, shape=(2, 3)))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected_losses) / 5.0, loss, 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    """Only one nonzero weight: the mean reduces to that single term."""
    weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    loss = loss_ops.log_loss(
        self._predictions,
        self._labels,
        constant_op.constant(
            weights, shape=(2, 3)))
    with self.test_session():
      self.assertAlmostEqual(-np.sum(expected_losses), loss.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZeroWithPlaceholder(self):
    """Single nonzero weight with predictions fed via a placeholder."""
    weights = np.array([0, 0, 0, 0, 0, 2]).reshape((2, 3))
    expected_losses = np.multiply(self._expected_losses, weights)
    tf_predictions = array_ops.placeholder(dtypes.float32, shape=[2, 3])
    tf_weights = constant_op.constant(weights, shape=(2, 3))
    loss = loss_ops.log_loss(tf_predictions, self._labels, tf_weights)
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._np_predictions})
      self.assertAlmostEqual(-np.sum(expected_losses), loss, 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    """All-zero weights yield zero loss."""
    tf_weights = array_ops.zeros(shape=(2, 3))
    loss = loss_ops.log_loss(self._predictions, self._labels, tf_weights)
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)
class HingeLossTest(test.TestCase):
  """Tests for loss_ops.hinge_loss."""

  def testIncompatibleShapes(self):
    """Mismatched logits/labels shapes raise ValueError."""
    with self.test_session():
      logits = constant_op.constant([[-1.0], [2.1]])
      labels = constant_op.constant([0.0, 1.0])
      with self.assertRaises(ValueError):
        _ = loss_ops.hinge_loss(logits, labels).eval()

  def testAllOutsideMargin(self):
    """Examples safely beyond the margin incur zero loss."""
    with self.test_session():
      logits = constant_op.constant([1.2, -1.4, -1.0, 2.1])
      labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
      per_example = loss_ops.hinge_loss(logits, labels)
      self.assertAllClose(per_example.eval(), [0.0, 0.0, 0.0, 0.0], atol=1e-3)

  def testSomeInsideMargin(self):
    """Correct-side examples inside the margin incur small losses."""
    with self.test_session():
      logits = constant_op.constant([[-0.7], [-1.4], [1.4], [0.6]])
      labels = constant_op.constant([[0.0], [0.0], [1.0], [1.0]])
      per_example = loss_ops.hinge_loss(logits, labels)
      # Examples 1 and 4 are on the correct side of the hyperplane but
      # within the margin, so they incur some (small) loss.
      self.assertAllClose(
          per_example.eval(), [[0.3], [0.0], [0.0], [0.4]], atol=1e-3)

  def testSomeMisclassified(self):
    """Wrong-side examples incur loss growing with the violation."""
    with self.test_session():
      logits = constant_op.constant([[[1.2], [0.4], [-1.0], [-1.1]]])
      labels = constant_op.constant([[[1.0], [0.0], [0.0], [1.0]]])
      per_example = loss_ops.hinge_loss(logits, labels)
      # Examples 2 and 4 sit on the wrong side of the hyperplane and
      # incur fairly large losses.
      self.assertAllClose(
          per_example.eval(), [[[0.0], [1.4], [0.0], [2.1]]], atol=1e-3)
class MeanSquaredErrorTest(test.TestCase):
  """Tests for loss_ops.mean_squared_error."""

  def setUp(self):
    """Creates the fixed 2x3 prediction/label fixture shared by tests."""
    self._predictions = constant_op.constant(
        [4, 8, 12, 8, 1, 3], shape=(2, 3))
    self._labels = constant_op.constant(
        [1, 9, 2, -5, -2, 6], shape=(2, 3))

  def testValueErrorThrownWhenWeightIsNone(self):
    """weights=None must raise ValueError."""
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.mean_squared_error(
            self._predictions, self._predictions, weights=None)

  def testAllCorrectNoLossWeight(self):
    """Identical predictions and labels give ~0 loss."""
    mse = loss_ops.mean_squared_error(self._predictions, self._predictions)
    with self.test_session():
      self.assertAlmostEqual(0.0, mse.eval(), 3)

  def testNonZeroLoss(self):
    """Unweighted MSE of the fixture pair is 49.5."""
    mse = loss_ops.mean_squared_error(self._predictions, self._labels)
    with self.test_session():
      self.assertAlmostEqual(49.5, mse.eval(), 3)

  def testNonZeroLossWithPythonScalarWeight(self):
    """A Python float weight scales the MSE linearly."""
    scale = 2.3
    mse = loss_ops.mean_squared_error(self._predictions, self._labels, scale)
    with self.test_session():
      self.assertAlmostEqual(49.5 * scale, mse.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    """A 0-D tensor weight scales the MSE linearly."""
    scale = 2.3
    mse = loss_ops.mean_squared_error(
        self._predictions, self._labels, constant_op.constant(scale))
    with self.test_session():
      self.assertAlmostEqual(49.5 * scale, mse.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    """Rank-1 per-row weights broadcast over each row's elements."""
    row_weights = constant_op.constant([1.2, 3.4], shape=[2,])
    mse = loss_ops.mean_squared_error(
        self._predictions, self._labels, row_weights)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, mse.eval(), 3)

  def testNonZeroLossWithTwoDimBatchSpecificWeights(self):
    """Column-shaped [2, 1] weights behave like per-row weights."""
    row_weights = constant_op.constant([1.2, 3.4], shape=[2, 1])
    mse = loss_ops.mean_squared_error(
        self._predictions, self._labels, row_weights)
    with self.test_session():
      self.assertAlmostEqual(767.8 / 6.0, mse.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeights(self):
    """Full per-element weights; mean divides by nonzero entry count."""
    element_weights = constant_op.constant([3, 6, 5, 0, 4, 2], shape=[2, 3])
    mse = loss_ops.mean_squared_error(
        self._predictions, self._labels, element_weights)
    with self.test_session():
      self.assertAlmostEqual(587 / 5.0, mse.eval(), 3)

  def testNonZeroLossWithSampleSpecificWeightsMostZero(self):
    """A single nonzero element weight leaves just that term."""
    element_weights = constant_op.constant([0, 0, 0, 0, 0, 2], shape=[2, 3])
    mse = loss_ops.mean_squared_error(
        self._predictions, self._labels, element_weights)
    with self.test_session():
      self.assertAlmostEqual(18.0, mse.eval(), 3)

  def testLossWithSampleSpecificWeightsAllZero(self):
    """All-zero weights yield zero loss."""
    zero_weights = array_ops.zeros((2, 3))
    mse = loss_ops.mean_squared_error(
        self._predictions, self._labels, zero_weights)
    with self.test_session():
      self.assertAlmostEqual(0.0, mse.eval(), 3)
class MeanPairwiseSquaresErrorTest(test.TestCase):
  """Tests for loss_ops.mean_pairwise_squared_error."""

  def setUp(self):
    """Builds a 2x3 fixture and hand-computes expected pairwise losses."""
    self._predictions = np.array([[4, 8, 12], [8, 1, 3]])
    self._labels = np.array([[1, 9, 2], [-5, -5, 7]])
    batch_size, dims = self._labels.shape
    # Compute the expected loss 'manually'.
    total = np.zeros((batch_size, 1))
    for b in range(batch_size):
      for i in range(dims):
        for j in range(dims):
          # Squared difference between the (i, j) deltas of predictions
          # and labels within the same batch element.
          x = self._predictions[b, i].item() - self._predictions[b, j].item()
          y = self._labels[b, i].item() - self._labels[b, j].item()
          tmp = (x - y) * (x - y)
          total[b] += tmp
    # 9.0 = dims * dims ordered pairs per batch element (dims == 3 here).
    self._expected_losses = np.divide(total, 9.0)

  def testValueErrorThrownWhenWeightIsNone(self):
    """weights=None must raise ValueError."""
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.mean_pairwise_squared_error(
            predictions=constant_op.constant(self._labels),
            labels=constant_op.constant(self._labels),
            weights=None)

  def testAllCorrectNoLossWeight(self):
    """Identical predictions and labels give ~0 loss."""
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._labels),
        labels=constant_op.constant(self._labels))
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testNonZeroLoss(self):
    """Unweighted loss matches the manually computed expectation."""
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels))
    with self.test_session():
      self.assertAlmostEqual(np.sum(self._expected_losses), loss.eval(), 3)

  def testGradientWithZeroWeight(self):
    """Gradients through a zero-weight loss must not be NaN."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      inputs = array_ops.ones((2, 3))
      weights = variable_scope.get_variable(
          'weights',
          shape=[3, 4],
          initializer=init_ops.truncated_normal_initializer())
      predictions = math_ops.matmul(inputs, weights)
      optimizer = momentum_lib.MomentumOptimizer(
          learning_rate=0.001, momentum=0.9)
      loss = loss_ops.mean_pairwise_squared_error(predictions, predictions, 0)
      gradients_to_variables = optimizer.compute_gradients(loss)
      init_op = variables.global_variables_initializer()
      with self.test_session() as sess:
        sess.run(init_op)
        for grad, _ in gradients_to_variables:
          np_grad = sess.run(grad)
          self.assertFalse(np.isnan(np_grad).any())

  def testNonZeroLossWithPythonScalarWeight(self):
    """A Python float weight scales the loss linearly."""
    weights = 2.3
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=weights)
    with self.test_session():
      self.assertAlmostEqual(weights * np.sum(self._expected_losses),
                             loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeight(self):
    """A 0-D tensor weight scales the loss linearly."""
    weights = 2.3
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(weights * np.sum(self._expected_losses),
                             loss.eval(), 3)

  def testNonZeroLossWithScalarZeroWeight(self):
    """A scalar zero weight yields a zero loss."""
    weights = 0
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(weights))
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 3)

  def testNonZeroLossWithScalarTensorWeightWithPlaceholder(self):
    """Scalar weight with predictions and labels fed via placeholders."""
    weights = 2.3
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._predictions.shape)
    tf_labels = array_ops.placeholder(dtypes.float32, shape=self._labels.shape)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=tf_predictions,
        labels=tf_labels,
        weights=constant_op.constant(weights))
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          tf_predictions: self._predictions,
                          tf_labels: self._labels,
                      })
      self.assertAlmostEqual(weights * np.sum(self._expected_losses), loss, 3)

  def testNonZeroLossWithOneDimBatchSpecificWeights(self):
    """Per-batch-element weights scale each element's pairwise loss."""
    weights = np.asarray([2.0, 1.0]).reshape((2, 1))
    expected_losses = np.multiply(weights, self._expected_losses)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(np.sum(expected_losses), loss.eval(), 3)

  def testZeroLossWithOneDimBatchZeroWeights(self):
    """All-zero per-batch weights yield zero loss."""
    weights = np.asarray([0.0, 0.0]).reshape((2, 1))
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 3)

  def testNonZeroLossWithOneDimBatchSpecificWeightsAndPlaceholders(self):
    """Per-batch weights with inputs fed via placeholders."""
    weights = np.asarray([1.2, 3.4]).reshape((2, 1))
    expected_losses = np.multiply(weights, self._expected_losses)
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._predictions.shape)
    tf_labels = array_ops.placeholder(dtypes.int32, shape=self._labels.shape)
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=tf_predictions,
        labels=tf_labels,
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session() as sess:
      loss = sess.run(loss,
                      feed_dict={
                          tf_predictions: self._predictions,
                          tf_labels: self._labels,
                      })
      self.assertAlmostEqual(np.sum(expected_losses), loss, 3)

  def testLossWithAllZeroBatchSpecificWeights(self):
    """All-zero per-batch weights (np.zeros) yield zero loss."""
    weights = np.zeros((2, 1))
    loss = loss_ops.mean_pairwise_squared_error(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        weights=constant_op.constant(
            weights, shape=[2]))
    with self.test_session():
      self.assertAlmostEqual(0.0, loss.eval(), 3)

  def testLossIsAssociativeAcrossBatchElements(self):
    """Loss of a concatenated batch equals the sum of per-batch losses."""
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      height = 3
      width = 4
      shape = (1, height, width, 1)
      labels0 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      predictions0 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      labels1 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      predictions1 = random_ops.random_uniform(
          shape, minval=0, maxval=1, dtype=dtypes.float32)
      loss0 = loss_ops.mean_pairwise_squared_error(
          predictions=predictions0,
          labels=labels0)
      loss1 = loss_ops.mean_pairwise_squared_error(
          predictions=predictions1,
          labels=labels1)
      loss0_1 = loss_ops.mean_pairwise_squared_error(
          predictions=array_ops.concat([predictions0, predictions1], 0),
          labels=array_ops.concat([labels0, labels1], 0))
      with self.test_session() as session:
        loss0, loss1, loss0_1 = session.run([loss0, loss1, loss0_1])
        self.assertTrue(loss0 > 0)
        self.assertTrue(loss1 > 0)
        self.assertAlmostEqual(loss0 + loss1, loss0_1, 5)
class CosineDistanceLossTest(test.TestCase):
  """Tests for loss_ops.cosine_distance over (batch, samples, dim) inputs."""

  def setUp(self):
    # Three batches of two 3-D vectors each; the cosine distance is computed
    # along dim=2 (the vector components).
    self._predictions = np.asarray([
        [1, 0, 0],  # Batch 1
        [0, 0, -1],
        [1, 0, 0],  # Batch 2
        [1, 0, 0],
        [0, 0, -1],  # Batch 3
        [1, 0, 0]
    ]).reshape((3, 2, 3))
    self._labels = np.asarray([[1, 0, 0],
                               [0, 0, 1],
                               [0, 1, 0],
                               [1, 0, 0],
                               [0, 0, 1],
                               [0, 1, 0]]).reshape((3, 2, 3))

  def testValueErrorThrownWhenWeightIsNone(self):
    # weights=None is rejected explicitly (distinct from omitting weights).
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.cosine_distance(
            predictions=constant_op.constant(self._labels),
            labels=constant_op.constant(self._labels),
            dim=2,
            weights=None)

  def testAllCorrectNoWeights(self):
    # Identical predictions and labels -> zero distance.
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._labels),
        labels=constant_op.constant(self._labels),
        dim=2)
    with self.test_session():
      self.assertAlmostEqual(0, loss.eval(), 5)

  def testPartiallyCorrectWithIntegerValues(self):
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2)
    with self.test_session():
      self.assertAlmostEqual(1, loss.eval(), 5)

  def testPartiallyCorrectFloatingPointValues(self):
    # Unit-norm float vectors; rows are matched pairwise against labels.
    predictions = np.matrix(
        ('0.819031913261206 0.567041924552012 0.087465312324590;'
         '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
         '0.707106781186548 -0.707106781186548 0'))
    labels = np.matrix(('0.819031913261206 0.567041924552012 0.087465312324590;'
                        '0.665139432070255 0.739487441769973 0.103671883216994;'
                        '0.707106781186548 0.707106781186548 0'))
    tf_preds = constant_op.constant(
        predictions, shape=(3, 1, 3), dtype=dtypes.float32)
    tf_labels = constant_op.constant(
        labels, shape=(3, 1, 3), dtype=dtypes.float32)
    loss = loss_ops.cosine_distance(tf_preds, tf_labels, dim=2)
    with self.test_session():
      self.assertAlmostEqual(1.0, loss.eval(), 5)

  def testSampleSpecificWeights(self):
    # One weight per batch: only batch 1 contributes.
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=constant_op.constant([1, 0, 0]))
    with self.test_session():
      self.assertEqual(1.0, loss.eval())

  def testMeasurementSpecificWeights(self):
    # One weight per (batch, sample) measurement.
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=constant_op.constant(
            [1, 0, 0, 1, 1, 1], shape=(3, 2)))
    with self.test_session():
      self.assertEqual(3.0 / 4.0, loss.eval())

  def testValueErrorThrownWithShapelessPlaceholder(self):
    tf_predictions = array_ops.placeholder(dtypes.float32)
    with self.test_session():
      with self.assertRaises(ValueError):
        loss_ops.cosine_distance(
            predictions=tf_predictions,
            labels=constant_op.constant(self._labels),
            dim=2,
            weights=constant_op.constant(
                [1, 0, 0, 1, 1, 1], shape=(3, 2)))

  def testMeasurementSpecificWeightsWithPlaceholderWithShape(self):
    tf_predictions = array_ops.placeholder(
        dtypes.float32, shape=self._labels.shape)
    loss = loss_ops.cosine_distance(
        predictions=tf_predictions,
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=constant_op.constant(
            [1, 0, 0, 1, 1, 1], shape=(3, 2)))
    with self.test_session() as sess:
      loss = sess.run(loss, feed_dict={tf_predictions: self._predictions})
      self.assertEqual(3.0 / 4.0, loss)

  def testZeroLossWhenAllSampleSpecificWeightsAreZero(self):
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=array_ops.zeros((3,)))
    with self.test_session():
      self.assertEqual(0, loss.eval())

  def testZeroLossWhenAllMeasurementSpecificWeightsAreZero(self):
    loss = loss_ops.cosine_distance(
        predictions=constant_op.constant(self._predictions),
        labels=constant_op.constant(self._labels),
        dim=2,
        weights=array_ops.zeros((3, 2)))
    with self.test_session():
      self.assertEqual(0, loss.eval())
class ComputeWeightedLossTest(test.TestCase):
  """Tests for loss_ops.compute_weighted_loss."""

  def testHingeLoss(self):
    # Expected per-example hinge losses below: 0.4 with label 0 -> 1.4,
    # -1.1 with label 1 -> 2.1, the other two are correct -> 0.
    logits = constant_op.constant([1.2, 0.4, -1.0, -1.1])
    labels = constant_op.constant([1.0, 0.0, 0.0, 1.0])
    losses = loss_ops.hinge_loss(logits, labels)
    # hinge_loss itself does not register anything in the LOSSES collection.
    self.assertFalse(loss_ops.get_losses())
    loss = loss_ops.compute_weighted_loss(losses)
    # compute_weighted_loss adds the reduced loss to the LOSSES collection.
    self.assertTrue(loss_ops.get_losses())
    with self.test_session():
      self.assertAllClose(losses.eval(), [0.0, 1.4, 0.0, 2.1], atol=1e-3)
      self.assertAllClose(loss.eval(), 3.5 / 4.0, atol=1e-3)
class AddLossTest(test.TestCase):
  """Tests for loss_ops.add_loss and the LOSSES-collection plumbing."""

  def testAddExternalLoss(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    losses = loss_ops.hinge_loss(logits, labels)
    self.assertFalse(loss_ops.get_losses())
    # Manually registered tensors become visible to get_total_loss().
    loss_ops.add_loss(math_ops.reduce_mean(losses))
    self.assertTrue(loss_ops.get_losses())
    total_loss = loss_ops.get_total_loss()
    with self.test_session():
      self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)
      self.assertAllClose(total_loss.eval(), 3.5 / 4.0, atol=1e-3)

  def testNoneLossCollection(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    losses = loss_ops.hinge_loss(logits, labels)
    self.assertFalse(loss_ops.get_losses())
    # loss_collection=None must skip registration entirely.
    loss_ops.add_loss(math_ops.reduce_mean(losses), loss_collection=None)
    self.assertFalse(loss_ops.get_losses())
    with self.test_session():
      self.assertAllClose(losses.eval(), [[0.0, 1.4, 0.0, 2.1]], atol=1e-3)

  def testNoCollectLosses(self):
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]])
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]])
    self.assertFalse(loss_ops.get_losses())
    # With add_loss defaulted to loss_collection=None via arg_scope, none of
    # the loss constructors below registers anything.
    with arg_scope([loss_ops.add_loss], loss_collection=None):
      loss_ops.absolute_difference(logits, labels)
      loss_ops.log_loss(logits, labels)
      loss_ops.mean_squared_error(logits, labels)
      loss_ops.sigmoid_cross_entropy(logits, labels)
      loss_ops.softmax_cross_entropy(logits, labels)
    self.assertFalse(loss_ops.get_losses())

  def testNoCollectLossesBatch2(self):
    # Same as testNoCollectLosses, but with batch size 2.
    logits = constant_op.constant([[1.2, 0.4, -1.0, -1.1]] * 2)
    labels = constant_op.constant([[1.0, 0.0, 0.0, 1.0]] * 2)
    self.assertFalse(loss_ops.get_losses())
    with arg_scope([loss_ops.add_loss], loss_collection=None):
      loss_ops.absolute_difference(logits, labels)
      loss_ops.log_loss(logits, labels)
      loss_ops.mean_squared_error(logits, labels)
      loss_ops.sigmoid_cross_entropy(logits, labels)
      loss_ops.softmax_cross_entropy(logits, labels)
    self.assertFalse(loss_ops.get_losses())
# Standard TensorFlow test entry point: run every test case in this module.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
ryfeus/lambda-packs | Opencv_pil/source/pip/_vendor/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
# Number of tracked characters after which got_enough_data() reports that the
# sample is large enough to draw a conclusion.
ENOUGH_DATA_THRESHOLD = 1024
# Confidence ceiling / floor returned by get_confidence().
SURE_YES = 0.99
SURE_NO = 0.01
# Minimum count of "frequent" characters required before any confidence above
# SURE_NO is reported.
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
    """Accumulates character-frequency statistics for one multi-byte encoding
    and converts them into a detection confidence.  Subclasses supply the
    per-encoding frequency table and the byte-pair -> table-index mapping
    (``get_order``)."""

    def __init__(self):
        # Mapping table to get frequency order from char order (get from
        # GetOrder())
        self._mCharToFreqOrder = None
        self._mTableSize = None  # Size of above table
        # This is a constant value which varies from language to language,
        # used in calculating confidence.  See
        # http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
        # for further detail.
        self._mTypicalDistributionRatio = None
        self.reset()

    def reset(self):
        """reset analyser, clear any state"""
        # If this flag is set to True, detection is done and conclusion has
        # been made
        self._mDone = False
        self._mTotalChars = 0  # Total characters encountered
        # The number of characters whose frequency order is less than 512
        self._mFreqChars = 0

    def feed(self, aBuf, aCharLen):
        """feed a character with known length"""
        if aCharLen == 2:
            # we only care about 2-bytes character in our distribution analysis
            order = self.get_order(aBuf)
        else:
            order = -1
        if order >= 0:
            self._mTotalChars += 1
            # order is valid
            if order < self._mTableSize:
                # Frequency rank below 512 counts as a "frequent" character.
                if 512 > self._mCharToFreqOrder[order]:
                    self._mFreqChars += 1

    def get_confidence(self):
        """return confidence based on existing data"""
        # if we didn't receive any character in our consideration range,
        # return negative answer
        if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
            return SURE_NO
        if self._mTotalChars != self._mFreqChars:
            # Ratio of frequent to infrequent characters, scaled by the
            # language-typical distribution ratio set by the subclass.
            r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
                 * self._mTypicalDistributionRatio))
            if r < SURE_YES:
                return r
        # normalize confidence (we don't want to be 100% sure)
        return SURE_YES

    def got_enough_data(self):
        # It is not necessary to receive all data to draw conclusion.
        # For charset detection, certain amount of data is enough
        return self._mTotalChars > ENOUGH_DATA_THRESHOLD

    def get_order(self, aBuf):
        # We do not handle characters based on the original encoding string,
        # but convert this encoding string to a number, here called order.
        # This allows multiple encodings of a language to share one frequency
        # table.  Base implementation: nothing is tracked.
        return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency analysis specialised for the EUC-TW encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCTWCharToFreqOrder
        self._mTableSize = EUCTW_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-TW two-byte characters: lead byte 0xC4-0xFE, trail byte
        # 0xA1-0xFE.  The state machine has already validated the bytes.
        lead = wrap_ord(aBuf[0])
        if lead < 0xC4:
            return -1
        return 94 * (lead - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency analysis specialised for the EUC-KR encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = EUCKRCharToFreqOrder
        self._mTableSize = EUCKR_TABLE_SIZE
        self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # EUC-KR two-byte characters: lead byte 0xB0-0xFE, trail byte
        # 0xA1-0xFE (bytes already validated by the state machine).
        lead = wrap_ord(aBuf[0])
        if lead < 0xB0:
            return -1
        return 94 * (lead - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency analysis specialised for the GB2312 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = GB2312CharToFreqOrder
        self._mTableSize = GB2312_TABLE_SIZE
        self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # GB2312 two-byte characters: lead byte 0xB0-0xFE, trail byte
        # 0xA1-0xFE; anything else is not a tracked character.
        lead = wrap_ord(aBuf[0])
        trail = wrap_ord(aBuf[1])
        if lead < 0xB0 or trail < 0xA1:
            return -1
        return 94 * (lead - 0xB0) + trail - 0xA1
class Big5DistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency analysis specialised for the Big5 encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = Big5CharToFreqOrder
        self._mTableSize = BIG5_TABLE_SIZE
        self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # Big5 two-byte characters: lead byte 0xA4-0xFE, trail byte
        # 0x40-0x7E or 0xA1-0xFE (bytes already validated upstream).
        lead = wrap_ord(aBuf[0])
        if lead < 0xA4:
            return -1
        trail = wrap_ord(aBuf[1])
        base = 157 * (lead - 0xA4)
        if trail >= 0xA1:
            # High trail range sits after the 63 slots of the low range.
            return base + trail - 0xA1 + 63
        return base + trail - 0x40
class SJISDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency analysis specialised for the Shift_JIS encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # for sjis encoding, we are interested
        #   first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
        #   second byte range: 0x40 -- 0x7e, 0x81 -- 0xfe
        # no validation needed here. State machine has done that
        first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
        if (first_char >= 0x81) and (first_char <= 0x9F):
            order = 188 * (first_char - 0x81)
        # NOTE(review): the comment above lists lead bytes up to 0xFE, but this
        # branch caps the second lead range at 0xEF — confirm intended.
        elif (first_char >= 0xE0) and (first_char <= 0xEF):
            order = 188 * (first_char - 0xE0 + 31)
        else:
            return -1
        order = order + second_char - 0x40
        # NOTE(review): trail bytes above 0x7F are discarded here even though
        # the range comment lists 0x81-0xFE as valid; this matches shipped
        # chardet behaviour, so confirm upstream before "fixing" it.
        if second_char > 0x7F:
            order = -1
        return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
    """Character-frequency analysis specialised for the EUC-JP encoding."""

    def __init__(self):
        CharDistributionAnalysis.__init__(self)
        self._mCharToFreqOrder = JISCharToFreqOrder
        self._mTableSize = JIS_TABLE_SIZE
        self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO

    def get_order(self, aBuf):
        # for euc-JP encoding, we are interested
        #   first byte range: 0xa0 -- 0xfe
        #   second byte range: 0xa1 -- 0xfe
        # no validation needed here. State machine has done that
        char = wrap_ord(aBuf[0])
        if char >= 0xA0:
            # NOTE(review): a lead byte of exactly 0xA0 always yields a
            # negative index (94 * (0xA0 - 0xA1) + trail - 0xA1 <= -1), which
            # feed() discards via its `order >= 0` check — so 0xA0 is
            # effectively filtered downstream rather than here.
            return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
        else:
            return -1
| mit |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/pycrypto-2.6.1/lib/Crypto/Hash/hashalgo.py | 124 | 3984 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from binascii import hexlify
class HashAlgo:
    """Generic wrapper presenting a uniform API over a concrete hash object.

    :undocumented: block_size
    """

    #: The size of the resulting hash in bytes.
    digest_size = None
    #: The internal block size of the hash algorithm in bytes.
    block_size = None

    def __init__(self, hashFactory, data=None):
        """Create the underlying hash object.

        :Parameters:
          hashFactory : callable
            Produces the actual hash implementation: either it exposes a
            *new()* method, or it is itself directly callable.
          data : byte string
            Optional first chunk of the message; equivalent to an immediate
            call to `update()`.
        """
        factory = getattr(hashFactory, 'new', hashFactory)
        self._hash = factory()
        if data:
            self.update(data)

    def update(self, data):
        """Hash the next chunk of the message.

        Repeated calls are equivalent to a single call on the concatenation:
        ``m.update(a); m.update(b)`` behaves like ``m.update(a + b)``.

        :Parameters:
          data : byte string
            The next chunk of the message being hashed.
        """
        return self._hash.update(data)

    def digest(self):
        """Return the **binary** digest of everything hashed so far.

        The object's state is unchanged; hashing may continue afterwards.

        :Return: A byte string of `digest_size` bytes; it may contain
            non-ASCII characters, including null bytes.
        """
        return self._hash.digest()

    def hexdigest(self):
        """Return the **printable** digest of everything hashed so far.

        The object's state is unchanged; hashing may continue afterwards.

        :Return: A string of 2 * `digest_size` hexadecimal ASCII digits.
        """
        return self._hash.hexdigest()

    def copy(self):
        """Return a clone carrying the same internal state as this object.

        Useful for efficiently hashing several strings that share a common
        initial substring.

        :Return: A hash object of the same type.
        """
        return self._hash.copy()

    def new(self, data=None):
        """Return a fresh instance of this hash type.

        Unlike `copy()`, the returned object starts with empty internal
        state.  This base implementation is a placeholder that returns
        ``None``.

        :Parameters:
          data : byte string
            The first chunk of the message to hash.

        :Return: A hash object of the same type.
        """
        pass
dominicelse/scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_univariate.py | 46 | 17043 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (abs, arctan2, asarray, cos, exp, floor, log, log10,
arange, pi, prod, roll, sign, sin, sqrt, sum, where,
zeros, tan, tanh, dot)
try:
from scipy.misc import factorial
except ImportError:
pass
from .go_benchmark import Benchmark
#-----------------------------------------------------------------------
# UNIVARIATE SINGLE-OBJECTIVE PROBLEMS
#-----------------------------------------------------------------------
class Problem02(Benchmark):
    """
    Univariate Problem02 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem02}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right)

    Bound constraints: :math:`x \\in [2.7, 7.5]`

    .. figure:: figures/Problem02.png
        :alt: Univariate Problem02 function
        :align: center

       **Univariate Problem02 function**

    *Global optimum*: :math:`f(x)=-1.899599` for :math:`x = 5.145735`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(2.7, 7.5)]
        self.global_optimum = 5.145735
        self.fglob = -1.899599

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        return sin(z) + sin(10.0 / 3.0 * z)
class Problem03(Benchmark):
    """
    Univariate Problem03 objective function.

    This class defines the Univariate Problem03 global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\\text{Problem03}}(x) = - \\sum_{k=1}^5 k \\sin[(k+1)x+k]

    Bound constraints: :math:`x \\in [-10, 10]`

    .. figure:: figures/Problem03.png
        :alt: Univariate Problem03 function
        :align: center

       **Univariate Problem03 function**

    *Global optimum*: :math:`f(x)=-12.03124` for :math:`x = -6.7745761`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(-10, 10)]
        self.global_optimum = -6.7745761
        self.fglob = -12.03124

    def fun(self, x, *args):
        self.nfev += 1
        x = x[0]
        y = 0.0
        # range(1, 6) sums k = 1..5.  (The docstring formerly wrote the upper
        # limit as 6; five terms is the standard definition of this Shubert-
        # type test function and is consistent with the stated minimum
        # f(-6.7745761) = -12.03124.)
        for k in range(1, 6):
            y += k * sin((k + 1) * x + k)
        return -y
class Problem04(Benchmark):
    """
    Univariate Problem04 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem04}}(x) = - \\left(16x^2 - 24x + 5 \\right) e^{-x}

    Bound constraints: :math:`x \\in [1.9, 3.9]`

    .. figure:: figures/Problem04.png
        :alt: Univariate Problem04 function
        :align: center

       **Univariate Problem04 function**

    *Global optimum*: :math:`f(x)=-3.85045` for :math:`x = 2.868034`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(1.9, 3.9)]
        self.global_optimum = 2.868034
        self.fglob = -3.85045

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        quadratic = 16 * z ** 2 - 24 * z + 5
        return -quadratic * exp(-z)
class Problem05(Benchmark):
    """
    Univariate Problem05 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem05}}(x) = - \\left(1.4 - 3x \\right) \\sin(18x)

    Bound constraints: :math:`x \\in [0, 1.2]`

    .. figure:: figures/Problem05.png
        :alt: Univariate Problem05 function
        :align: center

       **Univariate Problem05 function**

    *Global optimum*: :math:`f(x)=-1.48907` for :math:`x = 0.96609`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0.0, 1.2)]
        self.global_optimum = 0.96609
        self.fglob = -1.48907

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        return -(1.4 - 3 * z) * sin(18.0 * z)
class Problem06(Benchmark):
    """
    Univariate Problem06 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem06}}(x) = - \\left[x + \\sin(x) \\right] e^{-x^2}

    Bound constraints: :math:`x \\in [-10, 10]`

    .. figure:: figures/Problem06.png
        :alt: Univariate Problem06 function
        :align: center

       **Univariate Problem06 function**

    *Global optimum*: :math:`f(x)=-0.824239` for :math:`x = 0.67956`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(-10.0, 10.0)]
        self.global_optimum = 0.67956
        self.fglob = -0.824239

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        envelope = exp(-z ** 2.0)
        return -(z + sin(z)) * envelope
class Problem07(Benchmark):
    """
    Univariate Problem07 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right) + \\log(x) - 0.84x + 3

    Bound constraints: :math:`x \\in [2.7, 7.5]`

    .. figure:: figures/Problem07.png
        :alt: Univariate Problem07 function
        :align: center

       **Univariate Problem07 function**

    *Global optimum*: :math:`f(x)=-1.6013` for :math:`x = 5.19978`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(2.7, 7.5)]
        self.global_optimum = 5.19978
        self.fglob = -1.6013

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        oscillation = sin(z) + sin(10.0 / 3.0 * z)
        return oscillation + log(z) - 0.84 * z + 3
class Problem08(Benchmark):
    """
    Univariate Problem08 objective function.

    This class defines the Univariate Problem08 global optimization problem. This
    is a multimodal minimization problem defined as follows:

    .. math::

        f_{\\text{Problem08}}(x) = - \\sum_{k=1}^5 k \\cos[(k+1)x+k]

    Bound constraints: :math:`x \\in [-10, 10]`

    .. figure:: figures/Problem08.png
        :alt: Univariate Problem08 function
        :align: center

       **Univariate Problem08 function**

    *Global optimum*: :math:`f(x)=-14.508` for :math:`x = -7.083506`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(-10, 10)]
        self.global_optimum = -7.083506
        self.fglob = -14.508

    def fun(self, x, *args):
        self.nfev += 1
        x = x[0]
        y = 0.0
        # range(1, 6) sums k = 1..5.  (The docstring formerly wrote the upper
        # limit as 6; five terms is the standard 1-D Shubert definition and is
        # consistent with the stated minimum f(-7.083506) = -14.508.)
        for k in range(1, 6):
            y += k * cos((k + 1) * x + k)
        return -y
class Problem09(Benchmark):
    """
    Univariate Problem09 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem09}}(x) = \\sin(x) + \\sin \\left(\\frac{2}{3} x \\right)

    Bound constraints: :math:`x \\in [3.1, 20.4]`

    .. figure:: figures/Problem09.png
        :alt: Univariate Problem09 function
        :align: center

       **Univariate Problem09 function**

    *Global optimum*: :math:`f(x)=-1.90596` for :math:`x = 17.039`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(3.1, 20.4)]
        self.global_optimum = 17.039
        self.fglob = -1.90596

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        return sin(z) + sin(2.0 / 3.0 * z)
class Problem10(Benchmark):
    """
    Univariate Problem10 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem10}}(x) = -x\\sin(x)

    Bound constraints: :math:`x \\in [0, 10]`

    .. figure:: figures/Problem10.png
        :alt: Univariate Problem10 function
        :align: center

       **Univariate Problem10 function**

    *Global optimum*: :math:`f(x)=-7.916727` for :math:`x = 7.9787`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0, 10)]
        self.global_optimum = 7.9787
        self.fglob = -7.916727

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        return -z * sin(z)
class Problem11(Benchmark):
    """
    Univariate Problem11 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem11}}(x) = 2\\cos(x) + \\cos(2x)

    Bound constraints: :math:`x \\in [-\\pi/2, 2\\pi]`

    .. figure:: figures/Problem11.png
        :alt: Univariate Problem11 function
        :align: center

       **Univariate Problem11 function**

    *Global optimum*: :math:`f(x)=-1.5` for :math:`x = 2.09439`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(-pi / 2, 2 * pi)]
        self.global_optimum = 2.09439
        self.fglob = -1.5

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        return 2 * cos(z) + cos(2 * z)
class Problem12(Benchmark):
    """
    Univariate Problem12 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem12}}(x) = \\sin^3(x) + \\cos^3(x)

    Bound constraints: :math:`x \\in [0, 2\\pi]`

    .. figure:: figures/Problem12.png
        :alt: Univariate Problem12 function
        :align: center

       **Univariate Problem12 function**

    *Global optimum*: :math:`f(x)=-1` for :math:`x = \\pi`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0, 2 * pi)]
        self.global_optimum = pi
        self.fglob = -1

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        s, c = sin(z), cos(z)
        return s ** 3.0 + c ** 3.0
class Problem13(Benchmark):
    """
    Univariate Problem13 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem13}}(x) = -x^{2/3} - (1 - x^2)^{1/3}

    Bound constraints: :math:`x \\in [0.001, 0.99]`

    .. figure:: figures/Problem13.png
        :alt: Univariate Problem13 function
        :align: center

       **Univariate Problem13 function**

    *Global optimum*: :math:`f(x)=-1.5874` for :math:`x = 1/\\sqrt(2)`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0.001, 0.99)]
        self.global_optimum = 1.0 / sqrt(2)
        self.fglob = -1.5874

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        a = z ** (2.0 / 3.0)
        b = (1.0 - z ** 2) ** (1.0 / 3.0)
        return -a - b
class Problem14(Benchmark):
    """
    Univariate Problem14 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem14}}(x) = -e^{-x} \\sin(2\\pi x)

    Bound constraints: :math:`x \\in [0, 4]`

    .. figure:: figures/Problem14.png
        :alt: Univariate Problem14 function
        :align: center

       **Univariate Problem14 function**

    *Global optimum*: :math:`f(x)=-0.788685` for :math:`x = 0.224885`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0.0, 4.0)]
        self.global_optimum = 0.224885
        self.fglob = -0.788685

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        return -exp(-z) * sin(2.0 * pi * z)
class Problem15(Benchmark):
    """
    Univariate Problem15 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem15}}(x) = \\frac{x^{2} - 5 x + 6}{x^{2} + 1}

    Bound constraints: :math:`x \\in [-5, 5]`

    .. figure:: figures/Problem15.png
        :alt: Univariate Problem15 function
        :align: center

       **Univariate Problem15 function**

    *Global optimum*: :math:`f(x)=-0.03553` for :math:`x = 2.41422`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(-5.0, 5.0)]
        self.global_optimum = 2.41422
        self.fglob = -0.03553

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        numerator = -z ** 2.0 + 5 * z - 6
        denominator = z ** 2 + 1
        return -numerator / denominator
class Problem18(Benchmark):
    """
    Univariate Problem18 objective function.

    A multimodal 1-D minimization problem with a piecewise definition:

    .. math::

        f_{\\text{Problem18}}(x) = \\begin{cases}(x-2)^2 & \\textrm{if} \\hspace{5pt} x \\leq 3 \\\\
        2\\log(x-2)+1&\\textrm{otherwise}\\end{cases}

    Bound constraints: :math:`x \\in [0, 6]`

    .. figure:: figures/Problem18.png
        :alt: Univariate Problem18 function
        :align: center

       **Univariate Problem18 function**

    *Global optimum*: :math:`f(x)=0` for :math:`x = 2`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0.0, 6.0)]
        self.global_optimum = 2
        self.fglob = 0

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        # Logarithmic branch beyond 3, quadratic branch at or below it.
        if z > 3:
            return 2 * log(z - 2.0) + 1
        return (z - 2.0) ** 2.0
class Problem20(Benchmark):
    """
    Univariate Problem20 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem20}}(x) = -[x-\\sin(x)]e^{-x^2}

    Bound constraints: :math:`x \\in [-10, 10]`

    .. figure:: figures/Problem20.png
        :alt: Univariate Problem20 function
        :align: center

       **Univariate Problem20 function**

    *Global optimum*: :math:`f(x)=-0.0634905` for :math:`x = 1.195137`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(-10, 10)]
        self.global_optimum = 1.195137
        self.fglob = -0.0634905

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        envelope = exp(-z ** 2.0)
        return -(z - sin(z)) * envelope
class Problem21(Benchmark):
    """
    Univariate Problem21 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem21}}(x) = x \\sin(x) + x \\cos(2x)

    Bound constraints: :math:`x \\in [0, 10]`

    .. figure:: figures/Problem21.png
        :alt: Univariate Problem21 function
        :align: center

       **Univariate Problem21 function**

    *Global optimum*: :math:`f(x)=-9.50835` for :math:`x = 4.79507`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0, 10)]
        self.global_optimum = 4.79507
        self.fglob = -9.50835

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        first = z * sin(z)
        second = z * cos(2.0 * z)
        return first + second
class Problem22(Benchmark):
    """
    Univariate Problem22 objective function.

    A multimodal 1-D minimization problem:

    .. math::

        f_{\\text{Problem22}}(x) = e^{-3x} - \\sin^3(x)

    Bound constraints: :math:`x \\in [0, 20]`

    .. figure:: figures/Problem22.png
        :alt: Univariate Problem22 function
        :align: center

       **Univariate Problem22 function**

    *Global optimum*: :math:`f(x)=e^{-27\\pi/2} - 1` for :math:`x = 9\\pi/2`

    """

    def __init__(self, dimensions=1):
        Benchmark.__init__(self, dimensions)
        # Search interval plus the known minimizer and minimum value.
        self._bounds = [(0, 20)]
        self.global_optimum = 9.0 * pi / 2.0
        self.fglob = exp(-27.0 * pi / 2.0) - 1.0

    def fun(self, x, *args):
        # Each call counts as one objective evaluation.
        self.nfev += 1
        z = x[0]
        return exp(-3.0 * z) - sin(z) ** 3.0
| bsd-3-clause |
vjmac15/Lyilis | lib/youtube_dl/extractor/howstuffworks (VJ Washington's conflicted copy 2017-08-29).py | 34 | 4741 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
find_xpath_attr,
int_or_none,
js_to_json,
unescapeHTML,
determine_ext,
)
class HowStuffWorksIE(InfoExtractor):
    # Matches video pages on any howstuffworks.com subdomain, e.g.
    # http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm
    _VALID_URL = r'https?://[\da-z-]+\.howstuffworks\.com/(?:[^/]+/)*(?:\d+-)?(?P<id>.+?)-video\.htm'
    _TESTS = [
        {
            'url': 'http://adventure.howstuffworks.com/5266-cool-jobs-iditarod-musher-video.htm',
            'info_dict': {
                'id': '450221',
                'ext': 'flv',
                'title': 'Cool Jobs - Iditarod Musher',
                'description': 'Cold sleds, freezing temps and warm dog breath... an Iditarod musher\'s dream. Kasey-Dee Gardner jumps on a sled to find out what the big deal is.',
                'display_id': 'cool-jobs-iditarod-musher',
                'thumbnail': r're:^https?://.*\.jpg$',
                'duration': 161,
            },
            'skip': 'Video broken',
        },
        {
            'url': 'http://adventure.howstuffworks.com/7199-survival-zone-food-and-water-in-the-savanna-video.htm',
            'info_dict': {
                'id': '453464',
                'ext': 'mp4',
                'title': 'Survival Zone: Food and Water In the Savanna',
                'description': 'Learn how to find both food and water while trekking in the African savannah. In this video from the Discovery Channel.',
                'display_id': 'survival-zone-food-and-water-in-the-savanna',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://entertainment.howstuffworks.com/arts/2706-sword-swallowing-1-by-dan-meyer-video.htm',
            'info_dict': {
                'id': '440011',
                'ext': 'mp4',
                'title': 'Sword Swallowing #1 by Dan Meyer',
                'description': 'Video footage (1 of 3) used by permission of the owner Dan Meyer through Sword Swallowers Association International <www.swordswallow.org>',
                'display_id': 'sword-swallowing-1-by-dan-meyer',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://shows.howstuffworks.com/stuff-to-blow-your-mind/optical-illusions-video.htm',
            'only_matching': True,
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        # The page embeds its clip metadata as a JavaScript object literal.
        clip_js = self._search_regex(
            r'(?s)var clip = ({.*?});', webpage, 'clip info')
        clip_info = self._parse_json(
            clip_js, display_id, transform_source=js_to_json)

        video_id = clip_info['content_id']
        formats = []
        # Collect formats directly advertised by the clip object: HLS,
        # a single FLV URL, and any listed MP4 renditions.
        m3u8_url = clip_info.get('m3u8')
        if m3u8_url and determine_ext(m3u8_url) == 'm3u8':
            formats.extend(self._extract_m3u8_formats(m3u8_url, video_id, 'mp4', format_id='hls', fatal=True))
        flv_url = clip_info.get('flv_url')
        if flv_url:
            formats.append({
                'url': flv_url,
                'format_id': 'flv',
            })
        for video in clip_info.get('mp4', []):
            formats.append({
                'url': video['src'],
                # bitrate strings look like e.g. '400k' (the 'k' is stripped
                # for the numeric vbr field below).
                'format_id': 'mp4-%s' % video['bitrate'],
                'vbr': int_or_none(video['bitrate'].rstrip('k')),
            })

        if not formats:
            # Fall back to the SMIL service when the clip object lists no
            # playable media.
            smil = self._download_xml(
                'http://services.media.howstuffworks.com/videos/%s/smil-service.smil' % video_id,
                video_id, 'Downloading video SMIL')

            # Base URL for all renditions is stored in a <meta name="httpBase">
            # element in the SMIL head.
            http_base = find_xpath_attr(
                smil,
                './{0}head/{0}meta'.format('{http://www.w3.org/2001/SMIL20/Language}'),
                'name',
                'httpBase').get('content')

            URL_SUFFIX = '?v=2.11.3&fp=LNX 11,2,202,356&r=A&g=A'

            for video in smil.findall(
                    './{0}body/{0}switch/{0}video'.format('{http://www.w3.org/2001/SMIL20/Language}')):
                # system-bitrate is in bits/s; scale to kbit/s.
                vbr = int_or_none(video.attrib['system-bitrate'], scale=1000)
                formats.append({
                    'url': '%s/%s%s' % (http_base, video.attrib['src'], URL_SUFFIX),
                    'format_id': '%dk' % vbr,
                    'vbr': vbr,
                })

        self._sort_formats(formats)

        return {
            'id': '%s' % video_id,
            'display_id': display_id,
            'title': unescapeHTML(clip_info['clip_title']),
            'description': unescapeHTML(clip_info.get('caption')),
            'thumbnail': clip_info.get('video_still_url'),
            'duration': int_or_none(clip_info.get('duration')),
            'formats': formats,
        }
| gpl-3.0 |
krast/suse_xen | tools/python/xen/xend/XendAPIStore.py | 44 | 2431 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2007 Tom Wilkie <tom.wilkie@gmail.com>
#============================================================================
"""
This is a place to put instances of XenAPI objects,
instead of just holding them in arbitrary places.
All objects which subclass XendBase should use this
mechanism.
You must register both the uuid and type, and get objects
by type, to ensure safety
"""
import threading

# Registry of every live XenAPI object, keyed by (uuid, type).  Both the
# uuid and the type must be supplied, and lookups are done by type as
# well, so that a uuid is never resolved to an object of the wrong class.
__classes = {}
__classes_lock = threading.RLock()


def register(uuid, type, inst):
    """Store inst under (uuid, type) and return it."""
    # RLock supports the context-manager protocol, which replaces the
    # manual acquire()/try/finally/release() dance and cannot leak the
    # lock on an exception.
    with __classes_lock:
        __classes[(uuid, type)] = inst
        return inst


def deregister(uuid, type):
    """Remove and return the instance registered under (uuid, type).

    Returns None (and removes nothing) if no such instance exists.
    """
    with __classes_lock:
        old = get(uuid, type)
        if old is not None:
            del __classes[(uuid, type)]
        return old


def get(uuid, type):
    """Get the instance by uuid and type, or None if not registered."""
    with __classes_lock:
        return __classes.get((uuid, type), None)


def get_all(all_type):
    """Get all registered instances of the given type."""
    with __classes_lock:
        return [inst
                for ((uuid, t), inst) in __classes.items()
                if t == all_type]


def get_all_uuid(all_type):
    """Get all registered uuids of the given type."""
    with __classes_lock:
        return [uuid
                for (uuid, t) in __classes.keys()
                if t == all_type]
| gpl-2.0 |
krafczyk/spack | var/spack/repos/builtin/packages/py-fastaindex/package.py | 5 | 1697 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyFastaindex(PythonPackage):
    """FastA index (.fai) handler compatible with samtools faidx is extended
    with 4 columns storing counts for A, C, G & T for each sequence.."""

    homepage = "https://github.com/lpryszcz/FastaIndex"
    url = "https://pypi.io/packages/source/F/FastaIndex/FastaIndex-0.11rc7.tar.gz"

    # Second argument is the md5 checksum of the release tarball.
    version('0.11rc7', '882c973d968d9db596edfd0fbb07e3a8')

    # Pure-Python package: setuptools is only required at build time.
    depends_on('py-setuptools', type='build')
| lgpl-2.1 |
marcoantoniooliveira/labweb | sites/us/apps/partner/south_migrations/0011_auto__chg_field_partner_code.py | 13 | 20515 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from oscar.core.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
    """South schema migration 0011: turn Partner.code into an AutoSlugField.

    The forward migration converts the plain unique SlugField into an
    AutoSlugField populated from the partner name; backwards restores the
    original SlugField definition.
    """

    def forwards(self, orm):
        """Apply the migration: alter the Partner.code column type."""
        # Changing field 'Partner.code'
        db.alter_column(u'partner_partner', 'code', self.gf('oscar.models.fields.autoslugfield.AutoSlugField')(allow_duplicates=False, max_length=128, separator=u'-', unique=True, populate_from='name', overwrite=False))

    def backwards(self, orm):
        """Revert the migration: restore Partner.code as a plain SlugField."""
        # Changing field 'Partner.code'
        db.alter_column(u'partner_partner', 'code', self.gf('django.db.models.fields.SlugField')(max_length=128, unique=True))

    # Frozen ORM snapshot generated by South at the time this migration was
    # created.  It describes every model the migration may reference and is
    # not meant to be edited by hand.  The user model keys are injected from
    # oscar.core.compat so the migration works with a custom AUTH_USER_MODEL.
    models = {
        u'address.country': {
            'Meta': {'ordering': "('-display_order', 'name')", 'object_name': 'Country'},
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'is_shipping_country': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'iso_3166_1_a2': ('django.db.models.fields.CharField', [], {'max_length': '2', 'primary_key': 'True'}),
            'iso_3166_1_a3': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'db_index': 'True'}),
            'iso_3166_1_numeric': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'printable_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        AUTH_USER_MODEL: {
            'Meta': {'object_name': AUTH_USER_MODEL_NAME},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'catalogue.attributeentity': {
            'Meta': {'object_name': 'AttributeEntity'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': u"orm['catalogue.AttributeEntityType']"})
        },
        u'catalogue.attributeentitytype': {
            'Meta': {'object_name': 'AttributeEntityType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
        },
        u'catalogue.attributeoption': {
            'Meta': {'object_name': 'AttributeOption'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': u"orm['catalogue.AttributeOptionGroup']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'option': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'catalogue.attributeoptiongroup': {
            'Meta': {'object_name': 'AttributeOptionGroup'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        u'catalogue.category': {
            'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
            'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
        },
        u'catalogue.option': {
            'Meta': {'object_name': 'Option'},
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 'max_length': '128'})
        },
        u'catalogue.product': {
            'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'},
            'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.ProductAttribute']", 'through': u"orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Category']", 'through': u"orm['catalogue.ProductCategory']", 'symmetrical': 'False'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': u"orm['catalogue.Product']"}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
            'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
            'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Product']", 'symmetrical': 'False', 'through': u"orm['catalogue.ProductRecommendation']", 'blank': 'True'}),
            'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': u"orm['catalogue.Product']"}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
            'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'upc': ('oscar.models.fields.NullCharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        u'catalogue.productattribute': {
            'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'},
            'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
            'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}),
            'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': u"orm['catalogue.ProductClass']"}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'})
        },
        u'catalogue.productattributevalue': {
            'Meta': {'object_name': 'ProductAttributeValue'},
            'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.ProductAttribute']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': u"orm['catalogue.Product']"}),
            'value_boolean': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}),
            'value_file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'value_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}),
            'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        u'catalogue.productcategory': {
            'Meta': {'ordering': "['product', 'category']", 'object_name': 'ProductCategory'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Category']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
        },
        u'catalogue.productclass': {
            'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}),
            'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
        },
        u'catalogue.productrecommendation': {
            'Meta': {'object_name': 'ProductRecommendation'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': u"orm['catalogue.Product']"}),
            'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
            'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catalogue.Product']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'partner.partner': {
            'Meta': {'object_name': 'Partner'},
            'code': ('oscar.models.fields.autoslugfield.AutoSlugField', [], {'allow_duplicates': 'False', 'max_length': '128', 'separator': "u'-'", 'blank': 'True', 'unique': 'True', 'populate_from': "'name'", 'overwrite': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'partners'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['{0}']".format(AUTH_USER_MODEL)})
        },
        u'partner.partneraddress': {
            'Meta': {'object_name': 'PartnerAddress'},
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['address.Country']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'line1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'line2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'line4': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'partner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'addresses'", 'to': u"orm['partner.Partner']"}),
            'postcode': ('oscar.models.fields.UppercaseCharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'search_text': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        u'partner.stockalert': {
            'Meta': {'ordering': "('-date_created',)", 'object_name': 'StockAlert'},
            'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '128'}),
            'stockrecord': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alerts'", 'to': u"orm['partner.StockRecord']"}),
            'threshold': ('django.db.models.fields.PositiveIntegerField', [], {})
        },
        u'partner.stockrecord': {
            'Meta': {'unique_together': "(('partner', 'partner_sku'),)", 'object_name': 'StockRecord'},
            'cost_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'low_stock_threshold': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_allocated': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'num_in_stock': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'partner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['partner.Partner']"}),
            'partner_sku': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'price_currency': ('django.db.models.fields.CharField', [], {'default': "'GBP'", 'max_length': '12'}),
            'price_excl_tax': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'price_retail': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stockrecords'", 'to': u"orm['catalogue.Product']"})
        }
    }

    complete_apps = ['partner']
o11s/open80211s | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Transient per-thread state for an in-progress FUTEX_WAIT, plus the
# long-lived aggregates reported at exit.  (The original defined
# process_names twice; the redundant first assignment is removed.)
thread_thislock = {}   # tid -> futex address the thread is blocked on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread blocked

lock_waits = {}  # long-lived stats on (tid,lock) blockage elapsed time
process_names = {}  # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record the moment a thread starts blocking on a FUTEX_WAIT."""
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        # Originators of WAKE events are of no interest here.
        return
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On return from futex(), accumulate how long the thread was blocked.

    Only threads previously seen entering FUTEX_WAIT have an entry in
    thread_blocktime; anything else (e.g. a FUTEX_WAKE caller) is ignored.
    """
    # dict.has_key() was removed in Python 3; the 'in' operator is
    # behaviourally identical and works under both Python 2 and 3.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Announce that tracing is live; the summary is printed on interrupt."""
    # print() call form is valid and identical under both Python 2 and 3,
    # unlike the original Python-2-only print statement.
    print("Press control+C to stop and show the summary")
def trace_end():
	"""Called by perf at session end: print per-(tid, lock) contention stats."""
	for (tid, lock) in lock_waits:
		# Renamed from min/max to avoid shadowing the builtins
		# (the extremes are unpacked but not reported here).
		wait_min, wait_max, avg, count = lock_waits[tid, lock]
		# print() call form is valid on both Python 2 and 3; the original
		# print statement is a syntax error under Python 3.
		print("%s[%d] lock %x contended %d times, %d avg ns" %
		      (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
flavour/eden | languages/ps.py | 5 | 231843 | # -*- coding: utf-8 -*-
{
"A location that specifies the geographic area for this region. This can be a location from the location hierarchy, or a 'group location', or a location that has a boundary for the area.": 'هغه ځای چی د جغرافیایی د لحاظ ځای مشخصوی. دا کیدای شی یو ځای د سلسله ځایونو نه وی، او یا ،ګروپ د ځایونو، او یا ځای چی سرحد د هغه ځای لپاره لری',
"A volunteer is defined as active if they've participated in an average of 8 or more hours of Program work or Trainings per month in the last year": 'یو داوطلب عبارت د یو فعال کس څخه دی که دوی په اوسطه توګه سره په هر پروګرام کی تر ۸ ساعتونو او یا په هر میاشت کی تیر کال یی ګډون کړی وی',
"Acronym of the organization's name, eg. IFRC.": 'د موسسی لومړی نوم لکه (IFRC ).',
"Add New Person's Details": 'اضافه کړی د نوی کس معلومات',
"Add Person's Details": 'اضافه کړی د کسانو معلومات',
"Address of an image to use for this Layer in the Legend. This allows use of a controlled static image rather than querying the server automatically for what it provides (which won't work through GeoWebCache anyway).": 'آدرس د دی تصویر استعمالیږی آفسانوی پاڼی کی. استعمال د بی حرکته کنترول شوی تصویر اجازه ورکوی نسبت د خپل سر (automatic ) تحقیق د سرور څخه د څه شی لپاره چی تهیه شوی دی( چی د GeoWebCache له طریقه کار نه ورکوی)',
"Children's Education": 'د ماشومانو تعلیم',
"Couldn't open %s!": '%s خلاصیدلی نشی',
"Create 'More Info'": 'جوړ کړی نور معلومات',
"Describe the procedure which this record relates to (e.g. 'medical examination')": 'تشریح کړی رویه چی دا تاریخچه ورسره اړیکه لری ( لکه ؛ طبی معاینات)',
"Edit 'More Info'": 'تنظیم نورو معلومات',
"Edit Person's Details": 'د کس د معلوماتو تنظیم',
"Enter a name to search for. You may use % as wildcard. Press 'Search' without input to list all items.": 'یو نوم دننه کړی چی پلټنه د هغه لپاره وشی. تاسی کیدای شی چی استعمال کړی ٪ په حیث د وحشی کارټ. پلټنه ته زور ورکړی بغیر د دننی چی ټول عنوانونه لست شی',
"Go to %(url)s, sign up & then register your application. You can put any URL in & you only need to select the 'modify the map' permission.": 'ولاړ شی ٪( url)s, اول دننه شی بیا ځان راجستر کړی. تاسو کولای شی هر یو URL دننه کړی او صرف تاسی د نقشی د مشخصول د انتخاب ته اړه لری',
"If this configuration is displayed on the GIS config menu, give it a name to use in the menu. The name for a personal map configuration will be set to the user's name.": 'او که دا تنظیمات ښکاره شی په د GIS د تنظیم په مینو کی، یو نوم ورکړی چی استعمال شی په مینو کی. نوم د شخصی نقشی تنظیمات کیدای شی ځای په ځای شی د استعمالوونکی په نوم کی',
"If this field is populated then a user who specifies this Organization when signing up will be assigned as a Staff of this Organization unless their domain doesn't match the domain field.": 'او که دا ځای پر نفوس وی نو بیا استعمالوونکی چی مشخصوی دا موسسه کله چی داخلیږی نو تعینیږی منحیث د موسسه کارمند او که چیری د دوی قلمرو صدق نه کړی د قلمرو ځای سره',
"If you don't see the Cluster in the list, you can add a new one by clicking link 'Create Cluster'.": 'که تاسی غونچه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی یو نوی غونچه؛',
"If you don't see the Organization in the list, you can add a new one by clicking link 'Create Organization'.": 'که تاسی موسسه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی یو موسسه؛',
"If you don't see the Sector in the list, you can add a new one by clicking link 'Create Sector'.": 'که تاسی برخه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی یو نوی برخه؛',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Facility Type'.": 'که تاسی بڼه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی د امکاناتو بڼه؛',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Office Type'.": 'که تاسی بڼه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی یو نوی دفتر؛',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Organization Type'.": 'که تاسی بڼه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی د موسسه بڼه؛',
"If you don't see the Type in the list, you can add a new one by clicking link 'Create Region'.": 'که تاسی بڼه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی یوه ساحه؛',
"If you don't see the activity in the list, you can add a new one by clicking link 'Create Activity'.": 'که تاسی فعالیت په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی فعالیت؛',
"If you don't see the beneficiary in the list, you can add a new one by clicking link 'Create Beneficiary'.": 'که تاسی فایده په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی فایده؛',
"If you don't see the campaign in the list, you can add a new one by clicking link 'Create Campaign'.": 'که تاسی کمپین په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی کمپین؛',
"If you don't see the community in the list, you can add a new one by clicking link 'Create Community'.": 'که تاسی جامعه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی جامعه؛',
"If you don't see the location in the list, you can add a new one by clicking link 'Create Location'.": 'که تاسی ځای په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی ځای؛',
"If you don't see the milestone in the list, you can add a new one by clicking link 'Create Milestone'.": 'که تاسی مهمه دوره په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی مهمه دوره؛',
"If you don't see the project in the list, you can add a new one by clicking link 'Create Project'.": 'که تاسی پروژه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی پروژه؛',
"If you don't see the type in the list, you can add a new one by clicking link 'Create Activity Type'.": 'که تاسی بڼه په لست کی نه وی لیدلی، تاسی کولای شی چی اضافه کړی یو نوی د نصب د کلیکولو په واسطه ؛ اضافه کړی یو نوی دفتر؛',
"If you enter a foldername then the layer will appear in this folder in the Map's layer switcher. A sub-folder can be created by separating names with a '/'": "که تاسی د فولدر نوم داخل کړی نو بیا پاڼه ښکاریږی په فولدر کی د نقشی د پاڼی سویچونکی. یوه فرعی فولدر کیدای شی جوړه شی د جدا کول د نوم د a '/'",
"Last Month's Work": 'ورستی د میاشتی دنده',
"Last Week's Work": 'د ورستی هفتی دنده',
"Level is higher than parent's": 'سطح لوړه ده د منشا څخه',
"List Persons' Details": 'د کسانو معلومات لست کړی',
"Need a 'url' argument!": 'اړه د ؛ url ؛ بحث',
"No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530": 'هیڅ UTC آفست پیدا شوی نه ده. هیله ده UTC آفست ځای پر ځای کړی په خپل ؛ د استعمالوونکی منظره کی؛ معلومات. لکه: UTC+0530',
"Optional. The name of the geometry column. In PostGIS this defaults to 'the_geom'.": 'اختیاری. نوم د هندسی د ستون. د GIS پوست دا قراردادی ته په؛the" geom"',
"Parent level should be higher than this record's level. Parent level is": 'دمنشا تاریخچه باید د دی تاریخچه څخه ډیره وی. د والدینو سطح',
"Person's Details added": 'د کس معلومات اضافه شوی',
"Person's Details deleted": 'د کس معلومات له منځه تللی',
"Person's Details updated": 'د کس معلومات ننی شوی',
"Person's Details": 'د کس معلومات',
"Persons' Details": 'د کسانو معلومات',
"Please provide as much detail as you can, including the URL(s) where the bug occurs or you'd like the new feature to go.": 'هیله ده مهیا کړی په هغه اندازه باندی معلومات چی کولای شی، د URL (S) هم په ځان کی ولری چیرته چی مایکروفون واقع کیږی یا تاسی کیدای شی یوه نوی شکل ته ولاړ شی',
"Policy or Strategy added, awaiting administrator's approval": 'سیاست یا ستراتیژی اضافه شوی، د اداری تایید ته انتظار لری',
"Search Person's Details": 'د کس د معلوماتو پلټنه',
"Select 2 records from this list, then click 'Merge'.": 'د دی لست څخه دوه تاریخچی انتخاب کړی، بیا یوزای کلیک کړی',
"Select a Room from the list or click 'Add Room'": 'انتخاب کړه کوټه د لست نه او کلیک ؛کوټه اضافه کړی؛',
"Select this if all specific locations need a parent at the deepest level of the location hierarchy. For example, if 'district' is the smallest division in the hierarchy, then all specific locations would be required to have a district as a parent.": 'دا انتخاب کړی که ټول مشخصه ځایونه ضرورت ولری د منشا په ژور سطح د سلسله د مراتبو ځای. لکه ، ولسوالی؛ وړکیترین برخه د سلسله مراتبو، نو بیا ټول مشخصه ځایونه ضرورت لری چی ولری یوه ولسوالی',
"Select this if all specific locations need a parent location in the location hierarchy. This can assist in setting up a 'region' representing an affected area.": 'دا انتخاب کړی که ټول مشخصه ځایونه ضرورت ولری د منشا ځای ته په سلسله د مراتبو ځای کی. دا همکاری کولای شی په تنظیم د ؛ساحه؛ چی یوه متاثره ساحه وړاندی کوی',
"Status 'assigned' requires the %(fieldname)s to not be blank": 'حالت ؛تعین شوی؛ ضرورت دی %(fieldname)s چی خالی نه ده',
"The Project module can be used to record Project Information and generate Who's Doing What Where reports.": 'د پروژی اندازه استعمالیږی چی زیرمه کړی د پروژی معلومات او تولیدوی چی څوک یی کوی څه شی چیرته راپور ورکوی',
"The URL of the image file. If you don't upload an image file, then you must specify its location here.": 'د url د سند تصویر. که تاسی رانه وړی د تصویر سند، نو بیا تاسی باید مشخصه کړی دهغه ځای.',
"The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server.": 'مهیا شوی ( فورمید) بی اعتباره شوی. تاسو انتحاب کړی یو د اصلاح فورم چی په دی سرور کی موجود نه دی',
"The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading.": 'مهیا شوی (جابوید) بی اعتباره شوی. جلسه د فورم چی اچول شوی دی بی اعتباره دی. تاسی باید بیا له سره راوړی',
"The staff member's official job title": 'د کارمندانو رسمی د دندی عنوان ',
"The volunteer's role": 'د رضاکار نقش',
"There are no details for this person yet. Add Person's Details.": 'هیڅ معلومات ددی کس لپاره تر اوسه نشته ده. د کس معلومات اضافه کړی ',
"To search for a location, enter the name. You may use % as wildcard. Press 'Search' without input to list all locations.": 'چی د ځای د پلټنی لپاره، داخل کړی نوم. چی کیدای شی تاسی استعمال کړی ٪ منحیث د وایلد کارد. ز',
"To search for a member, enter any portion of the name of the person or group. You may use % as wildcard. Press 'Search' without input to list all members.": 'چی پلټنه د عضو لپاره، داخل کړی هر یوه برخه د نوم یا د ګروپ. تاسی کیدای شی استعمال کړی ٪ منحیث د وایلد کارد. د پلټنی تڼی کیږدی بدون داخلی نه لست کړی ټول اعضاوی',
"To search for a person, enter any of the first, middle or last names and/or an ID number of a person, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'چی کس ولټوی، نو داخل کړی هره یو لومړی، میانه یا وروستی نومونه او/ یا یو د تذکړی شمیره د یوه کس، د مصافی په واسطه جدا کړی. تاسی کیدای شی استعمال کړی ٪ منحیث د وایلد کارد. د پلټنی تڼی کیږدی بدون داخلی نه لست کړی ټول کسان',
"Type the first few characters of one of the Participant's names.": 'ولیکلی لومړنی څو نقشونه یو د ګډونوالو نومونه',
"Type the first few characters of one of the Person's names.": 'ولیکلی لومړنی څو نقشونه یو د کسانو نومونه',
"Upload an image file here. If you don't upload an image file, then you must specify its location in the URL field.": 'د تصویر سند دلته راوړی. که تاسی را نه وړه د تصویر سند، نو بیا تاسی مشخصه کړی د هغه ځای په URL ځای کی',
"Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'.": 'رواړل شوی سندونه تصویر/تصویرونه نه دی. حمایه شوی تصویر شکل د png, jpg, bmp, gif دی',
"You can search by by group name, description or comments and by organization name or acronym. You may use % as wildcard. Press 'Search' without input to list all.": 'تاسی کولای شی چی د گروپ د نوم په واسطه ، توزیحات یا نظریه او د موسسه د نوم په واسطه یا سر نوم ولټوی. تاسی کیدای شی استعمال کړی ٪ منحیث د وایلد کارد. د لټون تڼی کیږدی بدون د داخلی چی ټول لست کړی',
"You can search by course name, venue name or event comments. You may use % as wildcard. Press 'Search' without input to list all events.": 'تاسی کولای شی د کورس نوم په واسطه ولټوی، د اغاز نوم یا د نظری. تاسی کیدای شی استعمال کړی ٪ منحیث د وایلد کارد. د لټون تڼی کیږدی بدون د داخلی چی ټول واقع لست کړی',
"You can search by job title or person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'تاسی کولای شی د دندی عنوان یا د کس نوم ولټوی - داخل کړی هر یو لومړی، مینځنی یا ورستی نوم ، د مصافه په واسطه یو بل څخه بیل کړی. تاسی کوالای شی چی استعمال کړی ٪ منحیث د وایلد کارد. د لټون تڼی کیږدی بدون د داخلی چی ټول کسانو لست ولټوی',
"You can search by person name - enter any of the first, middle or last names, separated by spaces. You may use % as wildcard. Press 'Search' without input to list all persons.": 'تاسی د کس د نوم په واسطه لټول کولای شی - ',
"You can search by trainee name, course name or comments. You may use % as wildcard. Press 'Search' without input to list all trainees.": 'تاسی کولایی شی چی د کار زده کوونکی نوم، د کورس نوم یا نظریه په واسطه ولټوی. تاسی کیدای شی استعمال کړی ٪ منحیث د وایلد کارد، د لټون تڼی کیږدی بدون د داخلی چی ټول د کار د زده کړی لست ولټوی',
"You have unsaved changes. Click Cancel now, then 'Save' to save them. Click OK now to discard them.": 'تاسی نه زیرمه شوی تغیرات لری. اوس د لغوی تڼی کیږدی، بیا د زیرمی چی زیرمه یی کړی. اوس د ok تڼی کیږدی د چی لری یی کړی',
'# of International Staff': '# جهانی کارمندان',
'# of National Staff': '# ملی کارمندان',
'# selected': '# انتخاب شوی',
'%(app)s not installed. Ask the Server Administrator to install on Server.': 'د %(app)s نصب شوی نه دی. د سرور د اداری څخه وپښتی چی نصب کړی په سرور کی',
'%(count)s Roles of the user removed': 'د %(count)s نقشونه د استعمالوونکی له منځه تللی',
'%(count)s Users removed from Role': 'د %(count)s استعمالوونکی نقش له منځه وړلی',
'%(label)s contains %(values)s': 'د %(label)s په ځان کی لری %(values)s',
'%(label)s contains any of %(values)s': 'د %(label)s په ځان کی هر یو د %(values)s',
'%(label)s does not contain %(values)s': 'د %(label)s په ځان کی نه لری %(values)s',
'%(label)s is %(values)s': 'د %(label)s دی %(values)s',
'%(label)s like %(values)s': 'د %(label)s لکه %(values)s',
'%(label)s not like %(values)s': 'د %(label)s نه دی لکه %(values)s',
'%(module)s not installed': 'د %(module)s نصب شوی نه دی',
'%(pe)s in %(location)s': 'د %(pe)s په %(location)s',
'%(proj4js)s definition': 'د %(proj4js)s تعریف',
'%(resource)s Filter': 'د %(resource)s فیلتر',
'%(site_label)s Status added': 'د %(site_label)s حالت اضافه شوی',
'%(site_label)s Status deleted': 'د %(site_label)s حالت له منځه تللی',
'%(site_label)s Status updated': 'د %(site_label)s حالت ننی شوی',
'%(site_label)s Status': 'د %(site_label)s حالت',
'%(system_name)s - New User Registered': 'د %(system_name)s نوی د استعمالوونکی راجستر',
'%(system_name)s - New User Registration Approval Pending': 'د %(system_name)s نوی د استعمالوونکی راجستر تایید په وخت کی',
'%(system_name)s has sent an email to %(email)s to verify your email address.\\nPlease check your email to verify this address. If you do not receive this email please check you junk email or spam filters.': 'د%(system_name)s یوه برقی لیک یی لیږلی ده ٪ (برقی لیک)s چی تصدیق کړی خپل د برقی لیک ادرس./ n هیله ده خپل برقی لیک وګوری چی تصدیق کړی ادرس. که تاسی دا برقی لیک تر لاسه نه کړ هیله ده خپل پخوانی برقی لیک یا د سپام فیلتر وګوری',
'& then click on the map below to adjust the Lat/Lon fields': 'او بیا په لاندنی نقشه باندی کلیک وکړی چی برابر شی Lat/Lon ځایونه',
'(filtered from _MAX_ total entries)': '(فیلتر شوی د _MAX_ مجموعی داخل شوی)',
'* Required Fields': '* ضروری ځایونه',
'...or add a new bin': '... یا اضافه کړی یو نوی صندوق',
'1 location, shorter time, can contain multiple Tasks': '۱ ځای، لګ وخت، شامل د مختلفو کارونو',
'1. Fill the necessary fields in BLOCK CAPITAL letters.': '۱.د ضروری ځایونود لویو حرفونوپه بلوک کی ډکول ',
'2. Always use one box per letter and leave one box space to separate words.': '۲. همیشه یو باکس په هر یو حرف کی استعمال کړی او یو باکس مسافه د کلماتو د جدا کولو لپاره پریږدی',
'3. Fill in the circles completely.': '۳. دایری په بشپړه توګه ډک کړی',
'3W Report': '۳.w راپور',
'3W': '۳.w',
'A Marker assigned to an individual Location is set if there is a need to override the Marker assigned to the Feature Class.': 'ښکاره کوونکی (marker) د فردی تنظیموونکی ځایونو لپاره شکل ورکړل شوی دی او که اړه ښکاره کوونکی ته نه وی نو بیا یی صنف ته شکل ورکوو',
'A brief description of the group (optional)': 'یوه مختصره معلومات د ګروپ (اختیاری)',
'A file in GPX format taken from a GPS.': 'د GPX فایلونه د GPS فایلونو څخه اخیستل کیږی',
'A project milestone marks a significant date in the calendar which shows that progress towards the overall objective is being made.': 'د پروژی مهمه مرحله یوه مهمه علامه په جنتری کی شمیرل کیږی چی د هدف د پرمختګ د کیدو ښکارونکی دی',
'A strict location hierarchy cannot have gaps.': 'د مشخصی سلسله مراتبو په ځای کی خالیګاه نه وی ',
'A task is a piece of work that an individual or team can do in 1-2 days.': 'یوه وظیفه عبارت د یوه کار څخه دی چی یو کس او یو ګروپ د کسانو په یوه او یا هم به دوه ورزو کی تر سره کوی',
'ACTION REQUIRED': 'ضروری اقدامات',
'ANY': 'هر یو',
'Abbreviation': 'مخففافات',
'About Us': 'زمونږ په باره کی',
'About': 'په باره کی/ تقریبا',
'Academic': 'علمی',
'Access denied': 'د لاسرسی امکانات موجود نه دی',
'Account Registered - Please Check Your Email': 'د راجستر حساب- هیله ده خپل ایمیل وګوری',
'Acronym': 'لومړی نوم',
'Activate': 'فعالول',
'Active Missions': 'فعال هدف',
'Active': 'فعال ',
'Active?': 'فعال؟',
'Activities matching Assessments': 'د تطبیق د فعالیتونو ارزیابی',
'Activities': 'فعالیتونه',
'Activity Added': 'فعالیت اضافه شو',
'Activity Deleted': ' فعالیت له منځه وړل شو',
'Activity Details': 'د فعالیتونو معلومات',
'Activity Organization Added': 'د اضافه شوی فعالیتونو موسسه',
'Activity Organization Deleted': 'له منځه وړل شوی فعالیتونو موسسه',
'Activity Organization Updated': 'د ننی فعالینونو موسسه',
'Activity Organization': 'د فعالیتونو موسسه',
'Activity Organizations': 'د فعالیت موسسی',
'Activity Report': 'د فعالیت راپور',
'Activity Type Added': 'د اضافه شوی فعالیت بڼه ',
'Activity Type Deleted': 'له منځه وړل شوی د فعالیت بڼه ',
'Activity Type Updated': 'د ننی فعالینونو بڼه',
'Activity Type added to Activity': 'د فعالیت بڼه اضافه شوه فعالیت ته',
'Activity Type added to Project Location': 'د فعالیت بنه اضافه شوه د پروژی ځای ته',
'Activity Type removed from Activity': 'د فعالیت بنه له منځه ولاړه د فعالیت نه',
'Activity Type removed from Project Location': 'د فعالیت بڼه له منځه ولاړه د پروژی د ځای',
'Activity Type': 'د فعالیت بڼه',
'Activity Types': 'د فعالیت بڼی',
'Activity Updated': 'ننی (updated ) فعالیت',
'Activity': 'فعالیت ',
'Add %(site_label)s Status': 'اضافه%(site_label)s حالت',
'Add Activity Type to Activity': 'اضافه کړی د فعالیت بڼه فعالیت ته',
'Add Activity Type to Project Location': 'اضافه کړی د فعالیت بڼه د پروژی ځای ته',
'Add Activity Type': 'نوی د فعالیت بڼه',
'Add Address': 'اضافه کړی ادرس',
'Add Affiliation': 'اضافه کړی ضمیمه',
'Add Annual Budget': 'نوی کلنی بودیجه',
'Add Appraisal': 'اضافه کړی ارزیابی',
'Add Award': 'اضافه کړی انعام',
'Add Beneficiaries': 'اضافه کړی ګټه',
'Add Branch Organization': 'اضافه کړی د موسسی څانګه',
'Add Certificate for Course': 'اضافه کړی د کورس شهادت نامه',
'Add Certification': 'اضافه کړی شهادت ',
'Add Contact Information': 'اضافه کړی د اړیکی معلمومات',
'Add Contact': 'اضافه کړی اړیکه',
'Add Credential': 'اضافه کړی د اعتبار لیک',
'Add Data to Theme Layer': 'اضافه کړی معلومات د موضوع پاڼی ته',
'Add Deployment': 'اضافه کړی پراخوالی',
'Add Education Detail': 'اضافه کړی علمی معلومات',
'Add Group Member': 'اضافه کړی د ګروپ کسان',
'Add Hazard to Project': 'اضافه کړی خطر پروژی ته',
'Add Hazard': 'اضافه کړی خطر پروژی ته',
'Add Hours': 'اضافه کړی ساعتونه',
'Add Identity': 'اضافه کړی پیژنګلنه',
'Add Image': 'اضافه کړی تصویر',
'Add Keyword': 'اضافه کړی مهمی کلمی',
'Add Layer from Catalog': 'اضافه کړی پاڼی د کتالوګ ( Catalog )',
'Add Layer to this Profile': 'اضافه کړی پاڼی دی نمایه ته',
'Add Line': 'اضافه کړی خط (line)',
'Add Location to Organization': 'اضافه کړی ځای موسسی ته',
'Add Location': 'اضافه کړی ځای',
'Add Log Entry': 'اضافه کړی د داخلیدو د ثبت',
'Add Member': 'اضافه کړی کسان',
'Add Membership': 'اضافه کړی عضویت',
'Add New Address': 'اضافه کړی نوی ادرس',
'Add New Affiliation': 'اضافه کړی نوی ضمیمه',
'Add New Appraisal': 'اضافه کړی نوی ارزیابی',
'Add New Award': 'اضافه کړی نوی تحفه',
'Add New Beneficiaries': 'اضافه کړی نوی ګټه',
'Add New Beneficiary Type': 'اضافه کړی نوی د ګټی بڼه',
'Add New Branch Organization': 'اضافه کړی نوی د موسسه څانګه',
'Add New Branch': 'اضافه کړی نوی څانګه',
'Add New Campaign': 'اضافه کړی نوی کمپاین',
'Add New Certificate': 'اضافه کړی نوی شهادت نامه',
'Add New Certification': 'اضافه کړی نوی شهادت ',
'Add New Cluster': 'اضافه کړی نوی خوشه/ ګروپ',
'Add New Coalition': 'اضافه کړی نوی اتحاد',
'Add New Community': 'اضافه کړی نوی جامعه',
'Add New Competency Rating': 'اضافه کړی نوی د صلاحیت اندازه',
'Add New Contact': 'اضافه کړی نوی اړیکه',
'Add New Course Certificate': 'اضافه کړی نوی کورس شهادت نامه',
'Add New Course': 'اضافه کړی نوی کورس',
'Add New Credential': 'اضافه کړی نوی اعتبار لیک',
'Add New Data to Theme Layer': 'اضافه کړی نوی معلومات د موضوع پاڼی ته',
'Add New Department': 'اضافه کړی نوی شعبه',
'Add New Deployment': 'اضافه کړی نوی پراخوالی',
'Add New Donor': 'اضافه کړی نوی ورکوونکی ته',
'Add New Entry': 'اضافه کړی نوی دفتر د زیرمه',
'Add New Facility Type': 'اضافه کړی نوی د اسانتیاوی بڼه',
'Add New Facility': 'اضافه کړی نوی اسانتیاوی',
'Add New Feature Layer': 'اضافه کړی نوی د شکل پاڼه',
'Add New Group': 'اضافه کړی نوی ګروپ',
'Add New Hazard': 'اضافه کړی نوی خطر',
'Add New Hours': 'اضافه کړی نوی ساعتونه',
'Add New Identity': 'اضافه کړی نوی پیژنګلنه',
'Add New Image': 'اضافه کړی نوی تصویر',
'Add New Job Title': 'اضافه کړی نوی د وظیفی عنوان',
'Add New Keyword': 'اضافه کړی نوی مهمی کلمی',
'Add New Layer to Symbology': 'اضافه کړی نوی پاڼی ته نماد',
'Add New Layer': 'اضافه کړی نوی پاڼی',
'Add New Location Hierarchy': 'اضافه کړی نوی سلسله مراتبو ځای',
'Add New Location': 'اضافه کړی نوی ځای',
'Add New Log Entry': 'اضافه کړی نوی د داخلیدو د ثبت',
'Add New Mailing List': 'اضافه کړی نوی د پست لست',
'Add New Map Profile': 'اضافه کړی نوی د نقشه تنظیمات',
'Add New Marker': 'اضافه کړی نوی ښکارونکی ( marker) ',
'Add New Member': 'اضافه کړی نوی عضو',
'Add New Membership Type': 'اضافه کړی نوی د عضویت بڼه',
'Add New Membership': 'اضافه کړی نوی عضویت',
'Add New Milestone': 'اضافه کړی نوی مهمه مرحله',
'Add New Network': 'اضافه کړی نوی شبکه',
'Add New Office Type': 'اضافه کړی نوی د دفتر بڼه',
'Add New Office': 'اضافه کړی نوی دفتر ',
'Add New Organization Type': 'اضافه کړی نوی د موسسه بڼه',
'Add New Organization': 'اضافه کړی نوی موسسه',
'Add New Output': 'اضافه کړی نوی تولید',
'Add New Participant': 'اضافه کړی نوی ګډونوال',
'Add New PoI Type': 'اضافه کړی نوی د مول (mol) بڼه',
'Add New Point of Interest': 'اضافه کړی نوی زړه پوری نقطه',
'Add New Policy or Strategy': 'اضافه کړی نوی د ستراتژی سیاست',
'Add New Professional Experience': 'اضافه کړی نوی مسلکی تجربه',
'Add New Profile Configuration': 'اضافه کړی د نوی برخی تنظیمات',
'Add New Program': 'اضافه کړی نوی پروګرام',
'Add New Project': 'اضافه کړی نوی پروژه',
'Add New Projection': 'اضافه کړی نوی پلان',
'Add New Record': 'اضافه کړی نوی تاریخچه',
'Add New Region': 'اضافه کړی نوی ځای/ محل',
'Add New Resource': 'اضافه کړی نوی منبع',
'Add New Response Summary': 'اضافه کړی نوی د ځواب خلاصه',
'Add New Role': 'اضافه کړی نوی وظیفه/ نقش',
'Add New Room': 'اضافه کړی نوی کوټه',
'Add New Sector': 'اضافه کړی نوی سکتور',
'Add New Service': 'اضافه کړی نوی خدمات',
'Add New Skill Equivalence': 'اضافه کړی نوی مساوی مهارت',
'Add New Skill Type': 'اضافه کړی نوی د مهارت بڼه',
'Add New Skill': 'اضافه کړی نوی مهارتونه',
'Add New Staff Assignment': 'اضافه کړی نوی د کارمندانو وظیفه',
'Add New Staff Member': 'اضافه کړی نوی د کارمندانو عضو',
'Add New Status': 'اضافه کړی نوی حالت',
'Add New Symbology for Layer': 'اضافه کړی نوی نماد پاڼی ته',
'Add New Symbology': 'اضافه کړی نوی نماد',
'Add New Task': 'اضافه کړی نوی وظیفه ',
'Add New Team Member': 'اضافه کړی نوی ګروپ عضو',
'Add New Team': 'اضافه کړی نوی ګروپ',
'Add New Theme': 'اضافه کړی نوی موضوع',
'Add New Training Event': 'اضافه کړی نوی د زده کړی واقعه',
'Add New Training': 'اضافه کړی نوی زده کړی',
'Add New Volunteer Cluster Position': 'اضافه کړی نوی د رضاکار خوشه/ ګروپ ځای',
'Add New Volunteer Cluster Type': 'اضافه کړی نوی رضاکار خوشه/ګروپ بڼه',
'Add New Volunteer Cluster': 'اضافه کړی نوی د رضاکار خوشه/ ګروپ',
'Add New Volunteer Role': 'اضافه کړی نوی رضاکار دنده',
'Add New Volunteer': 'اضافه کړی نوی رضاکار',
'Add Office': 'اضافه کړی دفتر',
'Add Organization to Activity': 'اضافه کړی د فعالیت موسسه',
'Add Organization to Project': 'اضافه کړی موسسه پروژی ته',
'Add Organization': 'اضافه کړی موسسه',
'Add Output': 'نوی د باندی تللی',
'Add Participant': 'اضافه کړی ګډونوال',
'Add Person': 'اضافه کړی کسان',
'Add PoI Type': 'اضافه کړی د پل (pol ) بڼه',
'Add Point of Interest': 'اضافه کړی زړه پوری هدف',
'Add Point': 'اضافه کړی هدف',
'Add Policy or Strategy': 'اضافه کړی ستراتیژی او سیاست',
'Add Polygon': 'اضافه کړی څو ضلعی',
'Add Professional Experience': 'اضافه کړی مسلکی تجربه',
'Add Profile Configuration for this Layer': 'اضافه کړی د برخی تنظیمات دی پاڼی ته',
'Add Profile Configuration': 'اضافه کړی د برخی تنظیمات',
'Add Project': 'اضافه کړی پروژه ',
'Add Resource': 'اضافه کړی منبع',
'Add Response Summary': 'اضافه کړی د خلاصه د ځواب',
'Add Role': 'اضافه کړی وظیفه',
'Add Room': 'اضافه کړی کوټه',
'Add Sector to Organization': 'اضافه کړی سکتور موسسه ته',
'Add Sector to Project': 'اضافه کړی سکتور پروژی ته',
'Add Sector to Theme': 'اضافه کړی سکتور موضوع ته',
'Add Sector': 'اضافه کړی ځای (سکتور)',
'Add Service to Organization': 'اضافه کړی خدمات موسسی ته',
'Add Service': 'اضافه کړی خدمات',
'Add Skill Equivalence': 'اضافه کړی معادل مهارت',
'Add Skill Type': 'اضافه کړی د مهارت بڼه',
'Add Skill': 'اضافه کړی مهارت',
'Add Staff Assignment': 'اضافه کړی د کارمندانو دنده',
'Add Staff Member to Project': 'اضافه کړی د کارمندانو عضو پروژه ته',
'Add Status': 'اضافه کړی حالت',
'Add Symbology for Layer': 'اضافه کړی نماد پاڼی ته',
'Add Symbology': 'اضافه کړی نماد',
'Add Task': 'اضافه کړی دنده',
'Add Team Member': 'اضافه کړی د ګروپ کسان',
'Add Team': 'اضافه کړی کروپ',
'Add Theme to Activity': 'اضافه کړی موضوع فعالیت ته',
'Add Theme to Project Location': 'اضافه کړی موضوع د پروژی ځای ته',
'Add Theme to Project': 'اضافه کړی موضوع پروژی ته',
'Add Theme': 'اضافه کړی موضوع',
'Add Training': 'اضافه کړی تعلیم',
'Add Volunteer to Project': 'اضافه کړی رضاکار پروژی ته',
'Add a Person': 'اضافه کړی یو کس',
'Add a new certificate to the catalog.': 'اضافه کړی یو نوی شهادت نامه فهرست ته ',
'Add a new competency rating to the catalog.': 'اضافه کړی یو نوی د صلاحیت اندازه فهرست ته',
'Add a new membership type to the catalog.': 'اضافه کړی یو نوی د عضو بڼه فهرست ته',
'Add a new program to the catalog.': 'اضافه کړی یو نوی پروګرام فهرست ته',
'Add a new skill type to the catalog.': 'اضافه کړی یو نوی د مهارت بڼه فهرست ته',
'Add all organizations which are involved in different roles in this project': 'اضافه کړی ټولی موسسی کومی چی په مختلفو دندو د پروژی بوخت دی',
'Add saved search': 'اضافه کړی ثبت شوی پلټنی',
'Add search': 'اضافه کړی پلټنی',
'Add this entry': 'اضافه کړی د ثبت دفتر',
'Add to a Team': 'اضافه کړی یو ګروپ ته',
'Add': 'اضافه',
'Add...': 'اضافه کړی....',
'Address Details': 'د آدرس معلومات',
'Address Mapped': 'آدرس نقشه شوی ده',
'Address NOT Mapped': 'آدرس نقشه شوی نه ده',
'Address Type': 'د آدرس بڼه',
'Address added': 'آدرس اضافه شوی ده',
'Address deleted': 'آدرس له منځه وړل شوی ده',
'Address updated': 'ننی شوی آدرس',
'Address': 'آدرس ',
'Addresses': 'آدرسونه',
'Adjust Stock Levels': 'د اسهامو د تنظیم سطحه',
'Admin Assistant': 'معاون د اداری',
'Admin': 'اداره',
'Administrador Database': 'د اداری دیتابیس',
'Adolescent (12-20)': 'بلوغ (12-20)',
'Adult (21-50)': 'ځوان (۲۱-۵۰)',
'Advanced Search': 'پرمختللی پلټنه',
'Advocacy': 'دفاع/وکالت',
'Affiliation Details': 'د اتحاد معلومات',
'Affiliation added': 'اتحاد اضافه شوه',
'Affiliation deleted': 'اتحاد له منځه تللی ده',
'Affiliation updated': 'اتحاد ننی شوه',
'Affiliations': 'اتحاد ',
'Age Group': 'د عمر ګروپ',
'Age': 'عمر',
'Airport': 'هوای ډګر',
'Alerts': 'د خطر علامه',
'All Entities': 'ټول خلک',
'All Open Tasks': 'ټول خلاص شوی وظایف',
'All Records': 'ټول تاریخچه',
'All Tasks': 'ټولی وظیفی',
'All selected': 'ټول انتخاب شوی ',
'All': 'ټول',
'Amount of the Project Budget spent at this location': 'د پروژی د بودیجی اندازه چی مصرف شوی په دی ځای کی',
'Amount': 'اندازه',
'An ESRI Shapefile (zipped)': 'یو د (ESRI Shapefile) (ziped)',
'An error occured, please %(reload)s the page.': 'یوه غلطی واقع شوه هیله ده چی %(reload)s دا پاڼه',
'Annual Budget deleted': 'کلنی بودیجه له منځه ولاړه',
'Annual Budget updated': 'کلنی بودیجه ننی شوه',
'Annual Budget': 'کلنی بودیجه',
'Annual Budgets': 'کلنی بودیجه',
'Anonymous': 'نامعلومه',
'Any': 'هر یو',
'Appeal Code': 'د درخواست کود',
'Applicable to projects in Pacific countries only': 'یوازی په اقیانوسی هیوادونو کی امکان د اجرا دی',
'Application Permissions': 'اجازه د درخواست',
'Application': 'درخواست',
'Appraisal Details': 'د ارزیابی معلومات',
'Appraisal added': 'ارزیابی اضافه شوی ده',
'Appraisal deleted': 'ارزیابی له منځه وړل شوی ده',
'Appraisal updated': 'ارزیابی ننی شوی ده',
'Appraisals': 'ارزیابیانی',
'Approve': 'منل شوی ',
'Approver': 'منل',
'ArcGIS REST Layer': 'د ArcGIS REST پاڼه',
'Are you sure you want to delete this record?': 'ایا تاسی مطمین یاست چی دا تاریخچه له منځی یوسی؟',
'Assessment and Community/Beneficiary Identification': 'ازریابی او جامعه / مفیده پیژندګلنه',
'Assessments': 'ازریابی ',
'Asset': 'دولت/ دارای',
'Assets': 'دارایانی',
'Assign %(staff)s': 'تعینول ٪(کارمند/کارمندان)',
'Assign Asset': 'تعین د دارای',
'Assign Role to a User': 'تعین یو نقش استعمالوونکی ته',
'Assign Staff': 'کارمند تعینول',
'Assign another Role': 'یو بل نقش تعینول',
'Assigned To': 'تعین شوی ....ته',
'Assigned to': 'تعین شوی ....ته',
'Assigned': 'تعین شوی',
'Association': 'اتحادیه',
'Attachments': 'ضمیمه',
'Attributes': 'ویشلول',
'Attribution': 'ویشل ',
'Australian Dollars': 'د استرلیا ډالر',
'Authentication Required': 'تصدیق ضرورت دی',
'Auxiliary Role': 'کمکی نقش',
'Availability': 'لاسرسی',
'Available Forms': 'حاضر فورم',
'Available in Viewer?': 'ایا د کتوونکو په لاسرسی کی شته',
'Average Rating': 'متوسطه اندازه',
'Award added': 'انعام ورکړ شو',
'Award deleted': 'انعام له منځه ولاړ',
'Award updated': 'انعام ننی شو',
'Award': 'انعام',
'Awards': 'انعامونه',
'Awareness Raising': 'د پوهی پر مختګ',
'Bachelor': 'لیسانس',
'Back to Roles List': 'بیرته د نقش لست ته',
'Back to Top': 'بیرته پاسنی برخی ته',
'Back to Users List': 'بیرته د استعمالوونکو لست ته',
'Background Color': 'د منظری (Background) رنګ',
'Bahai': 'بهایی (Bahai )',
'Baldness': 'بی ویښته والی',
'Base Layer?': 'اصلی پاڼه؟',
'Base Layers': 'اصلی پاڼی',
'Base Location': 'اصلی ځای',
'Basic Details': 'اساسی معلومات',
'Basic Search': 'اساسی پلټنه',
'Bdrt (Branch Disaster Response Teams)': 'Bdrt(د څانګی د مصیبت د ځواب ګروپ)',
'Behaviour Change Communication': 'د رفتار د تغیر اړیکه',
'Beneficiaries Added': 'فایدی اضافه شوی',
'Beneficiaries Deleted': 'له منځه وړل شوی فایدیی',
'Beneficiaries Details': 'د معلوماتو فایدی',
'Beneficiaries Updated': 'د ننی فایدی',
'Beneficiaries': 'فایدی',
'Beneficiary Report': 'د فایدی راپور',
'Beneficiary Type Added': 'د فایدی بڼه اضافه شوی ',
'Beneficiary Type Deleted': 'د فایدی بڼه له منځه وړل شوی ',
'Beneficiary Type Updated': 'د فایدی بڼه ننی شوی',
'Beneficiary Type': 'د فایدی بڼه',
'Beneficiary Types': 'د فایدی بڼی',
'Better Programming Initiative Guidance': 'طراحی د ښه د پروګرام راهنمای',
'Bilateral': 'د دواړو خواوو نه',
'Bing Layer': 'د بینګ (Bing) پاڼه',
'Blocked': 'بنده شوی',
'Blood Banking': 'د وینی بانک',
'Blood Donor Recruitment': 'د وینی ورکونکو راټولول ',
'Blood Group': 'وينه ګروپ',
'Blood Type (AB0)': 'د وینی بڼه (ABO)',
'Body Hair': 'د ځان ویښته',
'Body': 'ځان',
'Boq and Cost Estimation': 'د باق (bog) او مصرف تخمین',
'Both': 'دواړه',
'Branch Coordinator': 'د څانګی برابرونکی',
'Branch Organization Details': 'د څانګی د موسسی معلومات',
'Branch Organization added': 'د څانګی موسسی اضافه شوی',
'Branch Organization deleted': 'څانګه د موسسی له منځه ولاړه',
'Branch Organization updated': 'څانګه د موسسی ننی شوی',
'Branch Organizations': 'څانګی د موسسی',
'Branch Planning': 'د څانګه پلانول',
'Branch': 'څانګه',
'Branches': 'څانګی',
'Breakdown': 'ناڅاپه غورځیدل',
'Buddhist': 'بودایی',
'Budget': 'بودیجه',
'Buffer': 'سپر',
'Building Name': 'د کور نوم',
'By selecting this you agree that we may contact you.': 'ددی په انتخابولو سره تاسی موافق یاست چی درسره به اړیکه ټینګیږی',
'CDRT (Community Disaster Response Teams)': 'CDRT ( د جامعه د مصیبت د ځواب ګروپ)',
'COPY': 'د مخی لیکل',
'CREATE': 'جوړول',
'CV': 'خلاص د اسنادو',
'Calendar': 'جنتری',
'Camp': 'د فوځ ځای',
'Campaign Added': 'کمپین (campaign) اضافه شوی',
'Campaign Deleted': 'کمپین (campaign) له منځه وړل شوی ',
'Campaign Message': 'د کمپین (campaign) پیغام',
'Campaign Updated': 'کمپین (campaign) ننی شی',
'Campaign': 'کمپین (campaign)',
'Campaigns': 'کمپینونه (campaigns)',
'Can read PoIs either from an OpenStreetMap file (.osm) or mirror.': 'ایا پولس( pols) کوالای شی چی یو خلاص د نقشه سرک دوسیه (osm) او یا شیشه ولولی',
'Canadian Dollars': 'کانادایی دالر',
'Cancel Crop': 'لغوکول د محصول',
'Cancel editing': 'لغو کول تنظیمات',
'Cancel': 'لغوکول',
'Canceled': 'لغو کول د رهبری',
'Cannot make an Organization a branch of itself!': 'موسسه په خپله نشوی کوالای چی یو څانګه جوړه کړی ',
'Cannot open created OSM file!': 'جوړه شوی دوسیه د OSM نه خلاصیږی',
'Cannot read from file: %(filename)s': 'د دوسیه د مخی نه لوستل کیږی: %(filename)s',
'Capacity Building of Governance': 'د دولت د ظرفیت جوړونه',
'Capacity Building of Management Staff': 'د کارمندانو اداری د ظرفیت جوړونه',
'Capacity Building of Staff': 'د کارمندانو د ظرفیت جوړونه',
'Capacity Building of Volunteers': 'د رضاکاروو د ظرفیت جوړونه',
'Capacity Building': 'د ظرفیت جوړونه',
'Catalogs': 'فهرستونه',
'Catchment Protection': 'د آوبو د بند محافظت',
'Cell Tower': 'د قلعه کوټه',
'Certificate Catalog': 'د شهادت نامه فهرست',
'Certificate Details': 'د شهادت نامه معلومات',
'Certificate added': 'د شهادت نامه اضافه شوی',
'Certificate deleted': 'د شهادت نامه له منځه تللی ',
'Certificate updated': 'د شهادت نامه ننی شوی',
'Certificate': 'شهادت نامه',
'Certificates': 'د شهادت نامی',
'Certification Details': 'د جواز معلومات',
'Certification added': 'جواز اضافه شوی',
'Certification deleted': 'جواز له منځه تللی',
'Certification updated': 'جواز ننی شوی',
'Certifications': 'جوازونه',
'Certifying Organization': 'د شهادت ورکولو موسسه',
'Chairman': 'ریس',
'Change Password': 'د پاسورد تبدیلول',
'Chapter': 'برخه',
'Check all': 'ټول ولټوه',
'Check this to make your search viewable by others.': 'دا ولټوه تر څو ستاسو پلټنه قابلیت د ښکاریدو نورو ته وی',
'Check-In': 'داخل شو',
'Check-Out': 'د باندی ولاړ',
'Child (2-11)': 'ماشوم ( ۲-۱۱)',
'Choose Country': 'هیواد وټاکی',
'Christian': 'عیسوی',
'Clean-Up Campaign': 'د کمپین (campaign) پاکول',
'Cleaner': 'پاکوونکی',
'Clear All': 'ټول پاک کړه',
'Clear Filter': 'فیلتر پاک کړه',
'Clear selection': 'انخاب پاک کړه',
'Clear': 'پاک ',
'Click anywhere on the map for full functionality': 'په هر ځای د نقشه کی کلیک وکړه د مکمل عملکرد لپاره',
'Click on the link': 'په لینک باندی کلیک وکړه',
'Click on the slider to choose a value': 'په سلیدر ( slider ) باندی کلیک کړی چی یو قیمت انتخاب کړی',
'Click to edit': 'کلیک کړی د جوړولو لپاره',
'Click where you want to open Streetview': 'کلیک وکړی په هغه ځای باندی چی د سرک منظره خلاصه شی',
'Climate Change Adaptation ': 'د اقلیم د بدلون تنظیم',
'Climate Change Mitigation': 'د اقلیم د بدلون تخفیف',
'Close map': 'نقشه وتړه',
'Close': 'تړلی',
'Closed': 'تړل شوی',
'Club 25 / Pledge 25': '۲۵ ګروپ',
'Cluster Attribute': 'متحده ګروپ/ خوشه ',
'Cluster Details': 'ګروپ/ خوشه معلومات',
'Cluster Distance': 'ګروپ/ خوشه مصافه',
'Cluster Threshold': 'د ګروپ / خوشه ځای',
'Cluster added': ' ګروپ/ خوشه اضافه شوی',
'Cluster deleted': ' ګروپ/ خوشه له منځه تللی',
'Cluster updated': ' ګروپ/ خوشه ننی شوی',
'Cluster': 'ګروپ/ خوشه',
'Clusters': ' ګروپونه / خوشی',
'Coalition Details': 'د اتحاد معلومات',
'Coalition added': 'اتحاد اضافه شوی',
'Coalition removed': 'اتحاد له منځه تللی ده',
'Coalition updated': 'اتحاد ننی شوی',
'Coalitions': 'اتحادونه',
'Coastal Conservation ': 'ساحلی محافظت',
'Code No.': 'کد لمبر',
'Code': 'کود',
'College': 'فوق بکلوریا',
'Comment': 'نظریه',
'Comments': 'نظریی',
'Commitments': 'ژمنه',
'Communication Officer': 'د ارتباط افسر',
'Communication': 'ارتباط',
'Communities': 'جامعی',
'Community Action Planning': 'د جامعه عملی پلان',
'Community Added': 'جامعه اضافه شوی',
'Community Based Disaster Preparedness Refresher': 'د ټولنې په کچه د پيښو په وړاندې چمتووالی تجددي کورسونه',
'Community Based Disaster Preparedness': 'د ټولنې په کچه د پيښو په وړاندې چمتووالی',
'Community Based Disaster Risk Reduction Refresher': 'د ټولنې په کچه د پيښو د ګواښونو کمول تجددي کورسونه',
'Community Based Disaster Risk Reduction': 'د ټولنې په کچه د پيښو د ګواښونو کمول',
'Community Based Health First Aid Refresher': 'ټولنې ته لومړنۍ مرستې او روغتيا تجددي کورسونه',
'Community Based Health First Aid': 'ټولنې ته لومړنۍ مرستې او روغتيا',
'Community Based Health and First Aid (CBHFA)': 'د جامعه اساسی صحت او لومړنی کمک (CBHFA)',
'Community Contacts': 'د جامعه اړیکه',
'Community Deleted': 'جامعه له منځه تللی',
'Community Details': 'د جامعه معلومات',
'Community Disaster Awareness': 'د جامعه د مصیبت خبرتیا',
'Community Early Warning Systems': 'د جامعه د مصیبت د مخکی تر مخکی خبرتیا ',
'Community Health Committees': 'د جامعه د صحت کمیته',
'Community Health Initiative/Projects': 'د جامعه د صحت پروژه',
'Community Health Risk Assessments': 'د جامعه د صحت د خطر ارزیابی',
'Community Mobilisation': 'اماده جامعه',
'Community Mobilization': 'اماده جامعه',
'Community Organisation': 'د جامعه موسسه',
'Community Organization': 'د جامعه موسسه',
'Community Preparedness': 'د جامعی اماده ګی',
'Community Updated': 'جامعه ننی شوی',
'Community': 'جامعه',
'Company': 'شرکت',
'Competency Rating Catalog': 'د صلاحیت د اندازه فهرست',
'Competency Rating Details': 'د صلاحیت د اندازی معلومات',
'Competency Rating added': 'د صلاحیت اندازه اضافه شوه',
'Competency Rating deleted': 'د صلاحیت اندازه له منځه ولاړه',
'Competency Rating updated': 'د صلاحیت اندازه ننی شوی',
'Competency Rating': 'د صلاحیت اندازه',
'Competency': 'صلاحیت',
'Completed': 'مکمل شوی',
'Complexion': 'چهره',
'Compromised': 'توافق',
'Config not found!': 'تنظیمات پیدا نشو',
'Configuration': 'تنظیمات ',
'Configure Layer for this Symbology': 'د تنظیماتو پاڼه د دی نماد لپاره',
'Confirmed': 'تایید شوی',
'Confirming Organization': 'موسسه تاییدیږی',
'Construction Activities': 'ساختمانی فعالیتونه',
'Construction of Water Supply Systems': 'ساختمان د آوبو د رسولو سیستم',
'Contact #': 'د اړیکی شمیره',
'Contact Added': 'اړیکه اضافه شوی',
'Contact Data': 'د اړیکی معلومات',
'Contact Deleted': 'اړیکه له منځه تللی',
'Contact Details updated': 'د اړیکی معلومات ننی شوی',
'Contact Details': 'د اړیکی معلومات',
'Contact Info': 'د اړیکی معلومات',
'Contact Information Added': 'د اړیکی معلومات اضافه شوی ',
'Contact Information Deleted': 'د اړیکی معلومات له منځه تللی',
'Contact Information Updated': 'د اړیکی معلومات ننی شوی',
'Contact Information': 'د اړیکی معلومات',
'Contact Method': 'د اړیکی طریقه',
'Contact People': 'د اړیکی خلک',
'Contact Person': 'د اړیکی کس',
'Contact Updated': 'اړیکه ننی شوی',
'Contact Us': 'مونږ سره په اړیکه کی شی',
'Contact added': 'اړیکه اضافه شوی',
'Contact deleted': 'اړیکه له منځه تللی',
'Contact us': 'مونږ سره په اړیکه کی شی',
'Contact': 'اړیکه',
'Contacts': 'اړیکی',
'Context': 'متن',
'Contingency/Preparedness Planning': 'د اړیکی/ آماده ګی پلان',
'Contract End Date': 'قرارداد په اخر تاریخ کی',
'Contractual Agreements (Community/Individual)': 'قراردادی موافقی ( جامعه/ په تنهایی)',
'Contractual Agreements (Governmental)': 'قراردادی موافقی ( دولت)',
'Controller': 'کنترولر',
'Cook Islands': 'د آشپز جزیری',
'Coordinate Layer': 'د همکاری پاڼه',
'Coordination and Partnerships': 'همکاری او شراکت',
'Coordinator': 'شریک',
'Corporate Entity': 'وجود مشترک',
'Could not add person record': 'د کس تاریخچه اضافه نه شوه',
'Could not create record.': 'تاریخچه جوړه نه شوه',
'Could not generate report': 'راپور تولید نه شو',
'Could not merge records. (Internal Error: %s)': 'راپور یو ځای نه شو (داخلی غلطی:%s)',
'Country Code': 'د هیواد کود',
'Country is required!': 'هیواد ضرورت دی',
'Country': 'هیواد ',
'Course Catalog': 'د کورس فهرست',
'Course Certificate Details': 'د کورس د شهادت ناامه معلومات',
'Course Certificate added': 'د کورس شهادت نامه اضافه شوی',
'Course Certificate deleted': 'د کورس شهادت نامه له منځه تللی ',
'Course Certificate updated': 'د کورس شهادت نامه ننی شوی',
'Course Certificates': 'د کورس شهادت نامی',
'Course Details': 'د کورس معلومات',
'Course added': 'کورس اضافه شوی',
'Course deleted': 'کورس له منځه تللی',
'Course updated': 'کورس ننی شوی',
'Course': 'کورس',
'Create Activity Type': 'اضافه کړی د فعالیت ؟بڼه',
'Create Activity': 'اضافه کړی فعالیت',
'Create Award': 'اضافه کړی انعام',
'Create Beneficiary Type': 'اضافه کړی د ګټی بڼه',
'Create Campaign': 'اضافه کړی کمپین (campaign)',
'Create Certificate': 'اضافه کړی شهادت نامه',
'Create Cluster': 'اضافه کړی ګروپ/ خوشه',
'Create Coalition': 'اضافه کړی یوالی',
'Create Community': 'اضافه کړی جامعه',
'Create Competency Rating': 'اضافه کړی د صلاحیت اندازه',
'Create Contact': 'اضافه کړی اړیکه',
'Create Course': 'اضافه کړی جریان (course)',
'Create Department': 'اضافه کړی شعبه',
'Create Facility Type': 'اضافه کړی د اسانتیاوو بڼه',
'Create Facility': 'اضافه کړی اسانیاوی',
'Create Feature Layer': 'اضافه کړی د شکل پاڼه',
'Create Group': 'ګروپ جوړ کړی',
'Create Hazard': 'اضافه کړی خطرات',
'Create Job Title': 'اضافه کړی د وظیفی عنوان',
'Create Job': 'اضافه کړی وظیفه',
'Create Layer': 'اضافه کړی پانی',
'Create Location Hierarchy': 'اضافه کړی د سلسله مراتبو ځای',
'Create Location': 'اضافه کړی ځای',
'Create Mailing List': 'اضافه کړی د پست لست',
'Create Map Profile': 'اضافه کړی د نقشی تنظیمات',
'Create Marker': 'اضافه کړی ښکارونکی ( marker) ',
'Create Member': 'اضافه کړی کسان',
'Create Membership Type': 'اضافه کړی د عضویت بڼه',
'Create Milestone': 'اضافه کړی مهمه مرحله',
'Create National Society': 'اضافه کړی ملی ټولنه ',
'Create Network': 'اضافه کړی شبکه',
'Create Office Type': 'اضافه کړی د دفتر بڼه',
'Create Organization Type': 'اضافه کړی د موسسی بڼه',
'Create Partner Organization': 'اضافه کړی همکاره موسسی',
'Create Program': 'اضافه کړی پروګرام',
'Create Project': 'اضافه کړی پروژه ',
'Create Projection': 'اضافه کړی نقشه',
'Create Record': 'اضافه کړی تاریخچه',
'Create Region': 'اضافه کړی ځای',
'Create Resource Type': 'اضافه کړی بڼه',
'Create Resource': 'اضافه کړی منبع',
'Create Role': 'نوی نقش جوړ کړی',
'Create Sector': 'اضافه کړی ځای (سکتور)',
'Create Staff Member': 'اضافه کړی د کارمندانو عضو',
'Create Team': 'نوی ګروپ جوړ کړی',
'Create Training Event': 'اضافه کړی د تعلیم موقع',
'Create User': 'نوی استعمالوونکی جوړکړی',
'Create Volunteer Cluster Position': 'اضافه کړی د رضاکار خوشه/ ګروپ موقیعت',
'Create Volunteer Cluster Type': 'اضافه کړی د رضاکار خوشه/ګروپ بڼه',
'Create Volunteer Cluster': 'اضافه کړی د رضاکار خوشه/ ګروپ',
'Create Volunteer Role': 'اضافه کړی د رضاکار دنده',
'Create Volunteer': 'اضافه کړی رضاکار',
'Create a new Group.': 'یو نوی ګروپ جوړ کړی',
'Create a new Team.': 'یو نوی ګروپ جوړ کړی',
'Create a new facility or ensure that you have permissions for an existing facility.': 'یو نوی سهولیت او یا مطمین شی چی تاسی اجازه د حاضر سهولیت ته لری',
'Create a new organization or ensure that you have permissions for an existing organization.': 'یو نوی موسسه او یا مطمین شی چی تاسی اجازه د حاضر موسسه ته لری',
'Create': 'جوړول',
'Created By': 'جوړ شوی په واسطه',
'Created on %s by %s': 'جوړ شوی په %s په واسطه %s',
'Created on %s': 'جوړ شوی په %s',
'Credential Details': 'د باور لیک معلومات',
'Credential added': 'باور لیک اضافه شوی ',
'Credential deleted': 'باور لیک له منځه تللی',
'Credential updated': 'د باور لیک ننی شوی',
'Credential': 'د باور لیک',
'Credentialling Organization': 'موسسه باوری کیږی',
'Credentials': 'د باور لیکونه',
'Crop Image': 'د ګروپ تصویر',
'Currency': 'اسعار',
'Current Home Address': 'د اوسنی د کور ادرس',
'Current Location': 'اوسنی ځای',
'Current': 'اوس',
'Currently no Appraisals entered': 'اوس هیڅ یو ارزیابی راغلی نه دی',
'Currently no Certifications registered': 'تر اوسه هیڅ شهادت نامه راجستر شوی نه دی',
'Currently no Course Certificates registered': 'تر اوسه هیڅ کورس شهادت نامه راجستر شوی نه دی',
'Currently no Credentials registered': 'تر اوسه هڅ د باور لیک راجستر شوی نه دی',
'Currently no Participants registered': 'تر اوسه هیڅ ګډونوال راجستر شوی نه دی',
'Currently no Professional Experience entered': 'تر اوسه هیڅ مسلکی با تجربه راغلی نه دی',
'Currently no Skill Equivalences registered': 'تر اوسه هیڅ معادل ماهر ته هم راجستر شوی نه دی ',
'Currently no Skills registered': 'تر اوسه هیڅ ماهران راجستر شوی نه دی',
'Currently no Trainings registered': 'تر اوسه هیڅ تعلیم راجستر نه دی',
'Currently no entries in the catalog': 'تر اوسه هیڅ څوک فهرست کی نه راغلی',
'Currently no hours recorded for this volunteer': 'تر اوسه د دی رضاکار لپاره هیڅ ساعتونو زیرمه شوی نه دی',
'Currently no programs registered': 'تر اوسه هیڅ پروګرام راجستر شوی نه دی',
'Currently no staff assigned': 'تر اوسه هیڅ کارمند تعین شوی نه دی',
'Currently no training events registered': 'تر اوسه هیڅ د تعلیم واقعه راجستر شوی نه ده',
'DELETE': 'له منځه وړل',
'DM / Relief': 'DM/ آرامول',
'DM Planning': 'DM پلانول',
'DRRPP Extensions': 'DRRPP توسعه',
'Daily Work': 'د هری ورز کار',
'Daily': 'هره ورز',
'Data Type': 'د معلوماتو بڼه',
'Data added to Theme Layer': 'معلومات اضافه شوی د موضوع پاڼی ته',
'Data import error': 'معلوماتو غلطی یی وارده کړی ده',
'Data uploaded': 'معلومات راغلی دی',
'Data': 'معلومات',
'Database Development': 'پرمختګ د دیتابیس',
'Database': 'دیتابیس',
'Date Created': 'جوړ شوی تاریخ',
'Date Due': 'د ورکړی تاریخ',
'Date Joined': 'د یوزای کیدو تاریخ',
'Date Modified': 'د تشریح تاریخ',
'Date Printed': 'د چاپ تاریخ',
'Date Received': 'د حصول تاریخ',
'Date Resigned': 'د استعفا تاریخ',
'Date must be %(max)s or earlier!': 'تاریخ باید %(max)s او د هغه څخه مخکی',
'Date must be %(min)s or later!': 'تاریخ باید %(min)s او د هغه څخه وروسته',
'Date must be between %(min)s and %(max)s!': 'تاریخ باید د %(min)s او %(max)s تر منځ اوسی',
'Date of Birth': 'تاریخ د زیږیدو',
'Date of Dismissal': 'د انفکاک نیټه',
'Date of Re-recruitment': 'د بیا غړیتوب نیټه',
'Date of Recruitment': 'د غړیتوب نیټه',
'Date': 'تاریخ',
'Date/Time': 'تاریخ/ وخت',
'Day': 'ورز',
'De-duplicate Records': 'نقلی تاریخچه ',
'De-duplicate': 'نقلی',
'Dead Body Transfer Refresher': 'د مړو تبادله تجددي کورسونه',
'Dead Body Transfer': 'د مړو تبادله',
'Dead Body': 'جسد',
'Deceased': 'مړشوی',
'Decision': 'فیصله',
'Default Base layer?': 'پخوانی اساسی پاڼی',
'Default Location': 'پخوانی ځای',
'Default Marker': 'پخوانی ښکارونکی (marker)',
'Default Realm = All Entities the User is a Staff Member of': 'پخوانی قلمرو = تول ماهیت د استعمالوونکو د کارمندانو عضوو د',
'Default Realm': 'پخوانی قلمرو',
'Default': 'پخوانی',
'Default?': 'پخوانی؟',
'Defines the icon used for display of features on handheld GPS.': 'هغه شکل معرفی کوی چی د شکل ښکاروندوی په لاس کی نیولی د GPS. ',
'Defines the icon used for display of features on interactive map & KML exports.': 'هغه شکل معرفی کوی چی د شکل ښکاروندوی په متقابله نقشه کی او صادرول د KML وی',
'Degrees in a latitude must be between -90 to 90.': 'درجه باید په وسعت د جغرافیاوی کی د ۹۰- تر ۹۰ پوری وی',
'Degrees in a longitude must be between -180 to 180.': 'درجه باید په طول د جغرافیاوی کی د۱۸۰- تر ۱۸۰ پوری وی',
'Degrees must be a number.': 'درجه باید شمیره وی',
'Delete Affiliation': 'یوازی والی له منځه وړل',
'Delete Appraisal': 'ارزیابی له منځه وړل ',
'Delete Award': 'انعام له منځه وړل',
'Delete Branch': 'څانګه له منځه وړل',
'Delete Certificate': 'شهادت نامه له منځه وړل',
'Delete Certification': 'شهادت له منځه وړل',
'Delete Cluster': 'ګروپ له منځه وړل',
'Delete Competency Rating': 'د صلاحیت اندازه له منځه وړل',
'Delete Contact Information': 'د اړیکی معلومات له منځه وړل',
'Delete Contact': 'اړیکه له منځه وړل',
'Delete Course Certificate': 'د کورس شهادت نامه له منځه وړل',
'Delete Course': 'کورس له منځه وړل',
'Delete Credential': 'د اعتبار لیک له منځه وړل',
'Delete Data from Theme layer': 'د معلوماتو له منځه وړل د موضوع د پاڼی څخه',
'Delete Department': 'بخش له منځه وړل',
'Delete Deployment': 'وسعت له منځه وړل',
'Delete Donor': 'د همکار له منځه وړل',
'Delete Facility Type': 'د امکاناتو د بڼه له منځه وړل',
'Delete Facility': 'امکانات له منځه وړل',
'Delete Feature Layer': 'د شکل پاڼه له منځه وړل',
'Delete Group': 'ګروپ له منځه وړل',
'Delete Hazard': 'خطر له منځه وړل',
'Delete Hours': 'ساعتونه له منځه وړل',
'Delete Image': 'تصویر له منځه وړل',
'Delete Job Title': 'د دندی عنوان له منځه وړل',
'Delete Layer': 'پاڼه له منځه وړل',
'Delete Location Hierarchy': 'د ځای د سلسله مراتبو له منځه وړل',
'Delete Location': 'ځای له منځه وړل',
'Delete Mailing List': 'د پوست لیست له منځه وړل',
'Delete Map Profile': 'د نقشی د تنظیماتو له منځه وړل',
'Delete Marker': 'ښکارندوی له منځه وړل',
'Delete Member': 'عضو له منځه وړل',
'Delete Membership Type': 'د عضویت بڼه له منځه وړل',
'Delete Membership': 'عضویت له منځه وړل',
'Delete National Society': 'د ملی ټولنه له منځه وړل',
'Delete Office Type': 'د دفتر بڼه له منځه وړل',
'Delete Office': 'دفتر له منځه وړل',
'Delete Organization Type': 'د موسسه بڼه له منځه وړل',
'Delete Organization': 'موسسه له منځه وړل',
'Delete Participant': 'ګډون وال له منځه وړل',
'Delete Partner Organization': 'د موسسه د شریک له منځه وړل',
'Delete Person': 'کس له منځه وړل',
'Delete PoI Type': 'د پول بڼه له منځه وړل',
'Delete Point of Interest': 'له منځه وړل د زړه پوری بخش',
'Delete Professional Experience': 'مسلکی تجربه له منځه وړل',
'Delete Program': 'پروګرام له منځه وړل',
'Delete Project': 'پروژه له منځه وړل',
'Delete Projection': 'د طرح له منځه وړل',
'Delete Record': 'د تاریخچه له منځه وړل',
'Delete Region': 'ساحه له منځه وړل',
'Delete Resource Type': 'د منبع د بڼه له منځه وړل',
'Delete Resource': 'د منبع له منځه وړل',
'Delete Role': 'نقش له منځه وړل',
'Delete Room': 'کوټه له منځه وړل',
'Delete Sector': 'سکتور له منځه وړل',
'Delete Service': 'خدمت له منځه وړل',
'Delete Skill Equivalence': 'د مهارت معادل له منځه وړل',
'Delete Skill Type': 'د مهارت د بڼه له منځه وړل',
'Delete Skill': 'مهارت له منځه وړل',
'Delete Staff Assignment': 'د کارمندانو دنده له منځه وړل',
'Delete Staff Member': 'د کارمندانو عضو له منځه وړل',
'Delete Status': 'حالت له منځه وړل',
'Delete Symbology': 'د نماد له منځه وړل',
'Delete Theme': 'د موضوع له منځه وړل',
'Delete Training Event': 'د تعلیم د واقعه له منځه وړل',
'Delete Training': 'د تعلیم له منځه وړل',
'Delete Volunteer Cluster Position': 'د رضاکار د ګروپ ځای له منځه وړل',
'Delete Volunteer Cluster Type': 'د داوطلب د ګروپ بڼه له منځه وړل',
'Delete Volunteer Cluster': 'د رضاکار د ګروپ له منځه وړل',
'Delete Volunteer Role': 'د رضاکار د نقش له منځه وړل',
'Delete Volunteer': 'رضاکار له منځه وړل',
'Delete all data of this type which the user has permission to before upload. This is designed for workflows where the data is maintained in an offline spreadsheet and uploaded just for Reads.': 'ټول معلومات چی په دی رنګه وی چی استعمالوونکی مخکی د راړل څخه ولری. دا جوړ شوی دی د دی رنګه جریان د کار ته چیرته چی معلومات لری د توزیع پاڼی او راوړل یی یوازی د مطالعه لپاره دی.',
'Delete saved search': 'زیرمه شوی پلټنه له منځه وړل',
'Delete this Filter': 'دا فیلتر له منځه وړل',
'Delete': 'له منځه وړل',
'Demographics': 'نفوس شمیرنه',
'Department / Unit': 'برخه',
'Department Catalog': 'برخه د فهرست',
'Department Details': 'معلومات د برخی',
'Department added': 'برخه اضافه شوی ',
'Department deleted': ' برخه له منځه وړل شوی ',
'Department updated': 'برخه ننی شوی',
'Deployed': 'پراختیا',
'Deploying NS': 'پراختیا د NS',
'Deployment Alert': 'وسعت د خطر خبرتیا',
'Deployment Date': 'وسعت نیټه',
'Deployment Details updated': 'د وسعت معلومات ننی شوی',
'Deployment Details': 'وسعت معلومات',
'Deployment Location': 'وسعت ځای',
'Deployment added': 'وسعت اضافه شوی',
'Deployment deleted': 'وسعت له منځه تللی',
'Deployment': 'وسعت',
'Deployments': 'وسعتونه',
'Description': 'تشریح ',
'Desluding ': 'دیسلودینګ',
'Destination': 'مقصد',
'Detailed Description/URL': 'معلومات د تشریح',
'Details': 'معلوماتونه',
'Disable': 'نا توان',
'Disaster Law': 'د مصیبت قانون',
'Disaster Management System Officer': 'آفسر د مصیبت د تنظیمولو سیستم',
'Disaster Management Unit Assistant': 'د مصیبت د تنظیم د برخی معاون',
'Disaster Preparedness Refresher': 'پيښو ته چمتووالی تجددي کورسونه',
'Disaster Preparedness': 'پيښو ته چمتووالی',
'Disaster Response Refresher': 'پيښو ته ځواب ورکوونه تجددي کورسونه',
'Disaster Response': 'پيښو ته ځواب ورکوونه',
'Disaster Risk Reduction': 'د مصیبت د خطر کموول',
'Disaster Type': 'د مصیبت بڼه',
'Disease Prevention': 'د مرض مخنیوی',
'Display Polygons?': 'څو ضلعی ښکاره کړی؟',
'Display Routes?': 'لاری ښکاره کړی؟',
'Display Tracks?': 'د پښو چاپونه ښکاره کړی؟',
'Display Waypoints?': 'د لاری راهنمایانی ښکاره کړی؟',
'Distribution of Food': 'د خوراک توزیع',
'Distribution of Non-Food Items': 'توزیع د غیر خوراکی مواد',
'Do you really want to approve this record?': 'ته واقعا غواړی چی تایید کړی دا تاریخچه',
'Do you really want to delete these records?': 'ته واقعا غواړی چی له منځه یوسی دا تاریخچه',
'Do you really want to delete this record? (This action can not be reversed)': 'ته واقعا غواړی چی له منځه یوسی دا تاریخچه ( دا عمل زیرمه کیدای نه شی)',
'Document Scan': 'سند په اجمالی توګه باندی لټول',
'Documents': 'اسناد',
'Domain': 'قلمرو',
'Donor Details': 'د بښوونکی معلومات',
'Donor added': 'بښوونکی اضافه شوی',
'Donor deleted': 'بښوونکی له منځه تللی',
'Donor updated': 'بښوونکی ننی شوی',
'Donor': 'بښونکی',
'Donors Report': 'د بښوونکو تاریخچه',
'Donors': 'بښوونکي',
'Download OCR-able PDF Form': 'رواړی د ( OCR ) توانا د PDF فورم',
'Draft Features': 'د شکل حواله',
'Draft': 'حواله',
'Drag an image below to crop and scale it before uploading it:': 'یو تصویر لاندی کش کړی چی قطع او په اندازه یی کړی مخکی د راوړلو څخه',
'Draw a square to limit the results to just those within the square.': 'یوه مربع رسم کړی چی محدوده شی نتیجه یوازی د دننه د مربع ته',
'Driver': 'موټر چلوونکی',
'Driving License': 'د موټر چلوونی جواز',
'Duplicate label selected': 'انتخاب شوی علامه دوه دانی کړی',
'Duplicate': 'دوه دانی',
'Duration (months)': 'جریان ( میاشت)',
'Duration': 'جریان',
'E-mail': 'برقی لیک (E-mail )',
'ESRI Shape File': 'د ESRI د سند شکل',
'Early Warning Systems': 'مخکینی د اخطار سیستم',
'Edit %(site_label)s Status': 'تنظیم %(site_label)s حالت',
'Edit Activity Organization': 'تنظیم د موسسه فعالیت',
'Edit Activity Type': 'تنظیم د فعالیت بڼه',
'Edit Activity': 'فعالیت تنظیم کړی',
'Edit Address': 'تنظیم د ادرس',
'Edit Affiliation': 'د یوالی تنظیم',
'Edit Annual Budget': 'تنظیم د کلنی بودیجه',
'Edit Appraisal': 'د ارزیابی تنظیم',
'Edit Award': 'د انعام تنظیم ',
'Edit Beneficiaries': 'د فایدی تنظیم',
'Edit Beneficiary Type': 'تنظیم د فایدی بڼه',
'Edit Branch Organization': 'د موسسه څانګی تنظیم',
'Edit Campaign': 'د کمپین تنظیم',
'Edit Certificate': 'د شهادت نامی تنظیم',
'Edit Certification': 'د شهادت تنظیم',
'Edit Cluster': 'تنظیم د وسعت',
'Edit Community Details': 'د جامعه معلوماتو تنظیم',
'Edit Competency Rating': 'د صلاحیت د اندازه تنظیم',
'Edit Contact Details': 'د اړیکی د تنظیم معلومات',
'Edit Contact Information': 'د اړیکی د تنظیم معلومات',
'Edit Contact': 'د اړیکی تنظیم',
'Edit Course Certificate': 'د کورس د شهادت نامی تنظیم',
'Edit Course': 'د کورس تنظیم',
'Edit Credential': 'د اعتبار لیک تنظیم',
'Edit DRRPP Extensions': 'تنظیم د ERRPP توسعه',
'Edit Department': 'د برخی تنظیم',
'Edit Deployment Details': 'تنظیم د وسعتو د معلومات',
'Edit Details': 'د معلوماتو تنظیم',
'Edit Donor': 'د بښوونکو تنظیم',
'Edit Education Details': 'د تعلیم د معلوماتو تنظیم',
'Edit Entry': 'د داخلیدو تنظیم',
'Edit Experience': 'د تجربی تنظیم',
'Edit Facility Type': 'د امکاناتو د تنظیم بڼه',
'Edit Facility': 'د امکاناتو تنظیم',
'Edit Feature Layer': 'تنظیم د شکل پاڼی',
'Edit Group': 'تنظیم د ګروپ',
'Edit Hazard': 'د خطر تنظیم',
'Edit Hours': 'د ساعتونه تنظیم',
'Edit Identity': 'پیژندګلنی تنظیم',
'Edit Image Details': 'د تصویر د معلوماتو تنظیم',
'Edit Job Title': 'د دندی د عنوان تنظیم',
'Edit Job': 'د دندی تنظیم',
'Edit Keyword': 'اساسی تڼیو تنظیم',
'Edit Layer': 'د پاڼی تنظیم',
'Edit Level %d Locations?': 'ځای؟ %d د تنظیم درجه',
'Edit Location Details': 'د ځای د تنظیم معلومات',
'Edit Location Hierarchy': 'د ځای د سلسه مراتبو تنظیم',
'Edit Location': 'د ځای تنظیم',
'Edit Log Entry': 'د داخلیدو د ثبت تنظیم',
'Edit Logged Time': 'تنظیم د زیرمی وخت',
'Edit Mailing List': 'د پست د لست تنظیم',
'Edit Map Profile': 'د نقشی د تنظیم تنظیمات',
'Edit Marker': 'د ښکارندوی تنظیم',
'Edit Member': 'د عضو تنظیم',
'Edit Membership Type': 'د عضویت د بڼی تنظیم',
'Edit Membership': 'د عضویت تنظیم',
'Edit Milestone': 'د مهمی مرحلی تنظیم',
'Edit National Society': 'د ملی ټولنه تنظیم',
'Edit Network': 'د شبکی تنظیم',
'Edit Office Type': 'د دفتر د بڼی تنظیم',
'Edit Office': 'د دفتر تنظیم',
'Edit Organization Type': 'د موسسه د بڼی تنظیم',
'Edit Organization': 'د موسسه تنظیم',
'Edit Output': 'د بهرنی تنظیم',
'Edit Participant': 'د ګډونوال تنظیم',
'Edit Partner Organization': 'د موسسه د شریک تنظیم',
'Edit Permissions for %(role)s': '%(role)s د تنظیم اجازه د',
'Edit Person Details': 'د کس د معلوماتو تنظیم',
'Edit PoI Type': 'د پول بڼه تنظیم',
'Edit Point of Interest': 'د زړه پوری نکتی تنظیم',
'Edit Policy or Strategy': 'تنظیم د ستراتیژی او سیاست',
'Edit Professional Experience': 'د مسلکی تجربی تنظیم',
'Edit Profile Configuration': 'د منظری د تنظیماتو تنظیم',
'Edit Program': 'د پروګرام تنظیم',
'Edit Project Organization': 'د موسسه د پروژی تنظیم',
'Edit Project': 'د پروژی تنظیم',
'Edit Projection': 'د طرح تنظیم',
'Edit Record': 'تنظیم د تاریخچه',
'Edit Region': 'د ناحیی تنظیم',
'Edit Resource Type': 'د منبع د بڼه تنظیم',
'Edit Resource': 'د منبع تنظیم',
'Edit Response Summary': 'د خلاصه د ځواب تنظیم',
'Edit Role': 'د نقش تنظیم',
'Edit Room': 'د کوټی تنظیم ',
'Edit Sector': ' د کتور تنظیم',
'Edit Service': 'د خدمت تنظیم',
'Edit Skill Equivalence': 'د مهارت د معادل تنظیم',
'Edit Skill Type': 'د مهارت د بڼه تنظیم',
'Edit Skill': 'د مهارت تنظیم',
'Edit Staff Assignment': 'د کارمندانو د دندی تنظیم',
'Edit Staff Member Details': 'د کارمندانود عضوو د معلوماتو تنظیم',
'Edit Status': 'تنظیم د حالت',
'Edit Symbology': 'د نماد تنظیم',
'Edit Task': 'د دندی تنظیم',
'Edit Team': 'د ګروپ تنظیم',
'Edit Theme Data': 'د موضوع د معلوماتو تنظیم',
'Edit Theme': 'د موضوع تنظیم',
'Edit Training Event': 'د تعلیم د واقعه تنظیم',
'Edit Training': 'د تعلیم تنظیم',
'Edit Volunteer Cluster Position': 'د رضاکار د ګروپ د ځای تنظیم',
'Edit Volunteer Cluster Type': 'تنظیم د رضاکار د ګروپ بڼه',
'Edit Volunteer Cluster': 'د رضاکار د ګروپ تنظیم',
'Edit Volunteer Details': 'د رضاکار د معلومات تنظیم',
'Edit Volunteer Role': 'د رضاکار د نقش تنظیم',
'Edit saved search': 'د زیرمه شوی پلټنی تنظیم',
'Edit the OpenStreetMap data for this area': 'د خلاص سرک نقشی د معلوماتو تنظیم د دی ناحیه لپاره',
'Edit this entry': 'د داخلیدو تنظیم',
'Edit': 'تنظیم',
'Education & Advocacy': 'تعلیم او وکالت',
'Education Details': 'د تعلیم معلومات',
'Education Level': 'د زده کړی درجه',
'Education details added': 'د تعلیم معلومات اضافه شوی ',
'Education details deleted': 'د تعلیم معلومات له منځه تللی',
'Education details updated': 'د تعلیم معلومات ننی شوی',
'Education': 'تعلیم',
'Effort Report': 'د کوښښ راپور',
'Either a shelter or a location must be specified': 'یا پناګاه او یا یو ځای باید مشخصه شی',
'Either file upload or image URL required.': 'رواړلی شوی سند او یا د URL تصویر ضرورت دی',
'Email Address': 'د برقی لیک ادرس',
'Email': 'برقی لیک',
'Emergency Contact Number': 'عاجل شمیره',
'Emergency Contacts': 'عاجلی اړیکی',
'Emergency Householdwater Treatment and Storage': 'عاجل د کور د آوبو د چاری او زیرمه',
'Emergency Shelter': 'عاجله پناګاه',
'Emergency Telecommunications': 'عاجله د تیلیفونی اړیکی',
'Emergency Water Supply': 'عاجله د اوبو رسوونکی',
'Enable in Default Config?': 'توانا کول په مخکینی تنظیماتو کی',
'Enable': 'توانا',
'End Date': 'اخر تاریخ',
'Enter a valid email': 'فعال برقی لیک دننه کړی',
'Enter a valid phone number': 'یو فعال د تیلیفون شمیره دننه کړی',
'Enter a value carefully without spelling mistakes, this field needs to match existing data.': 'یو قیمت ډیر په دقت سره دننه کړی بدون د لیک د غلطی څخه ، دا ساحه یو معلومات چی موجود وی ضرورت لری',
'Enter some characters to bring up a list of possible matches': 'بعضی شخصیتونه داخل کړی چی را جګ کړی چی یو لست د امکان د یو ځای کیدو ولری',
'Enter the same password as above': 'پاسنی شکل په شان پاسورد داخل کړی',
'Enter your first name': 'خپل لومړنی نوم مو داخل کړی',
'Enter your organization': 'خپل موسسه مو داخله کړی',
'Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages.': 'د اړیکی شمیره اختیاری ده، اما که ولیکی نو تاسی کوالای شی چی د SMS پیغامونه تر لاسه کړی',
'Enterprise Development Training ': 'د مهمی کار د ترقی تعلیم',
'Entity': 'وجود ',
'Epidemic Control for Volunteers Refresher': 'خطر د خپلې خوښې کارکوونکې د ګواښ په حالت کې تجددي کورسونه',
'Epidemic Control for Volunteers': 'خطر د خپلې خوښې کارکوونکې د ګواښ په حالت کې',
'Errors': 'غلطیانی',
'Essential Staff?': 'مهمی کارمندان',
'Estimated Reopening Date': 'تخمینی د خلاصیدو تاریخ',
'Ethnicity': 'قومی',
'Euros': 'ایرو',
'Evacuating': 'خالی کول',
'Evacuation Drills': 'د خالی کیدو سلسله',
'Events': 'واقعه',
'Excellent': 'عالی',
'Experience': 'تجربه',
'Expiring Staff Contracts Report': 'د کارمندانو د قرارداد د خلاصیدو راپور',
'Expiry (months)': 'خلاصیدل (میاشت)',
'Expiry Date': 'د خلاصیدو نیټه',
'Exploring Humanitarian Law / Humanitarian Principle Values Refresher': 'د بشري ا رزښتونو اصول/ د بشرپالنې نړيوال قوانين تجددي کورسونه',
'Exploring Humanitarian Law / Humanitarian Principle Values': 'د بشري ا رزښتونو اصول/ د بشرپالنې نړيوال قوانين',
'Export as': 'صادرول په حیث د',
'Export in %(format)s format': 'وارد په ٪ (شکل/شکلونو ) شکل',
'Export in GPX format': 'د GPX په شکل صادرول',
'Export in KML format': 'د KML په شکل صادرول',
'Export in OSM format': 'د OSM په شکل صادرول',
'Eye Color': 'د سترګو رنګ',
'Facial hair, color': 'د مخ ویښته، رنګ',
'Facial hair, comment': 'د مخ ویښته، نظریه',
'Facial hair, length': 'د مخ ویښته، اوږدوالی',
'Facial hair, type': 'د مخ ویښته، بڼه',
'Facilities': 'سهولیتونه',
'Facility Contact': 'د اړیکی سهولیت',
'Facility Details': 'د معلوماتو سهولیت',
'Facility Status': 'د حالت سهولیت',
'Facility Type Details': 'د بڼی د معلوماتو سهولیت',
'Facility Type added': 'د سهولیت بڼه اضافه شوی',
'Facility Type deleted': 'د سهولیت بڼه له منځه تللی',
'Facility Type updated': 'د سهولیت بڼه ننی شوی',
'Facility Type': 'د بڼی سهولیت',
'Facility Types': 'د سهولیت بڼی',
'Facility added': 'سهولیت اضافه شوی ',
'Facility deleted': 'سهولیت له منځه تللی',
'Facility updated': 'سهولیت ننی شوی',
'Facility': 'سهولیت',
'Fail': 'نا کامیدل',
'Fair': 'مناسب',
'Family': 'کورنی',
'Father Name': 'د پلار نوم',
'Fax': 'فاکس',
'Feature Info': 'د شکل معلومات',
'Feature Layer Details': 'د شکل پاڼی معلومات',
'Feature Layer added': 'د شکل پاڼی اضافه شوی',
'Feature Layer deleted': 'د شکل پاڼه له منځه تللی',
'Feature Layer updated': 'د شکل پاڼه ننی شوی',
'Feature Layer': 'د شکل پاڼه',
'Feature Layers': 'د شکل پاڼی',
'Feature Namespace': 'د شکل نوم مصافه',
'Feature Type': 'د شکل بڼه',
'Features Include': 'شکلونه شامل',
'Feedback': 'فید بک',
'Feeding Programmes': 'د پروګرامونو فید بک',
'Field': 'میدان',
'File': 'سند ',
'Files': 'سندونه',
'Filter Options': 'د فیلتر اختیار',
'Filter by Location': 'فیلتر د ځای په واسطه',
'Filter type ': 'د فیلتر بڼه',
'Filter type': 'د فیلتر بڼه',
'Filter': 'فیلتر',
'Finance / Admin': 'مالی/ اداری',
'Finance Officer': 'د مالی افسر',
'Financial Risk Sharing ': 'د مالی خطر تقسیمول',
'Financial Services': 'مالی خدمت',
'Find more': 'نور پیدا کوول',
'Find on Map': 'نقشه پیدا کول',
'Fingerprint': 'د ګوتی چاپ',
'First Aid Refresher': 'لومړنۍ مرستې تجددي کورسونه',
'First Aid': 'لومړنی مرستی',
'First Aid': 'لومړنۍ مرستې',
'First Name': 'لومړنی نوم',
'First': 'لومړی',
'Fleet Manager': 'د کشتیو منیجر',
'Focal Person': 'مرکزی شخص',
'Folder': 'فولدر',
'Food Security': 'امنیت د خوارکی موادو',
'Food Supplementation': 'د خوارکی موادو تکلمیلول',
'For Entity': 'د هویت لپاره',
'For live help from the Sahana community on using this application, go to': 'د ژوند د مرستی لپاره د ساحانا د جامعی په استعمالولو د دی درخواست، ځی',
'For more details on the Sahana Eden system, see the': 'د نورو معلوماتو لپاره د ساحانا ایدن په سیسم کی، وګوری',
'Form Settings': 'د فورم تنظیم',
'Format': 'شکل ',
'Frequency': 'تکرار',
'Full beard': 'مکمله ګیره',
'Fullscreen Map': 'مکمله د مخی نقشه',
'Function Permissions': 'د دندی اجازه',
'Function': 'دندی',
'Funding Report': 'د قرضی راپور',
'Funding': 'قرضه ورکول',
'Funds Contributed': 'قرضه وویشل شوه',
'GPS Marker': 'د GPS ښکارونکی',
'GPS Track File': 'د GPS دمسیر سند',
'GPS Track': 'د GPS د مسیر',
'GPX Layer': 'د GPX پاڼه',
'Gap Analysis Map': 'د خالیګاه د تحلیل نقشه',
'Gap Analysis Report': 'د خالیګاه د تحلیل راپور',
'Gender': 'جنسیت',
'Generator': 'جنراتور',
'GeoJSON Layer': 'GeoJSON پاڼه',
'GeoRSS Layer': 'پاڼه GeoRSS ',
'Geocode': 'جغرافیاوی کود',
'Geocoder Selection': 'جغرافیاوی کود ورکوونکی',
'Geometry Name': 'د هندسی نوم',
'Get Feature Info': 'مخصوصه معلومات تر لاسه کول',
'Give a brief description of the image, e.g. what can be seen where on the picture (optional).': 'ورکړی مختصره معلومات د تصویر په اړه',
'Go to Functional Map': 'وظیفوی نقشی ته تلل',
'Go': 'تلل',
'Goatee': 'ګواتی',
'Good': 'ښه',
'Google Layer': 'د ګوګل پاڼه',
'Government': 'دولت',
'Grade': 'درجه',
'Grand Father Name': 'د نیکه نوم',
'Graph': 'ګراف',
'Great British Pounds': 'حوض بزرګ بریتانوی',
'Greater than 10 matches. Please refine search further': 'ډیر د لسو لوبو څخه. هیله ده د نور تصفیه شوی ولټوی ',
'Grid': 'سیخ',
'Group Description': 'د ګروپ معلومات',
'Group Details': 'د ګروپ معلومات',
'Group Head': 'د ګروپ سر',
'Group Leader': 'د ګروپ رهبر',
'Group Member added': 'د ګروپ عضو اضافه شوی',
'Group Members': 'دګروپ عضوی',
'Group Name': 'د ګروپ نوم',
'Group Type': 'د ګروپ بڼه',
'Group added': 'ګروپ اضافه شوی',
'Group deleted': 'ګروپ له منځه تللی',
'Group description': 'د ګروپ معلومات',
'Group updated': 'ګروپ ننی شوی',
'Group': 'ګروپ ',
'Grouped by': 'ګروپ په واسطه',
'Groups': 'ګروپونه',
'HFA Priorities': 'د HFA مخکی والی',
'HFA1: Ensure that disaster risk reduction is a national and a local priority with a strong institutional basis for implementation.': 'د HGA1: مطمین شی چی د مصیبت خطر کموالی ملی او محلی مخکی والی د قوی قضایی اساساتو لپاره اجرا شوی',
'HFA2: Identify, assess and monitor disaster risks and enhance early warning.': 'د HFA2 : مشخص کول، یو د مصیبت د لید د خطر تشخیص او توسعه ورکړی مخکنی اخطار ته',
'HFA3: Use knowledge, innovation and education to build a culture of safety and resilience at all levels.': 'د HFA3 : علم استعمال، نو اوری او تعلیم چی جوړ کړی یی دی یو فرهنګی محافظت او ارتجاعی حالت په ټولو سطحو کی',
'HFA4: Reduce the underlying risk factors.': 'د HFA4 : کمول د لاندنی د خطر ضرایب',
'HFA5: Strengthen disaster preparedness for effective response at all levels.': 'د HFA5: قوی اماده ګی د مصیبت د ښه ځواب لپاره په ټولو سطحو کی ',
'Hair Color': 'د ویښتو رنګ',
'Hair Comments': 'د ویښتو نظریه',
'Hair Length': 'د ويښتو اوږدوالی',
'Hair Style': 'د ویښتو قسم',
'Hand Washing Facilities': 'د لاس وینځلو اسانتیاوی',
'Hazard Details': 'د خطر معلومات',
'Hazard added to Project': 'خطر اضافه شوی پروژی ته',
'Hazard added': 'خطر اضافه شوی ',
'Hazard deleted': 'خطر له منځه تللی',
'Hazard removed from Project': 'خطر له منځه تللی د پروژی څخه',
'Hazard updated': 'خطر ننی شوی',
'Hazard': 'خطر',
'Hazards': 'خطرونه',
'Headquarters': 'قومندانی',
'Health Awareness, Promotion': 'د روغتیا خبرتیا، پرمختګ',
'Health Facilities - Construction and Operation': 'د روغتیا د امکاناتو- ساختمان او عملیات',
'Health Policy, Strategy Development': 'د روغتیا سیاست، د سترتیژی پرمختګ',
'Health': 'روغتیا',
'Height (cm)': 'لوړوالی ( سانتی متر)',
'Height': 'لوړوالی',
'Heliport': 'د هلیکپتر میدان',
'Help': 'مرسته',
'Hide Chart': 'پټ چوکات',
'Hide Pivot Table': 'پټ تاویدونکی میز',
'Hide Table': 'پټ میز',
'Hide': 'پټ',
'Hierarchy Level 1 Name (e.g. State or Province)': 'سلسله د مراتبو لومړی سطح نوم ( لکه ایالت یا ولایت)',
'Hierarchy Level 2 Name (e.g. District or County)': 'سلسله د مراتبو د دوهمی سطحی نوم ( لکه ولسوالی یا ځای)',
'Hierarchy Level 3 Name (e.g. City / Town / Village)': 'سلسله د مراتبو د دریمی سطحی نوم (لکه ښار/ وړوکی ښارګوټی/ کلی)',
'Hierarchy Level 4 Name (e.g. Neighbourhood)': 'سلسله د مراتبو د څلورمی سطحی نوم (لکه ګاونډیان)',
'Hierarchy Level 5 Name': 'سلسه د مراتبو د پنځم سطحی نوم',
'Hierarchy': 'سلسله د مراتب',
'High school': 'بکلوریا',
'High': 'لوړ',
'Highest Priority Open Requests': 'لوړترین د هیلی د اولویت خلاصیدل',
'Hindu': 'هندو',
'Home Address': 'د کور ادرس',
'Home Country': 'خپل هیواد',
'Home Phone': 'د کور تیلیفون',
'Hospital': 'روغتون',
'Hospitals': 'روغتونونه',
'Host National Society': 'د کوربه ملی ټولنه ',
'Host': 'کوربه',
'Hour': 'ساعت',
'Hourly': 'ساعتوار',
'Hours Details': 'د ساعتونو معلومات',
'Hours added': 'ساعتونه اضافه شوی',
'Hours by Program Report': 'ساعتونه د پروګرام د راپور په واسطه',
'Hours by Role Report': 'ساعتونه د پروګرام د نقش په واسطه',
'Hours deleted': 'ساعتونه له منځه تللی',
'Hours updated': 'ساعتونه ننی شوی',
'Hours': 'ساعتونه ',
'House Design': 'ساعتونه طرحه شوی ',
'How much detail is seen. A high Zoom level means lot of detail, but not a wide area. A low Zoom level means seeing a wide area, but not a high level of detail.': 'څومره معلومات ښکاری. یوه لوړه سطحه په دی معنی ده چی ډیر معلومات اما نه یوه لویه ساحه. یوه ښکته سطحه په دی معنی ده چی ډیره ساحه اما نه ډیر معلومات',
'How often you want to be notified. If there are no changes, no notification will be sent.': 'تاسی څو ځله غواړی چی با خبر شی. که کوم تغیر نه وی، هیڅ خبرتیا به تاسی ونه لیګل شی',
'How you want to be notified.': 'څه رنګه خبرتیا تاسی غواړی چی درته در کړل شی',
'Human Immunodeficiency Virus Refresher': 'ويروس/ ايډز تجددي کورسونه',
'Human Immunodeficiency Virus': 'ويروس/ ايډز',
'Human Resource': 'انسانی منبع',
'Human Resources': 'انسانی منبعی',
'Humanitarian Values Refresher': 'بشري ارزښتونه تجددي کورسونه',
'Humanitarian Values': 'بشري ارزښتونه',
'I agree to the %(terms_of_service)s': 'زه موافق یم د %(terms_of_service)s',
'ICBRR Staff': 'د ICBRR کارمندان',
'ID Number': 'تذکيري شمړه',
'ID Tag Number': 'د تذکری شمیره',
'ID Type': 'د تذکری بڼه',
'ID': 'تذکره',
'IEC Materials': 'د IEC مواد',
'IT Telecom': 'د آی ټی د لری اړیکی',
'Identities': 'شخصیتونه',
'Identity Details': 'د شخصیت معلومات',
'Identity added': 'شخصیت اضافه شوی',
'Identity deleted': 'شخصیت له منځه تللی',
'Identity updated': 'شخصیت ننی شوی',
'Identity': 'شخصیت',
'If a user verifies that they own an Email Address with this domain, the Approver field is used to determine whether & by whom further approval is required.': 'که چیری استعالوونکی پوه شی چی دوی یو ایمیل ادرس لری د دی ځای کی، تصدیق شوی ځای استعمالیږی چی مشخص شی یا او په چا باندی نورو تصدیقونو ته اړه ده',
'If checked, the notification will contain all modified records. If not checked, a notification will be send for each modified record.': 'که ولیدل شو،خبرتیا کیدای شی په خپل ځان کی ټوله تاریخچه ولری. او که لیدل شوی نه وی، خبرتیا کیدای شی ولیږل شی د هر یوه مشخص شوی تاریخچی ته',
'If it is a URL leading to HTML, then this will downloaded.': 'که دا یو URL رهبری کړی HTML ته بیا دا کیدای شی ( download ) شی',
'If neither are defined, then the Default Marker is used.': 'او یو هم مشخص شوی نه وی نو بیا مخکینی ښکارندوی استعالیږی',
'If not found, you can have a new location created.': 'او که پیدا نه شوه، نو تاسی یو نوی ځای جوړولی شی',
'If the location is a geographic area, then state at what level here.': 'که ځای یوه جغرافیاوی ځای وی، نو بیا بیان کړی چی په کوم سطح کی دی',
'If the person counts as essential staff when evacuating all non-essential staff.': 'که یو کس د یوه مهم کارمند په حیث و پیژندل شی کله چی خالی کیږی غیر مهم کسان',
'If there are multiple configs for a person, which should be their default?': 'او که هر کس ته مختلفه تنظیمات موجود وی، چی دا به د دوی مخکینی وی.',
'If this field is populated then a user with the Domain specified will automatically be assigned as a Staff of this Organization': 'او که دا ځای پر نفوس وی نو بیا استعمالوونکی د مشخصه ځای سره په خپل سر تعینیږی منحیث د موسسه کارمند',
'If this record should be restricted then select which role is required to access the record here.': 'که دا تاریخچه محدوده شوی وی نو بیا انتخاب کړی هغه نقش چی اړه وی د لاسرسی ولری تاریخچی ته دلته',
'If this record should be restricted then select which role(s) are permitted to access the record here.': 'که دا تاریخچه محدوده شوی وی نو بیا انتخاب کړی هغه نقش/نقشونه ته اجازه ورکړ شوی لاسرسی ولری تاریخچی ته دلته',
'If you have any questions or need support, please see': 'که تاسی کومه پوښتنه یا د حمایه ضرورت لری، هیله ده وګوری',
'If you would like to help, then please %(sign_up_now)s': 'که تاسی غواړی چی مرسته وکړی، نو هیله ده %(sign_up_now)s',
'Ignore Errors?': 'غلطیانی په نظر کی مه نیسی',
'Image Details': 'د تصویر معلومات',
'Image File(s), one image per page': 'د تصویر سند/سندونه، یو تصویر په هره صفحه کی',
'Image Type': 'د تصویر بڼه',
'Image added': 'تصویر اضافه شوی',
'Image deleted': 'تصویر له منځه تللی',
'Image updated': 'تصویر ننی شوی',
'Image': 'تصویر',
'Images': 'تصویرونه',
'Immediately': 'فورا',
'Immunisation Campaigns': 'مخفوظه کمپین',
'Import Activity Data': 'د فعالیت د معلوماتو واردول',
'Import Activity Type data': 'د فعالیت د بڼه معلوماتو واردول',
'Import Annual Budget data': 'د کلنی بودیجه معلوماتو واردول',
'Import Awards': 'د انعامونو واردول',
'Import Certificates': 'د شهادت نامو واردول',
'Import Community Data': 'د جامعه معلوماتو واردول',
'Import Contacts': 'د اړیکوواردول',
'Import Courses': 'د کورسونو واردول',
'Import Data for Theme Layer': 'واردول د معلومات د موضوع پاڼی لپاره',
'Import Data': 'معلوماتو واردول',
'Import Departments': 'واردول برخه',
'Import Deployments': 'د توسعه واردول',
'Import Facilities': 'د امکاناتو واردول',
'Import Facility Types': 'د امکاناتو د بڼه واردول',
'Import Hazard data': 'د خطر معلومات واردول',
'Import Hazards': 'د خطرونو واردول',
'Import Hours': 'د ساعتونو واردول',
'Import Layers': 'د پاڼو واردول',
'Import Location Data': 'د ځای معلوماتو واردول',
'Import Location data': 'د ځای معلوماتو واردول',
'Import Locations': 'د ځایونو واردول',
'Import Logged Time data': 'ثبت شوی وخت معلوماتو واردول',
'Import Members': 'د عضویتونو واردول',
'Import Membership Types': 'د عضویت د بڼو واردول',
'Import Offices': 'د دفترونو واردول',
'Import Organizations': 'د موسسو واردول',
'Import Participant List': 'د ګډونوال د لست واردول',
'Import Participants': 'د ګډونوالو واردول',
'Import Partner Organizations': 'د موسسو د شریک واردول',
'Import PoI Types': 'د پول د بڼه واردول',
'Import Points of Interest': 'د زړه پوری نکتی واردول',
'Import Policies & Strategies': 'د سیاستونو او ستراتیژی واردول',
'Import Project Organizations': 'د موسسو دپروژیی واردول',
'Import Projects': 'د پروژو واردول',
'Import Red Cross & Red Crescent National Societies': 'د سره چلیپا او سره حلال ملی جامعی واردول',
'Import Resource Types': 'د منبع د بڼه واردول',
'Import Resources': 'منبعی واردول',
'Import Sector data': 'د برخی د معلومات واردول',
'Import Service data': 'د خدمت د معلوماتو واردول',
'Import Services': 'د خدمتونو واردول',
'Import Staff': 'د کارمندانو واردول',
'Import Tasks': 'د دندو واردول',
'Import Theme data': 'د موضوع معلومات واردول',
'Import Training Events': 'د تعلیم د مواقع واردول',
'Import Training Participants': 'د ګډونوالو د تعلیم واردول',
'Import Volunteer Cluster Positions': 'د رضاکار د غونچی د موقف واردول',
'Import Volunteer Cluster Types': 'د رضاکار د غونچی د بڼی واردول',
'Import Volunteer Clusters': 'د رضاکار د غونچو واردول',
'Import Volunteers': 'د رضاکارانو واردول',
'Import from CSV': 'واردول د CSV',
'Import from OpenStreetMap': 'واردول د سرک د خلاصیدو نقشه',
'Import': 'واردول',
'Improved Production Techniques': 'د تولید د پرمختګ تخنیکونه ',
'In error': 'په غلطیانو کی',
'In order to be able to edit OpenStreetMap data from within %(name_short)s, you need to register for an account on the OpenStreetMap server.': 'ددی لپاره چی توانا شو چی تنظیم کړی خلاصه د سرک د نقشه معلومات په جریان %(name_short)s، تاسی اړه لری چی راجستر کړی د یوه حساب په سرک د خلاصیدو په سرور کی',
'InBox': 'دننه د بکس',
'Inactive': 'غیر فعال',
'Incident Categories': 'د واقعه کتگوری',
'Incident Reports': 'د واقعه راپور',
'Incident': 'واقعه',
'Incidents': 'واقعی',
'Incorrect parameters': 'غلطه پارامیتر',
'Infant (0-1)': 'ماشوم ( ۰-۱)',
'Infant and Young Child Feeding': 'ماشوم او د ځوان ماشوم خوراک',
'Inherited?': 'میراث وړل',
'Initials': 'لومړنی',
'Installation of Rainwater Harvesting Systems': 'نصب د باران د اوبو د خرمن سیستم',
'Instructor': 'رهنما',
'Insufficient Privileges': 'د امتیاز کمبود',
'Insufficient vars: Need module, resource, jresource, instance': 'کمبود (vars ): اړه د اندازی، منبع، د j منبع، لحظه',
'Insurance ': 'بیمه',
'Integrity error: record can not be deleted while it is referenced by other records': 'د تمامیت غلطی: تاریخچه نه باید له منځه ولاړ شی کله چی د نورو په واسطه راجع شوی وی.',
'Intergovernmental': 'زیر دولتی',
'Invalid Location!': 'بی اعتباره ځای',
'Invalid Site!': 'بی اعتباره سایت',
'Invalid data: record %(id)s not accessible in table %(table)s': 'بی اعتباره معلومات: تاریخچه %(id)s په چوکات کی په لاسرسی کی نه شته %(table)s',
'Invalid form (re-opened in another window?)': 'بی اعتباره فورم ( بیا خلاصیدل په بل وندو کی)',
'Invalid phone number!': 'بی اعتباره د تیلیفون شمیره',
'Invalid phone number': 'بی اعتباره د تیلیفون شمیره',
'Invalid request': 'بی اعتباره درخواست',
'Invalid source': 'بی اعتباره منبع',
'Inventory Items': 'د دارایی دفتر وسایل',
'Inventory': 'د دارایی دفتر',
'Irrigation and Watershed Management': 'د اوبو د لګونی مدیریت',
'Is editing level L%d locations allowed?': 'ایا د تنظیم سطح L%d اجازه د ځای',
'Is this a strict hierarchy?': 'ایا دا یوه د جدی د سلسله د مراتبو ځای ده',
'Issuing Authority': 'د موضوع قدرت',
'It captures not only the places where they are active, but also captures information on the range of projects they are providing in each area.': 'دی یوازی ځای یی نه دی نیولی چیرته چی دوی فعال دی بلکه معلومات یی د پروژی په ساحه کی چی مهیا کړی یی دی په هر ځای کی ',
'Item Categories': 'وسایل چی کتګوری شوی',
'Items': 'جنسونه',
'JNAP Priorities': 'د JNAP اولویتونه',
'JNAP-1: Strategic Area 1: Governance': 'د JNAP- 1: ستراتیژی ځای ۱: دولتی',
'JNAP-2: Strategic Area 2: Monitoring': 'د JNAP-۲: ستراتیژی ځای 2: لیدل',
'JNAP-3: Strategic Area 3: Disaster Management': 'د JNAP- ۳: ستراتیژی ځای 3: د مصیبت مدیریت',
'JNAP-4: Strategic Area 4: Risk Reduction and Climate Change Adaptation': 'د JNAP-۴: ستراتیژی ځای۴ : د خطر کمول او د اقلیم د بدلون توافق',
'JS Layer': 'د JS پاڼه',
'Jewish': 'یهودی',
'Job Schedule': 'د دندی تقسیم اوقات',
'Job Title Catalog': 'د دندی د عنوان فهرست',
'Job Title Details': 'د دندی د عنوان معلومات',
'Job Title added': 'د دندی عنوان اضافه شوی',
'Job Title deleted': 'د دندی عنوان له منځه تللی',
'Job Title updated': 'د دندی عنوان ننی شوی',
'Job Title': 'د دندی عنوان',
'Job added': 'دنده اضافه شوی',
'Job deleted': 'دنده له منځه تللی',
'Job updated': 'دنده ننی شوی',
'Job': 'وظیفه',
'Joint National Action Plan for Disaster Risk Management and Climate Change Adaptation. Applicable to Cook Islands only': 'یوزای ملی د عمل پلان د د مصیبت د خطر مدیریت او د اقلیم د بدلون توافق. قابلیت د انجام د کوک ( cook ) د جزیری لپاره',
'Journal Entry Details': 'د هری ورځ دفتر د دخول معلومات',
'Journal entry added': 'د هری ورځی د دفتر دخول اضافه شوی',
'Journal entry deleted': 'د هری ورځی د دفتر دخول له منځه تللی',
'Journal entry updated': 'د هری ورځی د دفتر دخول ننی شوی',
'Journal': 'د هری ورځ دفتر',
'KML Layer': 'د KML پاڼه',
'Keep Duplicate': 'دوهمه کاپی یی وساتی',
'Keep Original': 'اصلی یی وساتی',
'Key Value pairs': 'جوړه د کلید ارزښت',
'Key': 'کلی',
'Keyword Added': 'مهمی کلیمی اضافه شوی',
'Keyword Deleted': 'مهمی کلیمی له منځه تللی',
'Keyword Updated': 'مهمی کلیمی ننی شوی',
'Keyword': 'مهمی کلیمی',
'Keywords': 'مهمی کلیمی',
'Knowledge Management': 'د علم مدیریت',
'Language': 'ژبه ',
'Last Checked': 'ورستی لیدل شوی',
'Last Contacted': 'ورستی اړیکه',
'Last Name': 'تخلص',
'Last known location': 'ورستی ځای وپیژندل شو',
'Last run': 'ورستی منډه',
'Last status': 'ورستی حالت',
'Last updated': 'ورستی ننی شوی',
'Last': 'ورستی',
'Latitude & Longitude': 'جغرافیاوی وسعت او جغرافیاوی طول',
'Latitude and Longitude are required': 'جغرافیاوی وسعت او جغرافیاوی طول ته اړه ده',
'Latitude is Invalid!': 'جغرافیاوی وسعت بی اعتباره ده',
'Latitude is North - South (Up-Down).': 'جغرافیاوی وسعت شمال دی - جنوب ( جګ - ښکته)',
'Latitude is North-South (Up-Down).': 'جغرافیاوی وسعت شمال دی - جنوب ( جګ - ښکته)',
'Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere.': 'جغرافیاوی وسعت صفر دی د استوا په خط کی او مثبت دی په شمالی نیم کره کی او منفی ده په جنوبی د نیم کره کی',
'Latitude must be between -90 and 90.': 'جغرافیاوی وسعت باید د -۹۰ نه تر ۹۰ پوری وی',
'Latitude of Map Center': 'جغرافیاوی وسعت د نقشی مرکز',
'Latitude of far northern end of the region of interest.': 'جغرافیاوی وسعت د شمالی ورستی د د زړه پوری ساحه کی',
'Latitude of far southern end of the region of interest.': 'جغرافیاوی وسعت د جنوبی وروستی لری د زړه پوری ساحه',
'Latitude %(lat)s is invalid, should be between %(lat_min)s & %(lat_max)s': '%(lat_max)s & %(lat_min)s باید میان, غیر معتبر است %(lat)s عرض جغرافیایی',
'Latitude': 'جغرافیاوی وسعت',
'Latrine Construction': 'د اوبو د لویدو جوړونه',
'Layer Details': 'د پاڼی معلومات',
'Layer Name': 'د پاڼی نوم',
'Layer Properties': 'د پاڼی تنظیمات',
'Layer added': 'پاڼه اضافه شوی',
'Layer deleted': 'پاڼه له منځه تللی',
'Layer has been Disabled': 'پاڼه غیر فعاله شوی',
'Layer has been Enabled': 'پاڼی فعال شوی',
'Layer removed from Symbology': 'پاڼی د نماد څخه له منځه وړل شو',
'Layer updated': 'پاڼی ننی شوی',
'Layer': 'پاڼه',
'Layers': 'پاڼی',
'Lead Implementer for this project is already set, please choose another role.': 'رهبری د آلاتو ددی پروژی مخکی له مخکی تعین شوی، هیله ده یو بل نقش انتخاب کړی',
'Lead Implementer': 'رهبری د آلاتو',
'Leader': 'رهبری',
'Left-side is fully transparent (0), right-side is opaque (1.0).': 'چپ خوا بشپړه شفافه ده (0) ، ښی خوا غیر شفاف دی (۱.۰ )',
'Legal Approvals': 'قانونی تایید',
'Legend URL': 'د URL فهرست',
'Legend': 'فهرست',
'Less Options': 'لگ اختیارونه',
'Level of Award': 'د انعام سطح',
'Level of competency this person has with this skill.': 'د صلاحیت سطح یعنی دا کس دا مهارت لری',
'Level': 'سطح',
'License Number': 'د جواز شمیره',
'Link to this result': 'ارتباط ورکړی دی نتیجی ته',
'List %(site_label)s Status': 'لست %(site_label)s حالت',
'List Activities': 'د فعالیتون لست',
'List Activity Organizations': 'د موسسه د فعالیتونو لست',
'List Activity Types': 'د فعالیت د بڼی لست',
'List Addresses': 'اخیر ادرسونه',
'List Affiliations': 'د اتحاد لست',
'List All Community Contacts': 'لست کړی ټول د جامعه اړیکی',
'List All': 'ټول لست کړی',
'List Annual Budgets': 'لست کړی کلنی بودیجه',
'List Awards': 'انعام لست کړی',
'List Beneficiaries': 'فایدی لست کړی ',
'List Beneficiary Types': 'لست کړی د فایدی بڼه',
'List Branch Organizations': 'د موسسه د څانګی لست',
'List Campaigns': 'کمپینونه لست کړی',
'List Certificates': 'شهادت نامی لست کړی',
'List Certifications': 'شهادتونه لست کړی',
'List Clusters': 'خوشی/ګروپونه لست کړی',
'List Coalitions': 'اتحادیه لست کړی',
'List Communities': 'جامعی لست کړی',
'List Competency Ratings': 'د صلاحیت د اندازی لست',
'List Contact Information': 'د اړیکی د معلوماتو لست',
'List Contacts': 'د اړیکو لست',
'List Course Certificates': 'د کورس د شهادت نامی لست ',
'List Courses': 'د کورس لست',
'List Credentials': 'د اعتبار نامی لست',
'List Data in Theme Layer': 'معلومات لست کړی د موضوع په پاڼی کی',
'List Departments': 'د برخی لست',
'List Deployments': 'د توسعی لست',
'List Donors': 'بښوونکی لست کړی',
'List Education Details': 'د تعلیم د معلوماتو لست',
'List Facilities': 'د امکاناتو لست',
'List Facility Types': 'د امکاناتو د بڼی لست',
'List Feature Layers': 'د شکل د پاڼی لست',
'List Groups': 'د ګروپ لست',
'List Hazards': 'خطر لست',
'List Hours': 'د ساعتونو لست',
'List Identities': 'د پیژندګلنی لست',
'List Images': 'د تصویر لست',
'List Job Titles': 'د دندو د عنوانونو لست',
'List Jobs': 'دندی لست کړی',
'List Keywords': 'مهمی تڼی لست کړی',
'List Layers in Profile': 'پاڼی په منظزو کی لست کړی',
'List Layers in Symbology': 'پاڼی په نماد کی لست کړی',
'List Layers': 'پاڼی لست کړی',
'List Location Hierarchies': 'سلسله د مراتبو د ځای لست',
'List Locations': 'ځایونه لست کړی',
'List Log Entries': 'د داخلیدو د ثبت لست',
'List Logged Time': 'د داخلیدو د وخت لست',
'List Mailing Lists': 'د برقی لیک لست',
'List Map Profiles': 'د نقشی د تنظیم لست',
'List Markers': 'د ښکارونکی لست ',
'List Members': 'د اعضاوو لست',
'List Membership Types': 'د عضویت د بڼی لست',
'List Memberships': 'عصویتونه لست کړی',
'List Milestones': 'لست کړی مهمه مرحله',
'List Networks': 'شبکه لست کړی',
'List Office Types': 'د دفتر د بڼی لست',
'List Offices': 'دفترونه لست کړی',
'List Organization Types': 'د موسسه بڼه لست کړی',
'List Organizations': 'لست د موسسه',
'List Outputs': 'لست کړی د باندی تګ',
'List Participants': 'ګډون وال لست کړی',
'List Partner Organizations': 'د موسسه شریک لست کړی',
'List Persons': 'کسان لست کړی',
'List PoI Types': 'د پول بڼه لست کړی',
'List Points of Interest': 'زړه پوری نقطه لست کړی',
'List Policies & Strategies': 'لست کړی سیاستونه او ستراتیژیانی',
'List Profiles configured for this Layer': 'لست کړی د منظره تنظیم ددی پاڼی لپاره',
'List Programs': 'لست کړی پروګرامونه',
'List Project Organizations': 'د پروژی موسسی لست کړی',
'List Projections': 'لست کړی طرحی',
'List Projects': 'پروژی لست کړی',
'List Records': 'تاریخچی لست کړی',
'List Red Cross & Red Crescent National Societies': 'لست کړی سره د چلیپا علامه او سره د میاشتی د ملی جامیعو علامی',
'List Regions': 'ځایونه لست کړی',
'List Response Summaries': 'د خلاصی د ځواب لست',
'List Roles': 'نقشونه لست کړی',
'List Rooms': 'کوټی لست کړی',
'List Sectors': 'سکتورونه لست کړی',
'List Services': 'خدمتونه لست کړی',
'List Skill Equivalences': 'د مهارتو د معادل لست',
'List Skill Types': 'د مهارت د بڼه لست',
'List Skills': 'د مهارتونو لست',
'List Staff & Volunteers': 'کارمندان او داوطلبان لست',
'List Staff Assignments': 'د کارمندانو د دندی لست',
'List Staff Members': 'د کارمندانو د عضویت لست',
'List Statuses': 'د حالتونه لست',
'List Symbologies for Layer': 'نمادونه د پاڼی لپاره لست کړی',
'List Symbologies': 'د نمادونو لست',
'List Tasks': 'دندی لست کړی',
'List Teams': 'ګروپونه لست کړی',
'List Themes': 'موضوع ګانی لست کړی',
'List Training Events': 'د تعلیم واقعی لست کړی',
'List Trainings': 'تعلیمونه لست کړی',
'List Volunteer Cluster Positions': 'د رضاکار د ګروپ موقف لست کړی',
'List Volunteer Cluster Types': 'د رضاکار د ګروپ بڼه لست کړی',
'List Volunteer Clusters': 'د رضاکار ګروپونه لست کړی',
'List Volunteer Roles': 'د رضاکار نقشونه لست کړی',
'List Volunteers': 'رضاکاران لست کړی',
'List of Appraisals': 'ارزیابیانی لست کړی',
'List of Facilities': 'د امکاناتو لستونه',
'List of Professional Experience': 'د مسلکی تجربی لست',
'List of Roles': 'د نقشونو لست',
'List saved searches': 'زیرمه شوی پلټنی لست کړی',
'List': 'لست',
'Live Help': 'ژوندی مرسته',
'Livelihood / CTP': 'معاش / CTP',
'Livelihood Manager': 'د منیجر معاش',
'Livelihoods': 'معاشونه',
'Load Cleaned Data into Database': 'ډک کړه پاک شوی معلومات به دیتابیس کی',
'Load Raw File into Grid': 'ډک کړه هغه سند چی زیرمه شوی نه دی په شبکه کی',
'Load': 'ډکول',
'Loading': 'ډکول',
'Local Currency': 'محلی اسعار',
'Local Name': 'محلی نوم',
'Local Names': 'محلی نومونه',
'Location Added': 'ځای اضافه شوی',
'Location Deleted': 'ځای له منځه تللی ',
'Location Details': 'د ځای معلومات',
'Location Found': 'ځای پیدا شو',
'Location Group': 'د ځای ګروپ',
'Location Hierarchies': 'سلسله دمراتبو ځای',
'Location Hierarchy Level 1 Name': 'د سلسله د مراتبو د لومړی سطح نوم',
'Location Hierarchy Level 2 Name': 'د سلسله د مراتبو د دوهم سطح نوم',
'Location Hierarchy Level 3 Name': 'د سلسله د مراتبو د دریم سطح نوم',
'Location Hierarchy Level 4 Name': 'د سلسله د مراتبو د څلورم سطح نوم',
'Location Hierarchy Level 5 Name': 'د سلسله د مراتبو د پنځم سطح نوم',
'Location Hierarchy added': 'سلسله دمراتبو ځای اضافه شوی',
'Location Hierarchy deleted': 'سلسله دمراتبو ځای له منځه تللی ',
'Location Hierarchy updated': 'سلسله دمراتبو ځای ننی شوی',
'Location Hierarchy': 'سلسله دمراتبو ځای',
'Location NOT Found': 'ځای پیدا نشو',
'Location added to Organization': 'ځای اضافه شوی موسسه ته',
'Location added': 'ځای اضافه شوی',
'Location deleted': 'ځای له منځه تللی ',
'Location is Required!': 'ځای ته اړه ده',
'Location needs to have WKT!': 'ځای اړه لری چی WKT ولری',
'Location removed from Organization': 'ځای د موسسه څخه له منځه تللی',
'Location updated': 'ځای ننی شوی',
'Location': 'ځای',
'Locations of this level need to have a parent of level': 'د دی ځای سطح اړه لری چی د منشا سطح ولری',
'Locations': 'ځایونه',
'Log Entry Deleted': 'د داخلیدو ثبت له منځه تللی',
'Log Entry Details': 'د داخلیدو ثبت معلومات',
'Log New Time': 'د ثبت نوی وخت',
'Log Time Spent': 'ثبت د ضایع شوی وخت',
'Log entry added': 'د داخلیدو ثبت اضافه شوی ',
'Log entry updated': 'د داخلیدو ثبت ننی شوی',
'Logged Time Details': 'د ثبت د وخت معلومات',
'Logged Time': 'د ثبت وخت',
'Login using Facebook account': 'داخلیدل د فیس بوک حساب ته',
'Login using Google account': 'داخلیدل د ګوګل حساب ته',
'Login with Facebook': 'داخلیدل فیس بوک سره',
'Login with Google': 'داخلیدل ګوګل سره',
'Login': 'سیستم ته داخلیدل',
'Logistics & Warehouses': 'لوجستیک او انبارونه',
'Logo of the organization. This should be a png or jpeg file and it should be no larger than 400x400': 'د موسسه لوګو. دا باید png او یا jpeg سند وی او باید د 400x400 نه لوی نه وی',
'Logo': 'لوګو',
'Logout': 'وټل',
'Long Name': 'اوږد نوم',
'Longitude is Invalid!': 'طول د جغرافیاوی په لاس کی نشته',
'Longitude is West - East (sideways).': 'طول د جغرافیاوی غرب دی - شرق (یو طرفه)',
'Longitude is zero on the prime meridian (through Greenwich, United Kingdom) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas.': 'طول د جغرافیاوی صفر دی په نیمه ورز ( ګرین ویچ، انګلستان) او مثبت دی په شرق کی، سر تا سره د اروپا او اسیا. طول د جغرافیاوی منفی ده په شرق کی، سرتاسره د اتلانتیک او امریکا کی',
'Longitude must be between -180 and 180.': 'طول د جغرافیاوی باید د -۱۸۰ څخه تر ۱۸۰ تر منځ اوسیږی',
'Longitude of Map Center': 'د طول د جغرافیاوی د مرکز نقشه',
'Longitude of far eastern end of the region of interest.': 'طول د جغرافیاوی د لری شرقی په اخر د زړه پوری ځای کی',
'Longitude of far western end of the region of interest.': 'طول د جغرافیاوی د لری غربی په اخر د زړه پوری ځای کی',
'Longitude %(lon)s is invalid, should be between %(lon_min)s & %(lon_max)s': '%(lon_max)s & %(lon_min)s باید میان, غیر معتبر است %(lon)s طول جغرافیایی',
'Longitude': 'طول د جغرافیاوی',
'Lost Password': 'پاسورد ورک شوی',
'Lost': 'له لاسه تللی',
'Low': 'ښکته',
'MGRS Layer': 'د MGRS پاڼه',
'Mailing List Details': 'د لیږول د لست معلومات',
'Mailing List Name': 'د لیږول د لست نوم',
'Mailing Lists': 'د لیږولو لستونه',
'Mailing list added': 'د لیږولو لست اضافه شوی',
'Mailing list deleted': 'د لیږلو لست له منځه تللی',
'Mailing list updated': 'د لیږلو لست ننی شوی',
'Mailing list': 'د لیږولو لست ',
'Main Duties': 'مهمی دندی',
'Main?': 'اساسی؟',
'Mainstreaming DRR': 'اساسی ویاله DRR',
'Major': 'لوی',
'Manage Layers in Catalog': 'د پاڼو ترتیب په فهرست کی',
'Manage Your Facilities': 'خپل امکانات ترتیب کړی',
'Managing material and human resources together to better prepare for future hazards and vulnerabilities.': 'ترتیبول د مواد او د منابع بشری سره یو ځای د ښه اماده شی د راتلوونکی خطراتو لپاره',
'Mandatory. In GeoServer, this is the Layer Name. Within the WFS getCapabilities, this is the FeatureType Name part after the colon(:).': 'په زور: د جغرافیاوی سرور، دا د پاڼی نوم ده. د WFS له طریقه توانایی، دا د شکل د بڼی د نوم د برخی د دوو نقطو شاته راځی.',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wfs?': 'په زور: د URL اساس ته د لاسرسی خدمت. لکه. http://host.domain/geoserver/wfs?',
'Mandatory. The base URL to access the service. e.g. http://host.domain/geoserver/wms?': 'په زور: د URL اساس ته د لاسرسی خدمت. لکه. http://host.domain/geoserver/wms?',
'Map Center Latitude': 'د جغرافیاوی د عرض د مرکز نقشه',
'Map Center Longitude': 'طول د جغرافیاوی د نقشی مرکز',
'Map Profile added': 'نقشه د تنظیمات اضافه شوی ',
'Map Profile deleted': 'نقشه د تنظیمات له منځه تللی',
'Map Profile updated': 'نقشه د تنظیمات ننی شوی',
'Map Profile': 'د نقشه تنظیمات',
'Map Profiles': 'د نقشی تنظیمات',
'Map Settings': 'د نقشه تنظیمات',
'Map Viewing Client': 'د نقشه د ښکاریدو مشتری',
'Map Zoom': 'د نقشه نزدیوالی',
'Map has been copied and set as Default': 'کاپی نقشه شوی او په قراردادی توګه تعین شوی',
'Map has been set as Default': 'نقشه په قراردادی توګه تعین شوی',
'Map is already your Default': 'نقشه مخکی د مخکی ستاسو قراردادی دی',
'Map not available: Cannot write projection file - %s': 'نقشه په لاس کی نشته: د پروژی سند نشی لیکلی - %s',
'Map not available: No Projection configured': 'نقشه په لاس کی نشته: هیڅ پروژه تنظیم شوی نه ده',
'Map not available: Projection %(projection)s not supported - please add definition to %(path)s': 'نقشه په لاس کی نشته: پروژه %(projection)s حمایه شوی نه دی- هیله ده تعریف اضافه شی %(path)s',
'Map of Communities': 'د جامعو نقشی',
'Map of Facilities': 'د نقشی امکانات',
'Map of Offices': 'د دفترونو نقشی',
'Map of Projects': 'د پروژو نقشه',
'Map of Resources': 'د منابعو نقشه',
'Map': 'نقشه',
'Marital Status': 'مدنی حالت',
'Mark as duplicate': 'نښانی یی کړی دوه دانیی',
'Marker Details': 'د ښکاروندوی معلومات',
'Marker added': 'ښکاروندوی اضافه شوی ',
'Marker deleted': 'ښکاروندوی له منځه تللی ',
'Marker updated': 'ښکاروندی ننی شوی',
'Marker': 'ښکارندوی',
'Markers': 'ښکاروندی ',
'Markets/Marketing Analysis, Linkages and Support': 'بازارونه/ د بازار تجزیه او تحلیل، یوزای والی او حمایه',
'Master degree': 'ماسټر',
'Matching Records': 'وصل کول د تاریخچی',
'Max': 'ډیر',
'Maximum Extent': 'ډیر ترینه توسعه',
'Maximum Location Latitude': 'ډیر ترینه د جغرافیاوی د عرض ځای ',
'Maximum Location Longitude': 'ډیر ترینه د جغرافیاوی د طول ځای',
'Maximum must be greater than minimum': 'ډیر ترین برخه باید د کمترین څخه ډیر وی',
'Measure Area: Click the points around the polygon & end with a double-click': 'اندازه کړی ساحه: کلیک کړه په څلور اطرافو کی د څو ضلعی او ختم یی کړی د دوو کلیکونو سره',
'Measure Length: Click the points along the path & end with a double-click': 'اوږد والی اندازه کړی: کلیک کړه په نقاطو په امتداد د مسیر او په دووو کلیکو سره یی ختم کړی',
'Medical Conditions': 'طبی حالات',
'Medical Supplies and Equipment': 'د طبی وسایلو عرضه',
'Medium': 'منځنی ',
'Member Details': 'د عضو معلومات',
'Member ID': 'د عضو تذکره',
'Member Organizations': 'د موسسی عضو',
'Member added': 'عضو اضافه شوی',
'Member deleted': 'عضو له منځه تللی',
'Member updated': 'عضو ننی شوی',
'Member': 'عضو',
'Members': 'عضوی',
'Membership Details': 'د عضویت معلومات',
'Membership Fee': 'د عضویت فیس',
'Membership Paid': 'د عضویت ورکړل شوی',
'Membership Type Details': 'د عضویت د بڼه معلومات',
'Membership Type added': 'د عضویت بڼه اضافه شوی',
'Membership Type deleted': 'د عضویت بڼه له منځه تللی',
'Membership Type updated': 'د عضویت بڼه ننی شوی',
'Membership Types': 'د عضویت بڼی',
'Membership added': 'عضویت اضافه شوی',
'Membership deleted': 'عضویت له منځه تللی',
'Membership updated': 'عضویت ننی شوی',
'Membership': 'عضویت',
'Memberships': 'عضویتونه',
'Menu': 'فهرست د انتخاب',
'Merge records': 'د یو ځای کولو تاریخچه',
'Merge': 'یو ځای کول',
'Message': 'پیغام',
'Method disabled': 'تګ لاره غیر فعالول',
'Middle Name': 'منځنی نوم',
'Milestone Added': 'مهمه مرحله اضافه شوی ',
'Milestone Deleted': 'مهمه مرحله له منځه تللی',
'Milestone Details': 'د مهمه مرحله معلومات',
'Milestone Updated': 'مهمه مرحله ننی شوی',
'Milestone': 'مهمه مرحله',
'Milestones': 'مهمی مرحلی',
'Military': 'نظامی',
'Min': 'کمترین',
'Minimum Location Latitude': 'کمترین عرض د جغرافیاوی ځای',
'Minimum Location Longitude': 'کمترین طول د جغرافیاوی ځای',
'Minute': 'دقیقه',
'Minutes must be a number.': 'دقیقی باید شمیری وی',
'Minutes must be less than 60.': 'دقیقی باید د ۶۰ څخه کم وی',
'Missing': 'له لاسه تللی',
'Mission': 'هدف',
'Missions': 'هدفونه',
'Mobile Health Units': 'د صحی برخی موبایل',
'Mobile Phone Number': 'د موبایل تلیفون شمیره',
'Mobile Phone': 'د موبایل تلیفون',
'Mobile': 'موبایل',
'Modify Feature: Select the feature you wish to deform & then Drag one of the dots to deform the feature in your chosen manner': 'مشخص کړی شکل: انتخاب کړی شکل چی غواړی شکل یی خراب کړی او بیا یوه نقطه کش کړی چی شکل په خپله زړه خراب کړی',
'Monitoring and Evaluation': 'لیدل او ارزیابی کول',
'Month': 'میاشت',
'Monthly Membership Fee': 'د حق العضویت مياشتني ورکړل شوې پیسی',
'Monthly': 'میاشتنی',
'More Info': 'نور معلومات',
'More Options': 'نور اختیارونه',
'Morgue': 'د مړو ځای',
'Moustache': 'بریت',
'Move Feature: Drag feature to desired location': 'حرکت د شکل: د کش کړی شکل په کوم ځای کی چی ړزه مو غواړی',
'Multiple': 'ضرب',
'Muslim': 'مسلمان',
'Must a location have a parent location?': 'یو ځای باید د منشا ځای ولری',
'My Logged Hours': 'زما د دننه شوی ساعتونه',
'My Maps': 'زما نقشی',
'My Open Tasks': 'زما خلاصه شوی دنده',
'My Profile': 'زما منظره',
'My Tasks': 'زما دنده',
'NDRT (National Disaster Response Teams)': 'د NDRT ( ملی د مصیبت د ځواب ګروپونه)',
'NGO': 'NGO',
'NONE': 'هیڅ يو',
'NOT %s AND NOT %s': 'NOT %s AND NOT %s',
'NOT %s OR NOT %s': 'NOT %s OR NOT %s',
'Name and/or ID': 'نوم او / یا تذکره',
'Name field is required!': 'د ځای نوم ته اړه ده',
'Name of Award': 'د انعام نوم',
'Name of Father': 'د پلار نوم',
'Name of Institute': 'د بنیاد نوم',
'Name of Map': 'د نقشی نوم',
'Name of Mother': 'د مور نوم',
'Name of a programme or another project which this project is implemented as part of': 'د پروګرام نوم او یا د بلی پروژی چی دا پروژه د هغه یوه برخه اجرا کوی',
'Name of the person in local language and script (optional).': 'د کس نوم په محلی ژبه کی او متن کی (اختیاری)',
'Name': 'نوم',
'National ID Card': 'ملی د تذکره',
'National Societies': 'ملی جامعی',
'National Society / Branch': 'ملی ټولنه / څانګه',
'National Society Details': 'د ملی ټولنه معلومات',
'National Society added': 'ملی ټولنه اضافه شوی ',
'National Society deleted': 'ملی ټولنه له منځه تللی',
'National Society updated': 'ملی ټولنه ننی شوی',
'National Society': 'ملی ټولنه ',
'National': 'ملی',
'Nationality of the person.': 'ملیت د شخص',
'Nationality': 'ملیت ',
'Needs': 'اړی ',
'Network Details': 'د شبکی معلومات',
'Network added': 'شبکه اضافه شوی ده',
'Network removed': 'شبکه له منځه تللی ده',
'Network updated': 'شبکه ننی شوی',
'Networks': 'شبکی',
'Never': 'هیڅ',
'New Annual Budget created': 'نوی کلنی بودیجه پیدا شوی',
'New Deployment': 'نوی وسعت',
'New Entry': 'نوی ثبت شوی ',
'New Hazard': 'نوی خطر',
'New Location': 'نوی ځای',
'New Organization': 'نوی موسسه',
'New Post': 'نوی پست شوی',
'New Records': 'نوی تاریخچی ',
'New Role': 'نوی نقش',
'New Sector': 'نوی سکتور',
'New Service': 'نوی خدمت',
'New Theme': 'نوی موضوع',
'New updates are available.': 'نوی ننی مهیا دی',
'New': 'نوی',
'News': 'خبرونه',
'Next View': 'بله منظره',
'Next run': 'بله منډه',
'Next': 'بل',
'No Activities Found': 'هیڅ فعالیت پیدا شوی نه دی',
'No Activity Organizations Found': 'هیڅ موسسه فعالیت پیدا شوی نه ده',
'No Activity Types Found': 'هیڅ د فعالیت بڼه پیدا شوی نه ده',
'No Activity Types found for this Activity': 'هیڅ د فعالیت بڼه پیدا شوی نه ده ددی فعالیت لپاره',
'No Activity Types found for this Project Location': 'هیڅ د فعالیت بڼه پیدا شوی نه ده د دی پروژی ځای له پاره',
'No Affiliations defined': 'هیڅ اتحاد مشخصه شوی نه ده',
'No Appraisals found': 'هیڅ ارزیابیانی پیدا شوی نه دی',
'No Awards found': 'هیڅ انعامونه پیدا شوی نه دی',
'No Base Layer': 'هیڅ د اساس پاڼه نشته',
'No Beneficiaries Found': 'هیڅ فایدی پیدا شوی نه دی',
'No Beneficiary Types Found': 'هیڅ د فایدی بڼه پیدا نه شوه',
'No Branch Organizations currently registered': 'هیڅ د موسسه څانګه تر اوسه راجستر شوی نه ده',
'No Campaigns Found': 'هیڅ کمپینونه پیدا شوی نه دی',
'No Clusters currently registered': 'هیڅ ګروپونه تر اوسه راجستر شوی نه دی',
'No Coalitions currently recorded': 'هیڅ اتحاد تر اوسه زیرمه شوی نه ده',
'No Communities Found': 'هیڅ جامعی پیداشوی نه دی',
'No Contacts Found': 'هیڅ اړیکه پیدا شوی نه ده',
'No Contacts currently registered': 'هیڅ د اړیکه تر اوسه راجستر شوی نه ده',
'No Data currently defined for this Theme Layer': 'هیڅ معلومات تر اوسه مشخصه شوی نه ده د دی موضوع پاڼی لپاره',
'No Deployments currently registered': 'هیڅ وسعت تر اوسه راجستر شوی نه ده',
'No Donors currently registered': 'هیڅ بښونکی تر اوسه راجستر شوی نه ده',
'No Facilities currently registered': 'هیڅ امکانات تر اوسه راجستر شوی نه ده',
'No Facility Types currently registered': 'هیڅ د امکاناتو بڼه تر اوسه راجستر شوی نه ده',
'No Feature Layers currently defined': 'هیڅ مخصوصه پاڼه تر اوسه مشخصه شوی نه ده',
'No Groups currently registered': 'هیڅ ګروپ تر اوسه راجستر شوی نه ده',
'No Hazards currently registered': 'هیڅ خطر تر اوسه راجستر شوی نه ده',
'No Hazards found for this Project': 'هیڅ خطر ددی پروژی لپاره پیدا شوی نه دی',
'No Identities currently registered': 'هیڅ پیژندګلنی راجستر شوی نه دی',
'No Images currently registered': 'هیڅ تصویر اوس ندی راجستر شوی',
'No Keywords Found': 'هیڅ اساسی کلیمی پیدا نه شوی',
'No Layers currently configured in this Profile': 'هیڅ پاڼی تر اوسه تنظیم شوی نه دی په دی منظره کی',
'No Layers currently defined in this Symbology': 'هیڅ پاڼی ترا اوسه مشخص شوی نه دی په دی نماد کی',
'No Layers currently defined': 'هیڅ پاڼی تر اوسه مشخص شوی نه دی',
'No Location Hierarchies currently defined': 'هیڅ سلسله د مراتبو ځای تر اوسه مشخص شوی نه دی',
'No Locations Found': 'هیڅ ځایونه پیداشوی نه دی',
'No Locations currently available': 'هیڅ ځای تر اوسه اعتبار نه لری',
'No Locations found for this Organization': 'هیڅ ځای تر اوسه پیدا شوی نه ده ددی موسسه لپاره',
'No Mailing List currently established': 'هیڅ د لیک لست تر اوسه تاسیس شوی نه دی',
'No Map Profiles currently defined': 'هیڅ د نقشی تنظیمات تر اوسه مشخص شوی نه دی',
'No Markers currently available': 'هیڅ ښکارندوی تر اوسه قابلیت د اعتبار نه دی',
'No Matching Records': 'هیڅ د وصلیدو راپور نشته',
'No Members currently registered': 'اعضاوی تر اوسه راجستر شوی نه دی',
'No Memberships currently registered': 'عضویتونه تر اوسه راجستر شوی نه دی',
'No Milestones Found': 'مهمه مرحلی پیدا شوی نه دی',
'No Networks currently recorded': 'شبکی تر اوسه زیرمه شوی نه دی',
'No Office Types currently registered': 'د دفتر بڼی تر اوسه راجستر شوی نه دی',
'No Offices currently registered': 'دفتر تر اوسه راجستر شوی نه ده',
'No Open Tasks for %(project)s': 'هیڅ خلاص شوی دنده د %(project)s',
'No Organization Types currently registered': 'هیڅ د موسسه بڼه تر اوسه راجستر شوی نه ده',
'No Organizations currently registered': 'هیڅ موسسی تر اوسه راجستر شوی نه دی',
'No Organizations for Project(s)': 'هیڅ موسسی تر اوسه د پروژی نشته',
'No Organizations found for this Policy/Strategy': 'هیڅ موسسی پیدا شوی نه دی د دی سیاست / ستراتیژی لپاره',
'No Partner Organizations currently registered': 'هیڅ د موسسه شریک تر اوسه راجستر شوی نه دی',
'No Persons currently registered': 'هیڅ کس تر اوسه راجستر شوی نه دی',
'No PoI Types currently available': 'هیڅ د پول بڼه تر اوسه قابلیت د اعتبار نه دی',
'No PoIs available.': 'هیڅ د پولونه په لاسرسی کی نشته',
'No Points of Interest currently available': 'هیڅ د زړه پوری نقطی تر اوسه په لاسرسی که نشته',
'No Policies or Strategies found': 'هیڅ سیاست او یا ستراتیژی پیدا شوی نه دی',
'No Presence Log Entries currently registered': 'هیڅ د داخلیدو ثبت تر اوسه راجستر شوی نه دی',
'No Professional Experience found': 'هیڅ مسلکی تجربه پیدا شوی نه ده',
'No Profiles currently have Configurations for this Layer': 'هیڅ منظره تر اوسه تنظیمات ددی پاڼی لپاره نشته',
'No Projections currently defined': 'هیڅ طرح تر اوسه مشخص شوی نه ده',
'No Projects currently registered': 'هیڅ پروژه تر اوسه راجسترشوی نه ده',
'No Ratings for Skill Type': 'هیڅ اندازه د مهارت بڼی لپاره نشته ',
'No Records currently available': 'هیڅ تاریخچه تر اوسه په لاسرسی کی نشته',
'No Red Cross & Red Crescent National Societies currently registered': 'هیڅ سره د چلیپا او د سره صلیب ملی ټولنه تر اوسه راجستر شوی نه ده',
'No Regions currently registered': 'هیڅ ساحی تر اوسه راجستر شوی نه دی',
'No Resource Types defined': 'هیڅ د منبع بڼه مشخصه شوی نه ده',
'No Resources in Inventory': 'هیڅ منبعی د فهرست نشته',
'No Response Summaries Found': 'ځواب د خلاصی پیدا شوی نه دی',
'No Response': 'ځواب نشته',
'No Restrictions': 'محدودیت نشته',
'No Roles defined': 'هیڅ نقشونه مشخص شوی نه دی',
'No Rooms currently registered': 'هیڅ کوټی تر اوسه راجستر شوی نه دی',
'No Search saved': 'هیڅ پلټنی زیرمه شوی نه دی',
'No Sectors currently registered': 'هیڅ سکتور تر اوسه راجستر شوی نه دی',
'No Sectors found for this Organization': 'هیڅ سکتورونه پیدا شوی نه دی ددی موسسه لپاره',
'No Sectors found for this Project': 'هیڅ سکتور پیدا شوی نه دی ددی پروژی لپاره',
'No Sectors found for this Theme': 'هیڅ سکتور پیدا شوی نه دی ددی موضوع لپاره',
'No Services currently registered': 'هیڅ خدمتونه تر اوسه راجستر شوی نه دی',
'No Services found for this Organization': 'هیڅ خدمتونه تر اوسه پیدا شوی نه دی ددی موسسه لپاره',
'No Staff currently registered': 'هیڅ کارمندان تر اوسه راجستر شوی نه دی',
'No Statuses currently registered': 'هیڅ حالتونه تر اوسه راجستر شوی نه دی',
'No Symbologies currently defined for this Layer': 'هیڅ نماد تر اوسه مشخص شوی نه دی د دی پاڼی لپاره',
'No Symbologies currently defined': 'هیڅ نماد تر اوسه مشخص شوی نه دی',
'No Tasks Assigned': 'هیڅ دندی تعین شوی نه دی',
'No Teams currently registered': 'هیڅ ګروپونه تر اوسه راجستر شوی نه دی',
'No Themes currently registered': 'هیڅ موضوع تر اوسه راجستر شوی نه دی',
'No Themes found for this Activity': 'هیڅ موضوع پیدا شوی نه ده ددی فعالیت لپاره',
'No Themes found for this Project Location': 'هیڅ موضوع پیدا شوی نه ده ددی د فعالیت د ځای لپاره',
'No Themes found for this Project': 'هیڅ موضوع پیدا شوی نه ده ددی پروژی لپاره',
'No Time Logged': 'هیڅ وخت ثبت شوی نه ده',
'No Volunteer Cluster Positions': 'هیڅ د رضاکار غونچه موقف نشته',
'No Volunteer Cluster Types': 'هیڅ د رضاکار د بڼی غونچه نشته',
'No Volunteer Clusters': 'هیڅ د رضاکار غونچی نشته',
'No Volunteers currently registered': 'هیڅ رضاکاران تر اوسه راجستر شوی نه دی',
'No access to this record!': 'هیڅ لاسته راوړنه تاریخچه ته نشته',
'No annual budgets found': 'هیڅ کلنی بودیجه پیدا شوی نه ده',
'No contact information available': 'هیڅ د اړیکی معلومات با اعتبار شوی نه دی',
'No contact method found': 'هیڅ د اړیکی رویه پیدا شوی نه ده',
'No data available in table': 'هیڅ معلومات په چوکات کی نشته',
'No data available': 'هیڅ معلومات په لاس کی نشته',
'No education details currently registered': 'هیڅ د تعلیم معلومات تر اوسه راجستر شوی نه دی',
'No entries currently available': 'هیڅ د داخلیدو ثبت تر اوسه په لاس کی نشته',
'No entries found': 'هیڅ ثبتونه پیدا شوی نه دی',
'No entry available': 'هیڅ ثبت په لاس کی نشته',
'No forms to the corresponding resource have been downloaded yet.': 'هیڅ فورمونه د مشابه منبع راوړل شوی نه دی',
'No further users can be assigned.': 'هیڅ نور استعمالوونکی تعین شوی نه دی',
'No jobs configured yet': 'هیڅ دندی تنظیم شوی نه دی تر اوسه',
'No jobs configured': 'هیڅ دندی تنظیم شوی نه دی',
'No location information defined!': 'هیڅ د ځای معلومات مشخصه شوی نه دی',
'No match': 'هیڅ وصل نشته',
'No matching element found in the data source': 'هیڅ د وصل عنصر پیدا شوی نه دی د معلوماتو په منبع کی',
'No matching records found': 'هیڅ تاریخچه د وصل پیدا شوی نه دی',
'No members currently registered': 'هیڅ اعضاوی تر اوسه راجستر شوی نه دی',
'No membership types currently registered': 'هیڅ د عضویت بڼی تر اوسه راجستر شوی نه دی',
'No options available': 'هیڅ اختیارونه په لاس کی نشته',
'No options currently available': 'هیڅ اختیارونه تر اوسه په لاس کی نشته',
'No outputs defined': 'هیڅ باندنی پیدا شوی نه دی',
'No records in this resource': 'هیڅ تاریخچه په دی منبع کی نشته',
'No records in this resource. Add one more records manually and then retry.': 'هیڅ تاریخچه په دی منبع کی نشته. اضافه کړه یوه بله تاریخچه معمولا او بیا امتحان کړی',
'No records to review': 'هیڅ تاریخچه د ښکارولو لپاره نشته',
'No report specified.': 'هیڅ تاریخچه مشخصه شوی نه ده',
'No role to delete': 'هیڅ نقش له منځه تللی نه ده',
'No roles currently assigned to this user.': 'هیڅ نقشونه تر اوسه تعین شوی نه دی دی استعمالوونکی ته',
'No staff or volunteers currently registered': 'هیڅ کارمندان یا رضاکاران تر اوسه راجستر شوی نه دی ',
'No tasks currently registered': 'هیڅ دندی تر اوسه راجستر شوی نه ده',
'No time stamps found in this resource': 'په منبع کی هیڅ د وخت علامه موجوده نه ده',
'No users with this role at the moment.': 'هیڅ استعمالوونکی په دی نقش کی په دی لحظه کی',
'No': 'هیڅ',
'None (no such record)': 'هیڅ یو ( هیڅ داسی تاریخچه )',
'None of the above': 'هیڅ یو دپاسینیو څخه',
'None': 'هیڅ یو',
'Nonexistent or invalid resource': 'هیڅ یو موجود او یا غیر اعتبار منبع',
'Normal Job': 'عادی وظیفه ',
'Normal': 'عادی',
'Not Authorized': 'قادر نه دی',
'Not implemented': 'اجرا شوی نه دی',
'Not installed or incorrectly configured.': 'نصب شوی نه دی او یا غلطه تنظیمات',
'Note that this list only shows active volunteers. To see all people registered in the system, search from this screen instead': 'توجه چی دا لست یوازی فعال رضاکاران ښکاروی. که غواړی چی ټول خلک چی راجستر شوی دی په دی سیستم کی وګوری، نو له دی صفحه څخه ولټوی',
'Note that when using geowebcache, this can be set in the GWC config.': 'توجه چی کله ( geowebccache ) استعمالوی دا ځای پر ځای کیږی په GWC په تنظیم کی',
'Notification frequency': 'د خبرتیا تکرار',
'Notification method': 'د خبرتیا طریقه',
'Notify': 'خبرول',
'Number of Activities': 'د فعالیتونو شمیره',
'Number of Beneficiaries': 'د فایدی شمیره',
'Number of Countries': 'د هیوادونو شمیری',
'Number of Deployments': 'د وسعت شمیره',
'Number of Disaster Types': 'د مصیبت د بڼی شمیره',
'Number of Facilities': 'د امکاناتو شمیره',
'Number of Missions': 'د هدف شمیره',
'Number of Responses': 'د ځوابونو شمیره',
'Number or Label on the identification tag this person is wearing (if any).': 'شمیره اویا علامه د خبرتیا علامه دی کس اغوستی دی ( که کوم) ',
'Number': 'شمیره',
'Nutrition': 'خوراک',
'Nutritional Assessments': 'د خوراک تعین',
'OCR Form Review': 'د OCR دفورم تکرار',
'OCR module is disabled. Ask the Server Administrator to enable it.': 'د OCR اندازی غیر فعال شوی. د سرور د اداری څخه و پښتی چی فعال یی کړی',
'OCR review data has been stored into the database successfully.': 'د OCR تکرار معلومات زیرمه شوی په دیتابیس کی په کامیابه توګه',
'OD Coordinator': 'د OD هم اهنګ کوونکی',
'OK': 'صحیح',
'OSM file generation failed!': 'د OSM د سند نسل ناکامه شوه',
'OSM file generation failed: %s': 'د OSM د سند نسل ناکامه شوه: %s',
'Object': 'مفعول ',
'Objectives': 'مفعولی',
'Observer': 'نظارت کول',
'Obsolete': 'له منځه تللی',
'Office Address': 'د دفتر ادرس',
'Office Details': 'د دفتر معلومات',
'Office Phone': 'د دفتر تلیفون',
'Office Type Details': 'د دفتر د بڼه معلومات',
'Office Type added': 'د دفتر بڼه اضافه شوی',
'Office Type deleted': 'د دفتر بڼه له منځه تللی',
'Office Type updated': 'د دفتر بڼه ننی شوی',
'Office Type': 'د دفتر بڼه ',
'Office Types': 'د دفتر بڼی',
'Office added': 'دفتر اضافه شوی',
'Office deleted': 'دفتر له منځه تللی',
'Office updated': 'دفتر ننی شوی',
'Office': 'دفتر',
'Offices': 'دفترونه',
'On Hold': 'په لاس کی لرل',
'On by default?': 'د پخوانی په واسطه',
'Only showing accessible records!': 'یوازی د لاسرس تاریخچه ښکاروی',
'Opacity': 'کدری',
'Open Chart': 'چوکات خلاص کړی',
'Open Incidents': 'حادثه خلاص کړی',
'Open Map': 'نقشه خلاص کړی',
'Open Report': 'راپور خلاص کړی',
'Open Table': 'چوکات خلاص کړی',
'Open Tasks for %(project)s': 'خلاص کړی دندی د %(project)s',
'Open Tasks for Project': 'خلاص کړی دندی د پروژی لپاره',
'Open recent': 'خلاص کړی نوی',
'Open': 'خلاص کړی',
'OpenStreetMap Layer': 'خلاص کړی د کوڅی د نقشه پاڼه',
'OpenStreetMap OAuth Consumer Key': 'خلاص د د سرک نقشه د ( Oauth ) د مصرف کلید',
'OpenStreetMap OAuth Consumer Secret': 'خلاص د د سرک نقشه د ( Oauth ) د مصرف راز',
'OpenWeatherMap Layer': 'خلاص د هوا د نقشی پاڼه',
'Opening Times': 'د خلاصیدو وخت',
'Operation not permitted': 'عملیات اجازه ور نه کړ شو',
'Optional password for HTTP Basic Authentication.': 'اختیاری پاسورد د HTTP د اساس سند',
'Optional selection of a MapServer map.': 'اختیاری انتخاب د سرور د نقشی نقشه',
'Optional selection of a background color.': 'اختیاری انتخاب د زمینه رنگ',
'Optional selection of an alternate style.': 'اختیاری انتخاب د متناوب ستایل',
'Optional username for HTTP Basic Authentication.': 'اختیاری د استعمالونکی نوم د HTTP اساسی تصدیق',
'Optional. In GeoServer, this is the Workspace Namespace URI (not the name!). Within the WFS getCapabilities, the workspace is the FeatureType Name part before the colon(:).': 'اختیاری. د جغرافیاوی سرور کی، دا د کار ځای نوم ځای URI ( نوم نه). د WFS تونایی په لاس راوړی، د کار ځای د شکل بڼه نوی برخه مخکی د دوه ټکو (:)',
'Optional. The name of an element whose contents should be a URL of an Image file put into Popups.': 'اختیاری. نوم د یوه عنصر چی مقدار یی باید د URL د یوه سند په تصویر کی په ( popup ) کی واچوی',
'Optional. The name of an element whose contents should be put into Popups.': 'اختیاری. نوم د یوه عنصر چی مقدار یی باید په ( popup) کی واچوی',
'Optional. The name of the schema. In Geoserver this has the form http://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.': 'اختیاری. د مودل نوم. په جیو سرور کی دا یو فورم لری دhttp://host_name/geoserver/wfs/DescribeFeatureType?version=1.1.0&;typename=workspace_name:layer_name.',
'Organisational Preparedness - Nhq and Branches': 'موسسه یی اماده ګی: Nhq او څانګی',
'Organization Details': 'د موسسه معلومات',
'Organization Type Details': 'د موسسه د بڼه معلومات',
'Organization Type added': 'د موسسه بڼه اضافه شوی',
'Organization Type deleted': 'د موسسه بڼه له منځه تللی',
'Organization Type updated': 'د موسسه بڼه ننی شوی',
'Organization Type': 'د موسسه بڼه',
'Organization Types': 'د موسسه بڼی',
'Organization Units': 'د موسسه وسایل',
'Organization added to Policy/Strategy': 'موسسه اضافه شوی سیاست ته/سترتیژی ته',
'Organization added to Project': 'موسسه اضافه شوی پروژی ته',
'Organization added': 'موسسه اضافه شوی',
'Organization deleted': 'موسسه له منځه تللی',
'Organization group': 'موسسه د ګروپ',
'Organization removed from Policy/Strategy': 'موسسه له منځه تللی د سیاست نه/ستراتیژی',
'Organization removed from Project': 'موسسه له منځه تللی د پروژی څخه',
'Organization updated': 'موسسه ننی شوی',
'Organization': 'موسسه',
'Organization(s)': 'موسسه/ موسسی',
'Organization/Branch': 'موسسه/څانګه',
'Organizational Development': 'د موسسه پرمختګ',
'Organizations / Teams / Facilities': 'موسسی/ ګروپونه/امکانات',
'Organizations': 'موسسی',
'Origin': 'اصل بنیاد',
'Original': 'اصلی',
'Other Address': 'نور ادرسونه',
'Other Details': 'نور معلومات',
'Other Users': 'نور استعمالوونکی',
'Other': 'نور',
'Others': 'نوری',
'Outcomes, Impact, Challenges': 'نتیجه، تاثیر، رقابتونه',
'Output added': 'محصول اضافه شوی',
'Output deleted': 'محصول له منځه تللی',
'Output updated': 'محصول ننی شوی',
'Output': 'محصول',
'Outputs': 'محصولونه',
'Outreach Staff': 'توسعه د کارمندانو',
'Overlays': 'پوښیدلی',
'Owned Records': 'خپل راپور',
'PDF File': 'د PDF سند',
'PIFACC Priorities': 'د PIFACC اولویتونه',
'PIFACC-1: Implementing Tangible, On-Ground Adaptation Measures': 'د PIFACC -1: قابلیت د لامس اجرا کول، په زمکه باندی د توافق اندازه',
'PIFACC-2: Governance and Decision Making': 'د PIFACC-2: دولت او د تصمیم جوړونه',
'PIFACC-3: Improving our understanding of climate change': 'د PIFACC-3: د اقلیم د بدلون د درک پرمختګ',
'PIFACC-4: Education, Training and Awareness': 'د PIFACC-4: تعلیم، زده کړه او خبرتیا',
'PIFACC-5: Mitigation of Global Greenhouse Gas Emissions': 'د PIFACC-5: کمول د نړی د ګلخانی ګاز نشر ',
'PIFACC-6: Partnerships and Cooperation': 'د PIFACC-6: شراکت او همکاری',
'PIL (Python Image Library) not installed': 'د PIL ( د خامار د تصویر کتابتون) نه دی نصب شوی',
'PIL (Python Image Library) not installed, images cannot be embedded in the PDF report': 'د PIL ( د خامار د تصویر کتابتون) نه دی نصب شوی، تصویر په د PDF په راپور کی نه ځای پر ځای کیدای شی',
'PMER': 'PMER',
'Pacific Islands Framework for Action on Climate Change. Applicable to projects in Pacific countries only': 'د ارام جزیری څلور خواوی د په عمل کی د اقلیم بدلون لپاره',
'Page': 'پاڼه',
'Paid': 'ورکول شوی',
'Pan Map: keep the left mouse button pressed and drag the map': 'د پان نقشه: چپه د موس تڼی ټینګه کړی او نقشه کش کړی',
'Parent needs to be of the correct level': 'منشا باید په ښه سطح کی وی',
'Parent needs to be set for locations of level': 'منشا باید ځای پر ځای شی د ځای سطح ته',
'Parent needs to be set': 'منشا باید ځای پر ځای شی',
'Parent': 'منشا',
'Part of the URL to call to access the Features': 'د URL یوه برخه باید شکل ته لاسرسی ولری',
'Participant Details': 'د ګډونوال معلومات',
'Participant added': 'ګډونوال اضافه شوی',
'Participant deleted': 'ګډونوال له منځه تللی',
'Participant updated': 'ګډونوال ننی شوی',
'Participant': 'ګډونوال',
'Participants': 'ګډونوالان',
'Participatory Hygiene Promotion': 'د ګډونوالو حفظ الصحه پرختللی',
'Partner National Society': 'د شریک ملی ټولنه ',
'Partner Organization Details': 'د شریک د موسسه معلومات',
'Partner Organization added': 'د شریک موسسه اضافه شوی',
'Partner Organization deleted': 'د شریک موسسه له منځه تللی',
'Partner Organization updated': 'د شریک موسسه ننی شوی',
'Partner Organizations': 'د شریک موسسی',
'Partner': 'شریک',
'Partners': 'شریکان',
'Pashto': 'پښتو',
'Pass': 'کامیاب',
'Passport': 'پاسپورټ',
'Password': 'پاسورد',
'Peer Support': 'د دوست حمایه',
'Pending': 'په وخت کی',
'Percentage': 'فیصدی',
'Performance Rating': 'د اجرا اندازه',
'Permanent Address': 'اصلي استوګنځی',
'Permanent Home Address': 'د کور دایمی ادرس',
'Person Details': 'د کس معلومات',
'Person Entity': 'د کس وجود',
'Person Registry': 'د کس راجستر',
'Person added': 'کس اضافه شوی',
'Person deleted': 'کس له منځه تللی',
'Person details updated': 'د کس دمعلوماتونو ننی شوی ',
'Person must be specified!': 'کس باید مشخص شی',
'Person or OU': 'کس یا OU',
'Person who has actually seen the person/group.': 'کس چاچی هغه کس لیدلی/ګروپ',
'Person': 'کس',
'Personal Profile': 'شخصی منظره',
'Personal': 'شخصی',
'Persons': 'کسان',
'Philippine Pesos': 'د فلیپین پیسو',
'Phone #': 'د تلیفون شمیره',
'Phone 1': 'لومړی تلیفون',
'Phone 2': 'دوهم تلیفون',
'Phone Number': 'شمیره',
'Phone number is required': 'د تلیفون شمیره ضرورت دی',
'Phone': 'تلیفون',
'Photo': 'د عکس ځای',
'Photograph': 'عکاس',
'Place of Birth': 'د زیږدنی ځای',
'Place of Work': 'د دندی ځای',
'Place on Map': 'ځای په نقشه کی',
'Planning and Construction of Drainage Systems ': 'د اوبو د لاندی سیستم پلانول او جوړول',
'Please Select a Facility': 'هیله ده امکانات انتخاب کړی',
'Please choose a type': 'هیله ده بڼه انتخاب کړی',
'Please enter a first name': 'هیله لومړی نوم مو داخل کړی',
'Please enter a last name': 'هیله ده وروستی نوم مو داخل کړی',
'Please enter a number only': 'هیله ده یوازی شمیره داخل کړی',
'Please enter a valid email address': 'هیله ده یو د اعتبار برقی لیک ادرس داخل کړی',
'Please fill this!': 'هیله ده دا ډک کړی',
'Please record Beneficiary according to the reporting needs of your project': 'هیله ده زیرمه کړی فایدی د راپور په مطابقت چی ضرورت خپلی پروژی ته ولری',
'Please select a valid image!': 'هیله ده یوه با اعتباره تصویر انتخاب کړی',
'Please select exactly two records': 'هیله دقیقا دو راپوره انتخاب کړی',
'Please use this field to record any additional information, including a history of the record if it is updated.': 'هیله دا ځای استعمال کړی چی زیرمه کړی هر یو اضافی معلومات، چی په ځان کی ولری د تاریخ راپور که ننی شوی وی',
'Please use this field to record any additional information, such as Ushahidi instance IDs. Include a history of the record if it is updated.': 'هیله دا ځای استعمال کړی چی زیرمه کړی هر یو اضافی معلومات، لکه د تذکری پر ځای اوشاهیدی. چی په ځان کی ولری د تاریخ راپور که ننی شوی وی',
'PoI Type Details': 'د پول د بڼی معلومات',
'PoI Type added': 'د پول بڼه اضافه شوی',
'PoI Type deleted': 'د پول بڼه له منځه تللی',
'PoI Type updated': 'د پول بڼه ننی شوی',
'PoI Types': 'د پول بڼی',
'PoI': 'پول',
'PoIs successfully imported.': 'پولونه په کامیابی توګه سره وارد شوی',
'PoIs': 'پولونه',
'Point of Interest Details': 'د زړه پوری معلوماتو نکتی',
'Point of Interest added': 'زړه پوری نکتی اضافه شوی',
'Point of Interest deleted': 'زړه پوری نکتی له منځه تللی',
'Point of Interest updated': 'زړه پوری نکتی ننی شوی',
'Points of Interest': 'زړه پوری نکتی',
'Policies & Strategies': 'سیاستونه او ستراتیژیانی',
'Policy Development': 'د سیاست پرمختګ',
'Policy or Strategy added': 'سیاست یا ستراتیژی اضافه شوی',
'Policy or Strategy deleted': 'سیاست یا ستراتیژی له منځه تللی',
'Policy or Strategy updated': 'سیاست یا ستراتیژی ننی شوی',
'Policy or Strategy': 'سیاست یا ستراتیژی',
'Polygon': 'څو ضلعی',
'Poor': 'مسکین',
'Popup Fields': 'د پاپ اپ ځایونه',
'Popup Label': 'د پاپ اپ علامی',
'Position': 'موقف',
'Positions': 'موفقونه',
'Post Harvest Storage and Management': 'د محصول زیرمه پوست او مدیریت',
'Postcode': 'د پوست کود',
'Power Supply Type': 'د برق تولیدونکی بڼه',
'Powered by Sahana': 'برقی شوی په واسطه د ساحانا',
'Powered by': 'برقی شوی په واسطه د',
'Preferred Name': 'ترجیع شوی نوم',
'Presence Condition': 'د حضور حالت',
'Presence Log': 'د حضور ثبت',
'Presence': 'حضور',
'Previous View': 'مخکینی منظره',
'Previous': 'مخکینی',
'Primary school': 'ابتدائیه ښونڅی',
'Print': 'چاپ',
'Priority from 1 to 9. 1 is most preferred.': 'اولویت د ۱ تر ۹ . ۱ ډیر ترجیع شوی',
'Priority': 'اولویت',
'Privacy': 'یوازیوالی',
'Private': 'شخصی',
'Procedure': 'رویه',
'Processing': 'پروسه',
'Profession': 'مسلک',
'Professional Experience Details': 'د مسلکی تجربی معلومات',
'Professional Experience added': 'مسلکی تجربی اضافه شوی',
'Professional Experience deleted': 'مسلکی تجربی له منځه تللی',
'Professional Experience updated': 'مسلکی تجربی ننی شوی',
'Professional Experience': 'مسلکی تجربه',
'Profile Configuration removed': 'د منظری تنطیم له منځه تللی',
'Profile Configuration updated': 'د منظری تنظیم ننی شوی',
'Profile Configuration': 'د منظری تنطیم',
'Profile Configurations': 'د منظری تنظیمات',
'Profile Configured': 'منظره تنظیم شوی',
'Profile Details': 'د منظری معلومات',
'Profile Page': 'د منظری پاڼه',
'Profile Picture': 'د منظری تصویر',
'Profile Picture?': 'د منظری تصویر؟',
'Profiles': 'منظری',
'Program Details': 'د پروګرام معلومات',
'Program Hours (Month)': 'د پروګرام ساعتونه ( میاشت)',
'Program Hours (Year)': 'د پروګرام ساعتونه ( کال)',
'Program added': 'پروګرام اضافه شوی',
'Program deleted': 'پروګرام له منځه تللی',
'Program updated': 'پروګرام ننی شوی',
'Program': 'پروګرام',
'Programme Manager': 'د پروګرام منیجر',
'Programme Preparation and Action Plan, Budget & Schedule': 'د پروګرام اماده ګی او د پلان عمل، بودیجه او تقسیم اوقات',
'Programs': 'پروګرامونه',
'Project Assessments and Planning': 'د پروژی تشخیص او پلانول',
'Project Calendar': 'د پروژی جنتری',
'Project Details': 'د پروژی معلومات',
'Project Name': 'د پروژی نوم',
'Project Officer': 'د پروژی افسر',
'Project Organization Details': 'د پروژی موسسه معلومات',
'Project Organization updated': 'د موسسه پروژه ننی شوی',
'Project Organizations': 'د موسسی پروژه',
'Project Report': 'د راپور پروژه',
'Project Task': 'د دندی پروژه',
'Project Time Report': 'د پروژی د وخت راپور',
'Project added': 'پروژه اضافه شوی',
'Project deleted': 'پروژه له منځه تللی',
'Project not Found': 'پروژه پیدا شوی نه ده',
'Project updated': 'پروژه ننی شوی',
'Project': 'پروژه',
'Projection Details': 'د طرح معلومات',
'Projection Type': 'د طرح بڼه ',
'Projection added': 'طرح اضافه شوی',
'Projection deleted': 'طرح له منځه تللی',
'Projection updated': 'طرح ننی شوی',
'Projection': 'طرح',
'Projections': 'طرحی ',
'Projects Map': 'د پروژو نقشه',
'Projects': 'پروژی',
'Proposed': 'پیشنهاد شوی',
'Provide a password': 'پاسورد مهیا شوی',
'Province': 'ولایت',
'Provision of Inputs': 'د داخلیدو تهیه کول',
'Provision of Tools and Equipment': 'تهیه کول د الاتو او لوازم',
'Psycho Social Support Refresher': 'روحي او ټولنيز ملاتړ تجددي کورسونه',
'Psycho Social Support': 'روحي او ټولنيز ملاتړ',
'Psychosocial Support': 'د اجتماع د روانی حمایه',
'Public': 'ټول ګټه',
'Python GDAL required for Shapefile support!': 'د خامار GDAL ضرورت دسند شکل حمایه',
'Python needs the ReportLab module installed for PDF export': 'د راپور لابراتور نصب شوی نه ده',
'Python needs the xlrd module installed for XLS export': 'غلطی : دا جاری افعی (Python ) د xlrd موډل ته اړه لری نصب شی د XLS صادراتو ته',
'Python needs the xlwt module installed for XLS export': 'غلطی : دا جاری افعی (Python ) د xlwt موډل ته اړه لری نصب شی د XLS صادراتو ته',
'Quantity': 'مقدار',
'Query Feature': 'د تحقیق شکل',
'Query': 'تحقیق',
'Queryable?': 'قابلیت د تحقیق',
'RDRT (Regional Disaster Response Teams)': 'د RDRT ( ساحوی مصیبت د ځواب ګروپ)',
'RDRT Members': 'د RDRT اعضاوی',
'RDRT Type': 'د RDRT بڼه',
'READ': 'لوستل',
'REST Filter': 'ارام فیلتر',
'RFA Priorities': 'د RFA اولویت',
'RFA1: Governance-Organisational, Institutional, Policy and Decision Making Framework': 'د RFA1: دولتی- موسسه یی، مسلکی ، سیاست او د تصمیم جوړونکی چوکات',
'RFA2: Knowledge, Information, Public Awareness and Education': 'د RFA2: علم< معلومات، عامه خبرتیا او تعلیم',
'RFA3: Analysis and Evaluation of Hazards, Vulnerabilities and Elements at Risk': 'د RFA3: تحلیل او د ارزیابی خطر، د اسیب او د عنصرونو خطر',
'RFA4: Planning for Effective Preparedness, Response and Recovery': 'د RFA4: پلان د موثره اماده ګی، ځوابونه او بیا له سره جوړول',
'RFA5: Effective, Integrated and People-Focused Early Warning Systems': 'د RFA5: موثر، مجتمع او د خلکو دقت د مخکی اخطار سیستم',
'RFA6: Reduction of Underlying Risk Factors': 'د RFA6: تبدیل د لاندنی خطر خوا',
'RMS Team': 'د RMS گروپ',
'RMS': 'RMS',
'Race': 'مسابقه',
'Rangeland, Fisheries and Forest Management': 'د ساحه ځای، د ماهی نیولو ځای او د ځنګل مدیریت',
'Rapid Data Entry': 'سریع د معلوماتو داخل ثبت',
'Rating': 'اندازه',
'Ready': 'اماده',
'Reason for Dismissal': 'د انفکاک دلیل',
'Receive %(opt_in)s updates:': 'راوړل %(opt_in)s ننی:',
'Receive updates': 'ننی شوی راوړل',
'Received Shipments': 'راوړل شوی مواد د کشتی په واسطه',
'Record Details': 'د تاریخچی معلومات',
'Record Updates': 'تاریخچه ننی شوی',
'Record added': 'تاریخچه اضافه شوی',
'Record already exists': 'تاریخچه مخکی د مخکی نه موجوده ده',
'Record approved': 'تاریخچه تایید شوی',
'Record could not be approved.': 'تاریخچه نه شی کیدای تایید شی',
'Record could not be deleted.': 'تاریخچه له منځه تللی نه شی',
'Record deleted': 'تاریخچه له منځه تللی',
'Record not found!': 'تاریخچه پیدا شوی نه ده!',
'Record not found': 'تاریخچه پیدا شوی نه ده',
'Record updated': 'تاریخچه ننی شوی',
'Record': 'تاریخچه',
'Records merged successfully.': 'تاریخچه یوزای شوی دی په کامیابی سره',
'Records': 'تاریخچی',
'Red Cross & Red Crescent National Societies': 'سره چلیپا او سره د حلال ملی ټولنه ',
'Red Cross / Red Crescent': 'سره چلیپا/ سره حلال علامه',
'Referral': 'مراجعه',
'Refresh Rate (seconds)': 'د تازګی اندازه ( ثانیه)',
'Region Details': 'د ساحه معلومات',
'Region Location': 'د ساحه ځای ',
'Region added': 'ساحه اضافه شوی',
'Region deleted': 'ساحه له منځه تللی',
'Region updated': 'ساحه ننی شوی',
'Region': 'ځای ',
'Regional': 'ساحوی',
'Regions': 'ساحی',
'Register As': 'راجستر په حیث',
'Register for Account': 'راجستر د حساب لپاره',
'Register': 'راجستر',
'Registered users can %(login)s to access the system': 'راجستر شوی استعمالونکی کولای شی %(login)s چی سیستم ته لاسرسی پیدا کړی',
'Registration not permitted': 'راجستریدلو ته اجازه نشته',
'Reject': 'لغو کول',
'Relationship': 'ارتباط',
'Relatives Contact #': 'د خپلوانو شمیره',
'Relief Team': 'د اسوده گی گروپ',
'Religion': 'مذهب',
'Reload': 'بیا له سره بارول',
'Remarks': 'کتنی',
'Remove Coalition': 'اتحاد له منځه وړل',
'Remove Feature: Select the feature you wish to remove & press the delete key': 'شکل له منځه وړل: هغه شکل انتخاب کړی چی تاسی غواړی چی له منځه وړل او د له منځه وړلو تڼی کیږدی',
'Remove Layer from Profile': 'پاڼه د منظری څخه له منځه یوسی',
'Remove Layer from Symbology': 'پاڼه د نماد څخه له منځه وړل',
'Remove Network': 'شبکه له منځه وړل',
'Remove Organization from Project': 'موسسه د پروژی څخه له منځه وړل',
'Remove Profile Configuration for Layer': 'د منظری تنظیم د پاڼی څخه له منځه وړل',
'Remove Skill': 'د مهارت له منځه وړل',
'Remove Symbology from Layer': 'نماد د پاڼی څخه له منځه وړل',
'Remove existing data before import': 'موجوده معلومات د پاڼی څخه له منځه وړل',
'Remove selection': 'انتخاب له منځه وړل',
'Remove this entry': 'دا د داخلیدو ثبت له منځه وړل',
'Remove': 'له منځه تلل',
'Reopened': 'بیا له سره خلاصول',
'Repeat your password': 'خپل پاسورد تکرار کړی',
'Repeat': 'تکرار',
'Replace': 'ځای پر ځای کړی',
'Reply': 'ځواب ورکول',
'Report Options': 'د اختیارونو راپور',
'Report of': 'راپور د',
'Report on Annual Budgets': 'راپور په کلنی بودیجه',
'Report': 'راپور ورکول',
'Reports': 'راپورونه',
'Request': 'تقاضا ګانی',
'Requested By Facility': 'تقاضا شوی د امکاناتو په واسطه',
'Requested Items': 'تقاضا شو آلات',
'Requests': 'تضاګانی',
'Requires Login': 'د داخلیدو ضرورت',
'Reset all filters': 'ټول فیلترونه بیا له سره تنظیم کړی',
'Reset': 'بیا لره سره تنظیمول',
'Resize Feature: Select the feature you wish to resize & then Drag the associated dot to your desired size': 'په اندازه د شکل: هغه شکل انتخاب کړه چی اندازه یی جوړوی او بیا مربوطه نقطه کش کړی د خپل زړه په میل باندی',
'Resource Details': 'د منبع معلومات',
'Resource Inventory': 'دمنبع فهرست',
'Resource Management System account has been activated': 'د منبع د مدیریت سیستم حساب فعال شوی دی',
'Resource Management System': 'د منبع د مدیریت سیستم',
'Resource Transfers for Acquiring Assets': 'د منبع انتقال د سرمایه د لاسته راوړل لپاره',
'Resource Transfers for Replacing/ Provisioning Or Consumption': 'د منبع انتقال د ځای پر ځای کولو لپاره/ د تهیه او مصرف',
'Resource Type Details': 'د منبع د بڼه معلومات',
'Resource Type added': 'د منبع بڼه اضافه شوی',
'Resource Type deleted': 'د منبع بڼه له منځه تللی',
'Resource Type updated': 'د منبع بڼه ننی شوی',
'Resource Type': 'د منبع بڼه',
'Resource Types': 'د منبع بڼی',
'Resource added': 'منبع اضافه شوی',
'Resource deleted': 'منبع له منځه تللی',
'Resource updated': 'منبع ننی شوی',
'Responded': 'ځواب ورکړل شوی',
'Response Summaries': 'د ځواب خلاصی',
'Response Summary Added': 'د ځواب خلاصه اضافه شوی',
'Response Summary Deleted': 'د ځواب خلاصه له منځه تللی',
'Response Summary Details': 'د ځواب خلاصه معلومات',
'Response Summary Report': 'د ځواب د خلاصه راپور',
'Response Summary Updated': 'د ځواب خلاصه ننی شوی',
'Response': 'ځواب',
'Retrieve Password': 'د پاسورد په لاس راوړل',
'Revert Entry': 'د داخلیدو ثبت ته بیا راتګ',
'Review': 'تکرار',
'Risk Management and Quality Assurance': 'د خطر مدیریت او د کیفیت اطمینان',
'Risk Transfer': 'د خطر انتقال',
'Role Details': 'د نقش معلومات',
'Role Name': 'د نقش نوم',
'Role Required': 'نقش ته ضرورت',
'Role added': 'نقش اضافه شوی',
'Role assigned to User': 'نقش تعین شوی دی استعمالوونکی ته',
'Role deleted': 'نقش له منځه تللی',
'Role updated': 'نقش ننی شوی',
'Role': 'نقش',
'Roles Permitted': 'نقش ته اجازه ورکړل شوی',
'Roles currently assigned': 'نقش اوس تعین شوی',
'Roles of User': 'د استعمالوونکی نقشونه',
'Roles updated': 'نقشونه ننی شوی',
'Roles': 'نقشونه',
'Room Details': 'د کوټی معلومات',
'Room added': 'کوټه اضافه شوی',
'Room deleted': 'کوټه له منځه تللی',
'Room updated': 'کوټه ننی شوی',
'Room': 'کوټه',
'Rooms': 'کوټی ',
'Rotate Feature: Select the feature you wish to rotate & then Drag the associated dot to rotate to your desired location': 'تاوول د شکل: هغه شکل انتخاب کړی چی غواړی چی تاو یی کړی او تعین شوی نقطه کش کړی چی تاو یی کړی د خپل زړه په میل ځای کی',
'Run every': 'هر یوه ته حرکت ورکړی',
'S3PivotTable unresolved dependencies': 'د S3 تاویدونکی چوکات نا حل شوی وابستګی',
'Sahana Community Chat': 'د ساحانا د خبرو جامعه',
'Sahana Eden Humanitarian Management Platform': 'د ساحانا ایدن د بشری مدیریت اپارتمان',
'Sahana Eden Website': 'د ساحانا ایدن ویب سایت',
'Sanitation': 'فاضله اوبه د خالی کولو سیستم',
'Save Map': 'د نقشه زیرمه',
'Save and Continue Editing': 'زیرمه او د نقشه تنظیم',
'Save as New Map?': 'منحیث د نوی نقشه زیرمه کول',
'Save search': 'پلټنه زیرمه کول',
'Save this search': 'دا پلټنه زیرمه کړی',
'Save': 'زیرمه',
'Save: Default Lat, Lon & Zoom for the Viewport': 'زیرمه: پخوانی لات، لون او نژدی کول د بندر منطری ته',
'Saved Filters': 'فیلترونه زیرمه شوی',
'Saved Maps': 'نقشی زیرمه شوی',
'Saved Searches': 'پلتنی زیرمه شوی',
'Saved search added': 'زیرمه شوی پلټنی اضافه شوی',
'Saved search deleted': 'زیرمه شوی پلټنی له منځه تللی',
'Saved search details': 'زیرمه شوی پلټنی معلومات',
'Saved search updated': 'زیرمه شوی پلټنی ننی شوی',
'Saved searches': 'زیرمه شوی پلټنی',
'Saved': 'زیرمه شوی',
'Scanned Copy': 'لیدل شوی کاپی',
'Scanned Forms Upload': 'لیدل شوی فورمونه راوړل شوی',
'Scheduled Jobs': 'تقسیم بندی شوی دندی',
'Schema': 'موډل',
'School / University': 'ښونځی',
'School Holidays only': 'د مکتب رخصتی یوازی',
'School RC Units Development': 'د مکتب د RC د برخی پرمختګ',
'School Safety and Children Education,': 'د مکتب محافظت او د ماشومانو تعلیم،',
'Seaport': 'د بحر بندر',
'Search %(site_label)s Status': 'پلټنه %(site_label)s حالت',
'Search Activities': 'د فعلیتونو پلټنه',
'Search Activity Types': 'د فعالیت د بڼی پلټنی',
'Search Addresses': 'د ادرسونو پلټنه',
'Search Affiliations': 'د اتحاد پلټنه',
'Search Annual Budgets': 'د کلنی د بودیجه پلټنه',
'Search Appraisals': 'د ارزیابی پلټنه',
'Search Awards': 'د انعام پلټنه',
'Search Beneficiaries': 'د فایدو پلټنه',
'Search Beneficiary Types': 'د فایدی د بڼو پلټنه',
'Search Branch Organizations': 'د موسسو د څانګی پلټنه',
'Search Campaigns': 'د کمپین پلټنه',
'Search Certificates': 'د شهادت نامو پلټنه',
'Search Certifications': 'د شهادتونو پلټنه',
'Search Clusters': 'د غونچو پلټنه',
'Search Coalitions': 'د ایتلاف پلټنه',
'Search Communities': 'د جامعو پلټنه',
'Search Community Contacts': 'د جامعه د اړیکو پلټنه',
'Search Competency Ratings': 'د اصلاحاتو د اندازی پلټنه',
'Search Contact Information': 'د اړیکی د معلوماتو پلټنه',
'Search Contacts': 'د اړیکو پلټنه',
'Search Course Certificates': 'د کورس شهادت نامه پلټنه',
'Search Courses': 'کورسونه پلټنه',
'Search Credentials': 'د اعتبار لیک پلټنه',
'Search Criteria': 'د معیار پلټنه',
'Search Departments': 'د برخه پلټنه',
'Search Deployments': 'د توسعه پلټنه',
'Search Donors': 'د بښونکو پلټنه',
'Search Education Details': 'د تعلیم د معلوماتو پلټنه',
'Search Entries': 'د ثبت پلټنه',
'Search Facilities': 'د امکاناتو پلټنه',
'Search Facility Types': 'د امکاناتو د بڼی پلټنه',
'Search Feature Layers': 'د شکل د پاڼی پلټنه',
'Search Groups': 'د ګروپ پلټنه',
'Search Hazards': 'د خطر پلټنه',
'Search Hours': 'د ساعتونو پلټنه',
'Search Identity': 'د پیژنګلنی پلټنه',
'Search Images': 'د تصویر پلټنه',
'Search Job Titles': 'د دندی د عنوان پلټنه',
'Search Keywords': 'مهمه کلمو پلټنه',
'Search Layers': 'د پاڼو پلټنه',
'Search Location Hierarchies': 'د ځای د سلسه د مراتبو پلټنه',
'Search Location': 'د ځای پلټنه',
'Search Locations': 'د ځایونو پلټنه',
'Search Log Entry': 'د داخلیدو د ثبت پلټنه',
'Search Logged Time': 'ثبت شوی وخت پلټنه',
'Search Mailing Lists': 'د برقی د لیک د لست پلټنه',
'Search Map Profiles': 'د نقشه د تنظیم پلټنه',
'Search Markers': 'د ښکاروندوی پلټنه',
'Search Member': 'د عضو پلټنه',
'Search Members': 'د اعضاوو پلټنه',
'Search Membership Types': 'د عضویت د بڼی پلټنه',
'Search Membership': 'د عضویت پلټنه',
'Search Milestones': 'د غونچه پلټنه',
'Search Networks': 'د شبکو پلټنه',
'Search Office Types': 'د دفتر د بڼه پلټنه',
'Search Offices': 'د دفترونو پلټنه',
'Search Open Tasks for %(project)s': 'د خلاصی دندی پلټنه د %(project)s',
'Search Organization Types': 'د موسسه د بڼه پلټنه',
'Search Organizations': 'د موسسی پلټنه',
'Search Participants': 'د ګډونوالو پلټنه',
'Search Partner Organizations': 'د شریکو موسسو پلټنه',
'Search Persons': 'د کسانو پلټنه',
'Search PoI Types': 'د پول د بڼه پلټنه',
'Search Points of Interest': 'د زړه پوری نقطی پلټنه',
'Search Policies & Strategies': 'د ساستونو او ستراتیژیو پلټنه',
'Search Professional Experience': 'د مسلکی تجربی پلټنه',
'Search Programs': 'د پروګرامونو پلټنه',
'Search Project Organizations': 'د موسسو د پروژی پلټنه',
'Search Projections': 'د طرحو پلټنه',
'Search Projects': 'د پروژی پلټنه',
'Search Records': 'د تاریخچو پلټنه',
'Search Red Cross & Red Crescent National Societies': 'د سره چلیپا او سره حلال د ملی جامعو پلټنه',
'Search Regions': 'د ساحو پلټنه',
'Search Resource Types': 'د منبع د بڼه پلټنه',
'Search Resource Inventory': 'د منبع د فهرست پلټنه',
'Search Response Summaries': 'د خلاصه ځواب پلټنه',
'Search Results': 'د نتیجو پلټنه',
'Search Roles': 'د نقشونو پلټنه',
'Search Rooms': 'د کوټو پلټنه',
'Search Sectors': 'د برخو پلټنه',
'Search Services': 'د خدمتونو پلټنه',
'Search Shipped Items': 'انتقال شوی الاتو پلټنه',
'Search Skill Equivalences': 'د مهارت معادل پلټنه',
'Search Skill Types': 'د مهارت د بڼه پلټنه',
'Search Skills': 'د مهارتونو پلټنه',
'Search Staff & Volunteers': 'د کارمندانو او رضاکارانو پلټنه',
'Search Staff Assignments': 'د کارمندانو د دندی پلټنه',
'Search Staff': 'د کارمندانو پلټنه',
'Search Symbologies': 'د نماد پلټنه',
'Search Tasks': 'د دندو پلټنه',
'Search Teams': 'د ګروپونو پلټنه',
'Search Theme Data': 'د موضوع د معلومات پلټنه',
'Search Themes': 'د موضوعګانو پلټنه',
'Search Training Events': 'د تعلیم د واقع پلټنه',
'Search Training Participants': 'د تعلیم د ګډونوالو پلټنه',
'Search Volunteer Cluster Positions': 'د رضاکار د غونچی د موقف پلټنه',
'Search Volunteer Cluster Types': 'د رضاکار د غونچی د بڼه پلټنه',
'Search Volunteer Clusters': 'د رضاکار د غونچی پلټنه',
'Search Volunteer Roles': 'د رضاکار د نقشونو پلټنه',
'Search Volunteers': 'د رضاکارانو پلټنه',
'Search by skills': 'د مهارت په واسطه پلټنه',
'Search for Activity Organization': 'د موسسه د فعالیت لپاره پلټنه',
'Search for Activity Type': 'د فعالیت د بڼی لپاره پلټنه',
'Search for a Person': 'د کس لپاره پلټنه',
'Search for a Project Community by name.': 'د پروژی د جامعه لپاره پلټنه د نوم په واسطه',
'Search for a Project by name, code, location, or description.': ' پلټنه د پروژی د نوم په واسطه، کود، ځای،یا تشریحات',
'Search for a Project by name, code, or description.': ' پلټنه د پروژی د نوم په واسطه، کود، یا تشریحات',
'Search for office by organization or branch.': ' پلټنه د دفتر د موسسه یا د څانګی په واسطه',
'Search for office by organization.': ' پلټنه د دفتر د موسسه په واسطه',
'Search location in Geonames': ' پلټنه د ځای په جغرافیاوی نوم',
'Search saved searches': ' پلټنه د زیرمه شوی پلټنه',
'Search': 'پلتنه',
'Secondary Server (Optional)': 'دوهم سرور ( اختیاری)',
'Secondary school': 'منځينی ښونڅی',
'Seconds must be a number.': 'دوهمی باید شمیری وی',
'Seconds must be less than 60.': 'دوهمی باید کم د ۶۰ څخه وی',
'Secretary General': 'عمومی سکرتر',
'Sector Details': 'د برخه معلومات',
'Sector added to Organization': 'برخه اضافه شوی موسسه ته',
'Sector added to Project': 'برخه اضافه شوی پروژی ته',
'Sector added to Theme': 'برخه اضافه شوی موضوع ته',
'Sector added': 'برخه اضافه شوی',
'Sector deleted': 'برخه له منځه تللی',
'Sector removed from Organization': 'برخه له منځه تللی د موسسه نه',
'Sector removed from Project': 'برخه له منځه تللی د پروژی نه',
'Sector removed from Theme': 'برخه له منځه تللی د موضوع نه',
'Sector updated': 'برخه ننی شوی',
'Sector': 'برخه',
'Sectors to which this Activity Type can apply': 'برخی چی دا د فعالیت بڼه استعمال شوی',
'Sectors to which this Theme can apply': 'برخی چی دا د موضوع استعمال شوی',
'Sectors': 'برخی',
'Security Officer': 'د امنیت افسر',
'Security': 'امنیت',
'See All Entries': 'وګوری ټول په ثبت کی',
'Seen': 'لیدل شوی',
'Select %(level)s': 'انتخاب کړه %(level)s',
'Select %(location)s': 'انتخاب کړه %(location)s',
'Select All': 'ټول انتخابول',
'Select Existing Location': 'انتخاب د موجود ځای',
'Select This Location': 'انتخاب کړه دا ځای',
'Select all': 'ټول انتخاب کړی',
'Select an existing bin': 'انتخاب کړی یو موجوده صندوق',
'Select an image to upload. You can crop this later by opening this record.': 'انتخاب کړی یو تصویر چی رایی وړی. تاسی کیدای شی دا وروسته قیچی کړی د تاریخچه په خلاصیدو په واسطه',
'Select from Registry': 'انتخاب کړی د راجستری څخه',
'Select one or more option(s) that apply': 'انتخاب کړی یو یا ډیر اختیارونه چی درخواست یی کړی',
'Select resources to import': 'انتخاب کړی منبعی چی وارد شی',
'Select the default site.': 'پخوانی سایت انتخاب کړی',
'Select the option that applies': 'انتخاب کړی هغه اختیار چی درخواست شوی',
'Select the overlays for Assessments and Activities relating to each Need to identify the gap.': 'انتخاب کړی پوښښ د تشخیص او فعالیتونو چی هر یوه مشخصه خالیګاه ته ضرورت وی\\',
'Select the person assigned to this role for this project.': 'انتخاب کړی کس چی تعین شوی وی دی نقش د پروژی لپاره دی',
'Select this if you need this resource to be mapped from site_id instead of location_id.': 'دا انتخاب کړی که تاسی دی منبع ته اړتیا لری چی نقشه شی د سایت څخه ـ تذکره د ځای پر ځای ـ تذکره',
'Select': 'انتخاب کړه',
'Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form.': 'انتخاب شوی OCR فورم هیڅ صفحی نه لری. استعمال کړی بله د اصلاح جوړه کړی یو نوی اصلاح په راوړلو سره د یوه نوی فورم ',
'Send Message': 'پیغام لیږل',
'Send Task Notification': 'د دندی مشخصاتو لیږل',
'Send a message to this person': 'دی کس ته یو پیغام ولیږی',
'Send a message to this team': 'دی ګروپ ته یو پیغام ولیږی',
'Send batch': 'د غونچی لیږل',
'Senior (50+)': 'لوی (۵۰+)',
'Sent Shipments': 'د لیږلو انتقال',
'Service Details': 'د خدمت معلومات',
'Service Record': 'د خدمت تاریخچه',
'Service added to Organization': 'خدمت اضافه شوی موسسه ته',
'Service added': 'خدمت اضافه شوی ',
'Service deleted': 'خدمت له منځه تللی',
'Service removed from Organization': 'خدمت له منځه تللی د موسسه څخه',
'Service updated': 'خدمت ننی شوی',
'Service': 'خدمت',
'Services': 'خدمتونه',
'Set True to allow editing this level of the location hierarchy by users who are not MapAdmins.': 'حقیقت ځای پر ځای کړی چی تنظیم ته اجازه ورکړی دی سطح د سلسله د مراتبو ځای د استعمالوونکو په واسطه چی اداره د نقشی نه وی',
'Set as my Default': 'ځای پر ځای کړی دپخوانی توګه',
'Settings': 'تنظیمات',
'Sex': 'جنسیت',
'Shapefile Layer': 'د سند د شکل پاڼه',
'Share': 'شریکول',
'Shelter': 'پناګاه',
'Short Description': 'لنډ تشریحات',
'Short Title / ID': 'لنډ عنوان/ تذکړه',
'Show %(number)s entries': 'ښکارول %(number)s ثبتونه',
'Show Pivot Table': 'ښکارول تاویدونکی چوکات',
'Show Table': 'ښکارول د چوکات',
'Show on Map': 'په نقشه کی ښکاره کړی',
'Show totals': 'مجموعه ښکاره کړی',
'Show': 'ښکارول',
'Showing 0 to 0 of 0 entries': 'ښکارول ۰ تر ۰ د ۰ ثبتونه',
'Showing _START_ to _END_ of _TOTAL_ entries': 'ښکارول _START_ to _END_ of _TOTAL_ ثبتونه',
'Signature': 'امضا',
'Simple Search': 'ساده پلټنه',
'Simulation ': 'مانند',
'Single PDF File': 'یو د pdf سند',
'Site Name': 'د سایت نوم',
'Site Planning': 'د سایت پلانول',
'Site Selection': 'د سایت انتخاب',
'Site': 'سایت',
'Sitemap': 'د سایت نقشه',
'Situation Monitoring/Community Surveillance': 'د اوضاع لید/ د جامعه نظارت',
'Situation': 'اوضاع',
'Skeleton Example': 'د سکلیت مثال',
'Sketch': 'طرح',
'Skill Catalog': 'د مهارت فهرست',
'Skill Details': 'د مهارت معلومات',
'Skill Equivalence Details': 'د مهارت معادل معلومات',
'Skill Equivalence added': 'د مهارت معادل اضافه شوی',
'Skill Equivalence deleted': 'د مهارت معادل له منځه تللی',
'Skill Equivalence updated': 'د مهارت معادل ننی شوی',
'Skill Equivalence': 'د مهارت معادل',
'Skill Equivalences': 'دمهارت معادل',
'Skill Type Catalog': 'د مهارت بڼی فهرست',
'Skill Type added': 'د مهارت بڼه اضافه شوی',
'Skill Type deleted': 'مهارت بڼه له منځه تللی',
'Skill Type updated': 'د مهارت بڼه ننی شوی',
'Skill Type': 'د مهارت بڼه',
'Skill added': 'مهارت اضافه شوی',
'Skill deleted': 'مهارت له منځه تللی',
'Skill removed': 'مهارت له منځه تللی',
'Skill updated': 'مهارت ننی شوی',
'Skill': 'مهارت',
'Skills': 'مهارتونه',
'Skin Marks': 'د پوستکی علامی',
'Small Scale Mitigation': 'د وړوکی توازن کی تخفیف ورکول',
'Social Mobilisation': 'د جامعه تجهیزات',
'Solid Waste Management': 'د جامد ضایع مدیریت',
'Sops and Guidelines Development': 'د غذایی مایع او د راهنمایی پرمختګ',
'Sorry location %(location)s appears to be outside the area of parent %(parent)s.': 'بښنه غواړم ځای%(location)s معلومیږی چی دباندی ساحه د منشا %(parent)s.',
'Sorry location %(location)s appears to be outside the area supported by this deployment.': 'بښنه غواړم ځای%(location)s معلومیږی چی دباندی ساحه حمایه شوی د دی وسعت په واسطه',
'Sorry location appears to be outside the area of parent %(parent)s.': 'بښنه غواړم ځای معلومیږی چی دباندی ساحه د منشا %(parent)s.',
'Sorry location appears to be outside the area supported by this deployment.': 'بښنه غواړم ځای معلومیږی چی دباندی ساحه حمایه شوی د دی وسعت په واسطه',
'Sorry, only users with the MapAdmin role are allowed to edit these locations': 'بښنه غواړم، یوازی استعمالوونکی د اداری د نقشه سره اجازه لری چی دا ځایونه تنظیم کړی',
'Sorry, there are no addresses to display': 'بښنه غواړم، هیڅ ادرسونه نه ښکاریږی',
'Source Name': 'د منبع نوم',
'Source URL': 'منبع د URL',
'Source': 'منبع',
'Specific Area (e.g. Building/Room) within the Location that this Person/Group is seen.': 'مشخصه ساحه ( لکه تعمیر/ کوټه) په ځای کی چی دا کس/ګروپ لیدل شوی',
'Specific locations need to have a parent of level': 'مشخصه ځایونه ضرورت لری چی ولری یوه منشا',
'Spherical Mercator (900913) is needed to use OpenStreetMap/Google/Bing base layers.': 'کروی میرکاتور (۹۰۰۹۱۳ ) ضرورت دی چی خلاصه دسرک نقشه استعال کړی/ ګوګل/ د بینګ د اساس پاڼه ',
'Spraying of Vectors': 'د پاشولو مسیرونه',
'Staff & Volunteers (Combined)': 'کارمندان او رضاکاران ( ترکیب شوی)',
'Staff & Volunteers': 'کارمندان او رضاکاران',
'Staff Assigned': 'کارمندان تعیین شوی',
'Staff Assignment Details': 'د کارمندانو د تعین معلومات',
'Staff Assignment removed': 'د کارمندانو تعین له منځه تللی',
'Staff Assignment updated': 'د کارمندانو تعین ننی شوی',
'Staff Assignments': 'د کارمندانو تعینات',
'Staff ID': 'د کارمندانو تذکره',
'Staff Management': 'د کارمندانو مدیریت',
'Staff Member Details updated': 'د کارمندانو عضو معلومات ننی شوی',
'Staff Member Details': 'د کارمندانو عضو معلومات',
'Staff Member added': 'د کارمندانو عضو اضافه شوی',
'Staff Member deleted': 'د کارمندانو عضو له منځه تللی',
'Staff Record': 'د کارمندانو تاریخچه',
'Staff Report': 'د کارمندانو راپور',
'Staff member added': 'د کارمندانو عضو اضافه شوی',
'Staff with Contracts Expiring in the next Month': 'کارمندان د ختم شوی قراردادونو سره پبه راتلوونکی میاشت',
'Staff': 'کارمندان',
'Staff/Volunteer Record': 'کارمندان/د داوطلب راپور',
'Start Date': 'د شروع د تاریخ',
'Status Details': 'د حالت معلومات',
'Status added': 'حالت اضافه شوی',
'Status deleted': 'حالت له نځه تللی',
'Status updated': 'حالت ننی شوی',
'Status': 'حالت',
'Statuses': 'حالتونه',
'Stockpiling, Prepositioning of Supplies': 'ذخیره، بیا ځای پر ځای کول د تدارکاتو',
'Strategy Development': 'د ستراتیژی پرمختګ',
'Street Address': 'د کوڅه ادرس',
'Street View': 'د کوڅه منظره',
'String used to configure Proj4js. Can be found from %(url)s': 'د رسی استعمال چی تنظیم شی proj4js. کیدای شی پیدا شوی وی %(url)s',
'Strong': 'قوی',
'Style invalid': 'بی اعتباره ستایل',
'Style': 'ستایل',
'Sub Chapter': 'فرعی برخه',
'Submission successful - please wait': 'تسلیمی کامیابه شوه - هیله انتظار وباسی',
'Submit': 'تسلیمیدل',
'Supervisor': 'ناظر',
'Supplier': 'عرضه کوونکی',
'Suppliers': 'عرضه کوونکی',
'Swiss Francs': 'د سویس فرانک',
'Switch to 3D': 'د 3d تبدیلول',
'Symbologies': 'نمادونه',
'Symbology Details': 'د نماد معلومات',
'Symbology added': 'نماد اضافه شوی',
'Symbology deleted': 'نماد له منځه تللی',
'Symbology removed from Layer': 'نماد له منځه تللی د پاڼی څخه',
'Symbology updated': 'نماد ننی شوی',
'Symbology': 'نماد ',
'TMS Layer': 'د TMS پاڼه',
'Table Permissions': 'د چوکات اجازه',
'Tablename': 'د چوکات نوم',
'Tags': 'د علامی وهل',
'Task Details': 'د دندی معلومات',
'Task added': 'دنده اضافه شوی',
'Task deleted': 'دنده له منځه تللی',
'Task updated': 'دنده ننی شوی',
'Task': 'دنده',
'Tasks': 'دندی',
'Team Description': 'د ګروپ تشریحات',
'Team Details': 'د ګروپ معلومات',
'Team Leader': 'د ګروپ رهبر',
'Team Member added': 'د ګروپ عضو اضافه شوی',
'Team Members': 'د ګروپ اعضاوی',
'Team Name': 'د ګروپ نوم',
'Team Type': 'د ګروپ بڼه',
'Team added': 'ګروپ اضافه شوی',
'Team deleted': 'ګروپ له منځه تللی',
'Team updated': 'ګروپ ننی شوی',
'Team': 'ګروپ',
'Teams': 'ګروپونه',
'Tells GeoServer to do MetaTiling which reduces the number of duplicate labels.': 'جیو سرور ته ووایی چی د میتاتیلینګ چی کم کړی شمیری د دوه دانی علامی',
'Temporary Address': 'اوسنی استوګنځی',
'Term for the fifth-level within-country administrative division (e.g. a voting or postcode subdivision). This level is not often used.': 'ګروپ په پنځمه سطحه د هیواد د اداری برخه ( لکه رای ورکول یا د پوست کود فرعی برخه). دا سطحه معمولا نه استعمالیږی',
'Term for the fourth-level within-country administrative division (e.g. Village, Neighborhood or Precinct).': 'څلورمه دوره - سطح په جریان - د هیواد د اداری برخه( لکه کلی، ګاونډی یا محوطه)',
'Term for the primary within-country administrative division (e.g. State or Province).': 'ابتدایی دوره په جریان - د هیواد د اداری برخه(لکه ایالت یا ولایت)',
'Term for the secondary within-country administrative division (e.g. District or County).': 'دوهمه دوره په جریان - د هیواد د اداری برخه ( لکه ولسوالی یا هیواد)',
'Term for the third-level within-country administrative division (e.g. City or Town).': 'دریمه دوره په جریان - د هیواد د اداری برخه ( لکه ښار یا کلی)',
'Terms of Service': 'د خدمت دوری',
'Tertiary Server (Optional)': 'دریم سرور (اختیاری)',
'Text': 'متن',
'The Area which this Site is located within.': 'هغه ساحه چی په هغه کی دا سایت موجود دی په',
'The Current Location of the Person/Group, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'اوسنی ځای د کس/ګروپ، چی کیدای شی عمومی ( د راپور ) یا خلاصه ( د ښکارولو لپاره په نقشه کی). یو څوو نقشونه داخل کړی چی با اعتباره ځایونه ولټوی.',
'The Email Address to which approval requests are sent (normally this would be a Group mail rather than an individual). If the field is blank then requests are approved automatically if the domain matches.': 'د برقی لیک ادرس کوم ته چی تاییدی تقاضا شوی( کیدای شی دا یو ګروپ د برقی لیکونو وی نسبت تنهایی ته). اوکه ځای خالی وی نو بیا تقاضا ګانی په خپل سر تاییدیږی که ساحه موافقت پیدا کړی',
'The Location the Person has come from, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'د هغه ځایه چی دا کس راغلی، چی کیدای شی عمومی وی( د راپور لپاره) یا خلاصه ( د ښکارولو لپاره په نقشه کی) . داخل کړی یو څو نقشونه چی د با اعتباره ځایونو ولټوی.',
'The Location the Person is going to, which can be general (for Reporting) or precise (for displaying on a Map). Enter a few characters to search from available locations.': 'هغه ځای ته چی دا کس ځی، چی کیدای شی عمومی وی( د راپور لپاره) یا خلاصه ( د ښکارولو لپاره په نقشه کی) . داخل کړی یو څو نقشونه چی د با اعتباره ځایونو ولټوی.',
'The Maximum valid bounds, in projected coordinates': 'لوړترین با اعتباره حدونه، په پروژی کی همکاری کوی.',
'The Organization Registry keeps track of all the relief organizations working in the area.': 'د موسسه راجستری ساتی د ټولو د راحتی موسسو چی په ساحه کی کار کوی',
'The URL for the GetCapabilities page of a Web Map Service (WMS) whose layers you want available via the Browser panel on the Map.': 'د url د توانایی لاسته راوړنی صفحه د شبکه د خدمت نقشه (WMS) د چا پاڼی چی غواړی اعتباری شی د بروس د صفحه په واسطه په نقشه کی',
'The URL to access the service.': 'د url چی لاسرسی پیدا کړی دی خدمت ته',
'The area is': 'ساحه ده',
'The attribute used to determine which features to cluster together (optional).': 'نسبی استعمالیږی چی مشخص کړی کوم شکل یی سره غونچه کړی ( اختیاری)',
'The attribute which is used for the title of popups.': 'نسبی کوم چی استعمالیږی د پاپ اپ د عنوان لپاره',
'The attribute within the KML which is used for the title of popups.': 'نسبی په جریان د KML کوم چی استعمالیږی د پاپ اپ د عنوان لپاره',
'The attribute(s) within the KML which are used for the body of popups. (Use a space between attributes)': 'نسبی په جریان د KML کوم چی استعمالیږی د بدن د پاپ اپ لپاره. ( د نسبتونو تر منځ مصافه استعمال کړی)',
'The body height (crown to heel) in cm.': 'د بدنه لوړوالی ( د سره تر پونډو پوری) په سانتی متر باندی',
'The facility where this position is based.': 'امکانات چیرته چی موقف قرار لری',
'The first or only name of the person (mandatory).': 'لومړی یا یوازی د کس نوم ( ضروری)',
'The form of the URL is http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS.': 'د URL فورم http://your/web/map/service?service=WMS&request=GetCapabilities where your/web/map/service stands for the URL path to the WMS. د url د مسیر WMS لپاره وی',
'The language you wish the site to be displayed in.': 'ژبه چی تاسی غواړی چی سایت په کی ښکاره شی',
'The length is': 'پراخوالی ده',
'The map will be displayed initially with this latitude at the center.': 'نقشه کیدای شی ښکاره شی لومړی د دی وسعت د جغرافیاوی په مرکز کی',
'The map will be displayed initially with this longitude at the center.': 'نقشه کیدای شی ښکاره شی لومړی د دی د طول جغرافیاوی په مرکز کی',
'The minimum number of features to form a cluster. 0 to disable.': 'کمترینه شمیره د شکل چی یو غونچه تشکیل کړی. ۰ نه تر غیر فعالیدو پوری',
'The name to be used when calling for or directly addressing the person (optional).': 'نوم چی استعمال شی کله چی د مستقیم ادرس کس ته ورکول کیږی ( اختیاری)',
'The number of pixels apart that features need to be before they are clustered.': 'بغیر د شمیری د پیکسل شکل ضرورت لری چی مخکی د غونچی کیدلو څخه باید وی',
'The number of tiles around the visible map to download. Zero means that the 1st page loads faster, higher numbers mean subsequent panning is faster.': 'د کاشی شمیری چی د ښکاروندی نقشه چی رایوړی. صفر په دی معنی ده چی لومړی صفحه ډیره په تیزی کار کوی، لوړی شمیری په دی معنی دی چی وروستی قطعی تیزی شوی',
'The system supports 2 projections by default:': 'د سیستم حمایه د پخوانی دوه طرحو په واسطه',
'The uploaded Form is unreadable, please do manual data entry.': 'رواړل شوی فورم قابلیت د لوستلو نه دی، هیله ده کلنی د معلوماتو ثبت وکړی',
'The weight in kg.': 'وزن په کیلوګرام',
'The': 'د',
'Theme Data deleted': 'د موضوع معلومات له منځه تللی',
'Theme Data updated': 'د موضوع معلومات ننی شوی',
'Theme Data': 'د موضوع معلومات',
'Theme Details': 'د موضوع معلومات',
'Theme Layer': 'د موضوع پاڼه',
'Theme added to Activity': 'ز فعالیت ته',
'Theme added to Project Location': 'موضوع اضافه شوی ده د پروژی ځای ته',
'Theme added to Project': 'موضوع اضافه شوی ده پروژی ته',
'Theme added': 'موضوع اضافه شوی',
'Theme deleted': 'موضوع له منځه تللی',
'Theme removed from Activity': 'موضوع له منځه تللی د فعالیت څخه',
'Theme removed from Project Location': 'موضوع له منځه تللی د پروژی ځای نه',
'Theme removed from Project': 'موضوع له منځه تللی د پروژی نه',
'Theme updated': 'موضوع ننی شوی',
'Theme': 'موضوع',
'Themes': 'موضوع ګانی',
'There are multiple records at this location': 'مختلفه تاریخچی دی په دی ځای کی',
'There are too many features, please Zoom In': 'ډیری شکلونه شته دی ، هیله ده نژدی یی کړی',
'There is no address for this person yet. Add new address.': 'هیڅ ادرس ددی کس لپاره تر اوسه نشته ده. نوی ادرس اضافه کړی',
'There is no status for this %(site_label)s yet. Add %(site_label)s Status.': 'هیڅ حالت ددی لپاره نشته %(site_label)s تر اوسه. اضافه کړی %(site_label)s حالت',
'There was a problem, sorry, please try again later.': 'یوه ستونزه وه ، بښنه غواړی، هیله وروسته کوښښ وکړی',
'These are the filters being used by the search.': 'دا فیلترونه د استعمالیدو په حال کی دی د پلټنی په واسطه',
'These need to be added in Decimal Degrees.': 'دوی ضرورت لری چی اضافه شی په اعشاری درجه سره',
'This email address is already in use': 'دا برقی لیک مخکی د مخکی راجستر شوی',
'This is appropriate if this level is under construction. To prevent accidental modification after this level is complete, this can be set to False.': 'دا مناسب ده که دا سطح د ساختمانی د لاندی وی. که حادثوی مشخصات څخه مخنیوی وشی بیا دا سطح مکمله شوی ، دا کیدای شی غلط ځای پر ځای شی',
'This is normally edited using the Widget in the Style Tab in the Layer Properties on the Map.': 'دا په عادی توګه تنطیم شوی استعمال د یوه شی د جدول ستایل د پاڼی د تنظیماتو په نقشه کی',
'This job has already been finished successfully.': 'دا دنده مخکی د مخکی کامیابی سره ختمه شوی ',
'This level is not open for editing.': 'دا سطح د تنظیم لپاره خلاصه شوی نه ده',
'This role can not be assigned to users.': 'دا نقش د استعمالوونکو لپاره تعین شوی نه دی',
'This should be an export service URL, see': 'دا کیدای شی یو د صادرونکی خدمت اوسی URL، وګوری',
'Tiled': 'کاشی شوی',
'Time Actual': 'واقعی وخت',
'Time Estimate': ' وخت تخمین کیږی',
'Time Estimated': 'وخت تخمین شوی دی',
'Time Frame': 'د وخت چوکات',
'Time Log Deleted': 'د وخت ثبت له منځه تللی',
'Time Log Updated': 'د وخت ثبت ننی شوی',
'Time Log': 'د وخت ثبت',
'Time Logged': 'د وخت ثبت شوی',
'Time Taken': 'وخت نیول شوی',
'Time': 'وخت',
'Timeline': 'د وخت مسیر',
'Title to show for the Web Map Service panel in the Tools panel.': 'عنوان چی ښکاره کړی د شبکه دنقشی د خدمت صفحه په د الاتو صفحه کی',
'Title': 'عنوان',
'To Print or Share the Map you will have to take a screenshot. If you need help taking a screen shot, have a look at these instructions for %(windows)s or %(mac)s': 'د چاپ او شریکول د نقشی تاسی کیدای شی ولری چی ونیسی یو تصویر د صفحه. که تاسی مرستی اړه ولری چی ونیسی یو د صفحه عکس، نو دی هدایاتو ته وګوری ٪( کړکی) یا ٪ (mac)s',
'To edit OpenStreetMap, you need to edit the OpenStreetMap settings in your Map Config': 'چی تنظیم کړی خلاصه د سرک نقشه، تاسی اړه لری چی تنظیم کړی د خلاصی سرک نقشه تنظیم په نقشه د تنظیم کی',
'To move the Timeline: use the mouse scroll wheel, the arrow keys or grab and drag the Timeline.': 'چی د وخت کرښه ته حرکت ورکړی: د موس د منځنی تڼی راتاوه کړی، ارو تڼی یا ونیسی او کش کړی د وخت کرښه',
'Tools and Guidelines Development': 'د الاتو او رهنمای پرمختګ',
'Total Annual Budget': 'مجموعی کلنی مرستی',
'Total Funding (Local Currency)': 'مجموعی مرستی( محلی اسعار)',
'Total Funding Amount': 'مجموعی د مرستی مقدار',
'Total Persons': 'مجموعی کسان',
'Total Population': 'مجموعی نفوس',
'Total Records: %(numrows)s': 'مجموعی تاریخچی: ٪( نم رو) ',
'Total': 'مجموع',
'Tourist Group': 'د سیاحینو ګروپ',
'Trackable': 'قابلیت د تعقیب',
'Training Course Catalog': 'د کورس د زده کړی فهرست',
'Training Details': 'د زده کړی معلومات',
'Training Event Details': 'د زده کړی د واقعه معلومات',
'Training Event added': 'د زده کړی واقعه اضافه شوی',
'Training Event deleted': 'د زده کړی واقعه له منځه تللی',
'Training Event updated': 'د زده کړی واقعه ننی شوی',
'Training Event': 'د زده کړی واقعه',
'Training Events': 'د زده کړی واقعی',
'Training Facility': 'د زده کړی امکانات',
'Training Hours (Month)': 'د زده کړی ساعتونه (میاشت)',
'Training Hours (Year)': 'د زده کړی ساعتونه (کال)',
'Training Report': 'د زده کړی راپور',
'Training added': 'زده کړی اضافه شوی',
'Training deleted': ' د زده کړی له منځه تللی',
'Training of Community/First Responders': 'د جامعه زده کړی/لومړی ځواب ورکوونکی',
'Training of Master Trainers/Trainers': 'د ماستر د زده کړی ښوونکی/ ښوونکی',
'Training updated': 'زده کړی ننی شوی',
'Training': 'زده کړه',
'Trainings': 'زده کړی',
'Transfer': 'انتقال',
'Transit': 'تیریدل',
'Transparent?': 'شفافه؟',
'Tree and Mangrove Planting': 'ونه او د شاهی کورنی کرنه',
'Type': 'بڼه',
'UN agency': 'د un نمایندګی',
'UPDATE': 'ننی ',
'URL to a Google Calendar to display on the project timeline.': 'د URL د ګوکل جنتری ته چی ښکاره کړی د پروژی د وخت خط',
'URL': 'URL',
'Unable to parse CSV file or file contains invalid data': 'ناتوانه ده چی تجزیه او تحلیل کړی CSV سند یا د سند د متن بی اعتباره معلومات',
'Uncheck all': 'ټول غیر علامه لرونکی کړه',
'United States Dollars': 'د متحده ایالاتو ډالر',
'Units': 'برخی',
'Unknown': 'نامعلومه',
'Unmark as duplicate': 'بی علامه کړه دوه دانیی',
'Unspecified': 'نا مشخصه',
'Unsupported data format': 'غیر حمایه شوی معلوماتو شکل',
'Unsupported method': 'غیر حمایه شوی رویه',
'Update Coalition': 'اتحادیه ننی کړی',
'Update Report': 'راپور ننی کړی',
'Update this entry': 'دا د داخلیدو ثبت ننی کړی',
'Upload Format': 'شکل راوړی',
'Upload Scanned OCR Form': 'لیدل شوی راوړی د OCR څخه',
'Upload Shapefile': 'راوړی د شکل سندونه',
'Upload an image file (png or jpeg), max. 400x400 pixels!': 'راوړی تصویر سند ( png یا jpeg )، لویترین. ۴۰۰*۴۰۰ پیکسله',
'Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages.': 'راوړل شوی د PDF سند ډیر/کم صفحه شمیری لری نسبت د ضرورت څخه. وګوری که تاسی مهیا شوی تکرار د خپل فورم لپاره مناسب لری او هم وګوری فورم چی په کی مناسب د صفحو شمیره وی',
'Uploaded file is not a PDF file. Provide a Form in valid PDF Format.': 'راوړل شوی سند PDF نه دی یوه فورم د PDF شکل مهیا کړی ',
'Urgent': 'ضروری',
'Use Geocoder for address lookups?': 'جیو کودر ادرس د مراجع استعمال کړی',
'Use Site?': 'سایت استعمال کړی؟',
'Use decimal': 'عشاریه استعمال کړی',
'Use default': 'پخوانی استعمال کړی',
'Use deg, min, sec': 'برقی دقیقه، ثانیه استعمال کړی',
'Use this to set the starting location for the Location Selector.': 'دا استعمال کړی چی شروع ځای ځای پر ځای کړی د ځای د انتخابوونکی لپاره',
'Used in onHover Tooltip & Cluster Popups to differentiate between types.': 'استعمال د دریدو د معلوماتو آلات او د غونچی پاپ اپ فرق د بڼو تر منځ وشی',
'Used to build onHover Tooltip & 1st field also used in Cluster Popups to differentiate between records.': 'استعمالیږی چی جوړ کړی د دریدو آلات او لومړی ساحه هم استعمالیږی په غونچه د پاپ اپ کی چی فرق د تاریخچو تر منځ وشی',
'Used to check that latitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'استعمالیږی چی وګوری چی وسعت د جغرافیا په داخل ځای کی مناسب دی. کیدای شی چی استعمال شی د فیلتر د لست منابعو لپاره چی ځایونه لری',
'Used to check that longitude of entered locations is reasonable. May be used to filter lists of resources that have locations.': 'استعمالیږی چی وګوری چی طول د جغرافیا په داخل ځای کی مناسب دی. کیدای شی چی استعمال شی د فیلتر د لست منابعو لپاره چی ځایونه لری',
'Used to populate feature attributes which can be used for Styling.': 'استعمالیږی چی نسبی پر نفوسه کړی شکل کوم چی استعمالیږی ستایل لپاره',
'User Account': 'د استعمالوونکی حساب ',
'User Profile': 'د استعمالوونکی منظره',
'User Roles': 'د استعمالوونکی نقش',
'User added to Role': 'استعمالوونکی نقش ته اضافه شوی',
'User with Role': 'استعمالوونکی د نقش سره',
'User': 'استعمالوونکی',
'Username': 'د استعمالوونکی نوم',
'Users in my Organizations': 'د استعمالوونکی زما په موسسه کی',
'Users with this Role': 'استعمالوونکی دی نقش سره',
'Users': 'د استعمالوونکی',
'Uses the REST Query Format defined in': 'د استعمالوونکی د استراحت د سوال شکل تعریف شو',
'VCA (Vulnerability and Capacity Assessment)': 'د VCA( خرابیدونکی او د ظرفیت تشخیص)',
'Valid From': 'با اعتباره فورم',
'Valid Until': 'با اعتباره ده تر',
'Validation error': 'د تصدیق غلطی',
'Value': 'ارزښت',
'Vector Control': 'د جهت کنترول',
'Verified': 'تحقیق شوی',
'Version': 'موډل',
'Very Good': 'ډیر ښه',
'Very Strong': 'ډیر قوی',
'Video Tutorials': 'تصویری زده کړی',
'View Fullscreen Map': 'د پوره صفحی نقشه ښکارول',
'View Location Details': 'د ځای معلومات ښکارول',
'View full screen': 'مکمله صفحه ښکارول',
'View on Map': 'په نقشه کی ښکارول',
'View': 'منطره',
'Vocational Training and Employment Skills': 'مسلکی زده کړه او وسیع مهارتونه',
'Volunteer Cluster Position added': 'د رضاکار غونچی پوست اضافه شوی',
'Volunteer Cluster Position deleted': 'د رضاکار غونچی پوست له منځه تللی',
'Volunteer Cluster Position updated': 'د رضاکار غونچی پوست ننی شوی',
'Volunteer Cluster Position': 'د رضاکار غونچی پوست',
'Volunteer Cluster Type added': 'د رضاکار غونچی بڼه اضافه شوی',
'Volunteer Cluster Type deleted': 'د رضاکار غونچی بڼه له منځه تللی',
'Volunteer Cluster Type updated': 'د رضاکار د غونچی د بڼه ننی شو',
'Volunteer Cluster Type': 'د رضاکار غونچی بڼه',
'Volunteer Cluster added': 'د رضاکار غونچه اضافه شوی ',
'Volunteer Cluster deleted': 'د رضاکار غونچه له منځه تللی',
'Volunteer Cluster updated': 'د رضاکار غونچه ننی شوی',
'Volunteer Cluster': 'د رضاکار غونچی',
'Volunteer Details updated': 'د رضاکار معلومات ننی شوی',
'Volunteer Details': 'د رضاکار معلومات',
'Volunteer Hours': 'د رضاکار ساعتونه',
'Volunteer ID': 'کود نمبر',
'Volunteer Insurance': 'د رضاکار بیمه',
'Volunteer Management': 'د رضاکار مدیریت',
'Volunteer Recognition': 'د رضاکار پیژندنه',
'Volunteer Record': ' د رضاکار تاریخچه',
'Volunteer Recruitment': 'د رضاکار استخدام',
'Volunteer Report': 'د رضاکار راپور',
'Volunteer Role Catalog': 'د رضاکار نقس فهرست',
'Volunteer Role Details': 'د رضاکار د نقش معلومات',
'Volunteer Role added': 'د رضاکار نقش اضافه شوی',
'Volunteer Role deleted': 'د رضاکار نقش له منځه تللی',
'Volunteer Role updated': 'د رضاکار نقش ننی شوی',
'Volunteer Role': 'د رضاکار نقش',
'Volunteer Service Record': 'د رضاکار خدمت زیرمه شوی',
'Volunteer Start Date': 'درضاکارپه توګه شامیلیدونیټه',
'Volunteer Training': 'د رضاکار تعلیم',
'Volunteer added': 'رضاکار اضافه شوی',
'Volunteer deleted': 'رضاکار له منځه شوی',
'Volunteer': 'رضاکار',
'Volunteering in Emergencies Guidelines/Toolkit': 'په عاجله راهنمای کی باید رضاکار شی/ وسایل',
'Volunteering in Pandemic Emergency Situations': 'رضاکار کیدل په ساری عاجله حالات',
'Volunteers': 'رضاکاران',
'WARNING': 'خبرتیا',
'WFS Layer': 'د WFS پاڼه',
'WGS84 (EPSG 4236) is required for many WMS servers.': 'WGS84(EPSG 4236) ته اړه ده د ډیرو WMS سرورونوته',
'WKT is Invalid!': 'WKT بی اعتباره ده',
'WMS Layer': 'د WMS پاڼه',
'Warehouse Manager': 'د ګدام منیجر',
'Warehouse Stock': 'د ګدام اسهام',
'Warehouse': 'ګدام',
'Warehouses': 'ګدامونه',
'Water Testing': 'د اوبو امتحان',
'Water and Sanitation Refresher': 'اوبه او د فاضله اوبو سیستم تجددي کورسونه',
'Water and Sanitation': 'اوبه او د فاضله اوبو سیستم',
'Watsan Officer': 'د واتسن افسر',
'Watsan Technician': 'د واتسن تکنیشن',
'Watsan': 'واتسن',
'We have tried': 'مونږ کوښښ کړی ',
'Weak': 'ضعیف',
'Web Map Service Browser Name': 'د شبکه د نقشه د خدمت د لټون نوم',
'Web Map Service Browser URL': 'د شبکه د نقشه د URL خدمت لټون',
'Website': 'ویب سایت',
'Week': 'هفته',
'Weekends only': 'د هفته اخر یوازی',
'Weekly': 'هفته یی',
'Weight (kg)': 'وزن (کیلو ګرام)',
'Weight': 'وزن',
'Well-Known Text': 'پیژندل شوی متن',
'What order to be contacted in.': 'کوم امرونه به نښلول کیږی',
'When this search was last checked for changes.': 'دا لټنه کله د بدلون لپاره لیدل شوی',
'Whether calls to this resource should use this configuration as the default one': 'که اړیکی دی منبع ته استعمال کړی دا تنظیم منحیث د پخوانی یوه',
'Whether the Latitude & Longitude are inherited from a higher level in the location hierarchy rather than being a separately-entered figure.': 'که وسعت او طول د جغرافیاوی د لوړی سطح د ځای د سلسله د مراتبو پر ځای وی نسبت د جدا داخل شوی شکل څخه',
'Whether the resource should be tracked using S3Track rather than just using the Base Location': 'منبع باید تعقیب شی د S3Track نسبت د یوازی د اساس ځای استعمال څخه',
'Whiskers': 'بریت',
'Who is doing What Where': 'څوک څه کوی څه چیرته',
'Will create and link your user account to the following records': 'خپل د استعمال حساب جوړه او ونښلوی لاندنی تاریخچه ته',
'With best regards': 'خپل د زړه د کومی سلامونه',
'Work on Program': 'دنده په کار کی',
'Work': 'دنده',
'X-Ray': 'ایکسرای',
'XML parse error': 'د XML د غلطی تجزیه',
'XSLT stylesheet not found': 'د XSLT ستیل صفحه پیدا شوی نه ده',
'XSLT transformation error': 'د XSLT د تغیر غلطی',
'XYZ Layer': 'د XYZ پاڼه',
'Year that the organization was founded': 'هغه کال چی موسسه پیدا شوی ',
'Year': 'کال',
'Yes': 'هو',
'You can click on the map below to select the Lat/Lon fields': 'تاسی کولای شی چی په نقشه کی لاندی کلیک کړی چی Lat/ Lon ساحه',
'You can search by name, acronym or comments': 'تاسی کوالای شی د نوم،دنوم سر یا د نظریو په واسطه ولټوی',
'You can search by name, acronym, comments or parent name or acronym.': 'تاسی کوالای شی د نوم،دنوم سر، نظریو یا د منشا نوم یا د نوم سر په واسطه ولټوی',
'You can select an area on the image and save to crop it.': 'تاسی کولای شی چی یوه ساحه د تصویر انتخاب او د پریکولو لپاره یی زیرمه کړی',
'You can select the Draw tool': 'تاسی کولای شی چی د رسامی الات انتخاب کړی',
'You can set the modem settings for SMS here.': 'تاسی کولای شی د مودم تنظیمات ځای پر ځای کړی د sms لپاره',
'You do not have permission for any facility to perform this action.': 'تاسی اجازه نه لری چی د کوم یوه امکاناتو چی دا عمل اجرا کړی',
'You do not have permission for any organization to perform this action.': 'تاسی اجازه نه لری چی د کوم یوه موسسه چی دا عمل اجرا کړی',
'You have unsaved changes. You need to press the Save button to save them': 'تاسی نه زیرمه شوی تغیرات لری. تاسی اړه لری چی د زیرمی تڼی کیږدی چی زیرمه یی کړی',
'You must agree to the Terms of Service': 'تاسی باید موافق اوسیږی د خدمت دوره سره',
'You must enter a minimum of %d characters': 'تاسی باید دننه کړی کمترین د ٪ d شخصیتونه',
'You need to have at least 2 records in this list in order to merge them.': 'تاسی لګ تر لګه ۲ تاریخچی ولری په دی لست کی چی یوزای یی کړی',
'Your name for this search. Notifications will use this name.': 'ستاسو نوم د دی لټون لپاره. خبرتیا دا نوم به کاروی',
'Your request for Red Cross and Red Crescent Resource Management System (RMS) has been approved and you can now access the system at': 'ستاسو هیله د سره چلیپا او سره حلال منبع مدیریت سیستم RMS لپاره تایید شوی تاسی اوس لاسرسی لری سیستم ته په',
'Youth Leadership Development': 'د ځوانانو د رهبری پرمختګ',
'Youth as Agents of Behavioural Change Refresher': 'ځوانان د بدلون د استازيو په توګه تجددي کورسونه',
'Youth as Agents of Behavioural Change': 'ځوانان د بدلون د استازيو په توګه',
'Zone': 'ساحه',
'Zoom In': 'نژدی کول',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'نژدی کول: په نقشه باندی کلیک کړی یا د چپ د موس تڼی کیږدی او کش یی کړی چی یوه مستطیل جوړه کړی',
'Zoom Levels': 'د نژدی کولو سطح',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'لری کول: په نقشه باندی کلیک کړی یا د چپ د موس تڼی کیږدی او کش یی کړی چی یوه مستطیل جوړه کړی',
'Zoom in closer to Edit OpenStreetMap layer': 'نژدی کړی چی تنظیم کړی خلاصه د سرک نقشه پاڼه',
'Zoom to Current Location': 'نژدی کړی د اوسنی ځای ته',
'Zoom to maximum map extent': 'نژدی کړی د لویترین د نقشی وسعت',
'Zoom': 'فاصله کمول',
'activate to sort column ascending': 'فعالیتونه چی برابروی ستونونه په سعودی شکل',
'activate to sort column descending': 'فعالیتونه چی برابروی ستونونه په نزولی شکل',
'an individual/team to do in 1-2 days': 'یو یوازی ګروپ/ ګروپ چی باید اجرا کړی ۱-۲ ورځو',
'and': 'او',
'anonymous user': 'یوه نامعلومه استعمالوونکی',
'average': 'اوسط',
'black': 'تور',
'blond': 'سپین مخ',
'blue': 'شین',
'brown': 'خړ',
'by %(person)s': 'په واسطه ٪( کس/کسان)',
'by': 'په واسطه',
'cannot be deleted.': 'له منځه تللی نه شی',
'caucasoid': 'کوکاساید',
'clear': 'پباک',
'click here': 'دلته کلیک کړی',
'created': 'جوړ شوی',
'curly': 'چیندار',
'current': 'اوس ',
'dark': 'تیاره',
'deceased': 'مړی',
'deleted': 'له منځه تللی ',
'diseased': 'مرض',
'displaced': 'بی ځایه',
'divorced': 'طلاق',
'enter a value': 'یوه بیه داخله کړی',
'enter date and time in range %(min)s %(max)s': 'تاریځ او وخت داخل کړی چی په ساحه کی دی %(min)s %(max)s',
'enter date and time on or after %(min)s': 'تاریخ او وخت داخل کړی چی په لور یا تیر شوی %(min)s',
'enter date and time on or before %(max)s': 'تاریخ او وخت داخل کړی چی په لور یا پاتی شوی %(max)s',
'enter date and time': 'تاریځ او وخت داخل کړی',
'expired': 'بی اعتباره',
'fat': 'چاق',
'female': 'ښځینه',
'fill in order: day(2) month(2) year(4)': 'ډک کړی په نظم: دوهمه ورځ دوهمه میاشت څلورمه کال',
'fill in order: hour(2) min(2) day(2) month(2) year(4)': 'ډک کړی په نظم: دوهمه ساعت دوهمه دقیقه دوهمه ورځ دوهمه میاشت څلورمه کال',
'fill in order: hour(2) min(2) month(2) day(2) year(4)': 'ډک کړی په نظم: دوهمه ساعت دوهمه دقیقه دوهمه ورځ دوهمه میاشت څلورمه کال',
'fill in order: month(2) day(2) year(4)': 'ډک کړی په نظم: دوهمه ورځ دوهمه میاشت څلورمه کال',
'forehead': 'تندی',
'form data': 'د معلومات جوړکړی',
'found': 'پیدا شوی',
'getting': 'په لاس راوړل',
'green': 'شین',
'grey': 'خاورین',
'hours': 'ساعتونه',
'ignore': 'په نظر کی نه نیول',
'injured': 'ټپی شوی',
'input': 'داخلول',
'insufficient number of pages provided': 'کمه شمیره د صفحه مهیا شوی',
'less': 'کمه شمیره د صفحه مهیا شوی',
'light': 'سپک',
'login': 'داخلیدل',
'long': 'اوږد',
'long>12cm': 'اوږد> 12 سانتی متر',
'male': 'نرینه',
'married': 'واده شوی',
'medium': 'میانه',
'medium<12cm': 'میانه > 12 سانتی متر',
'missing': 'له لاسه ورکړ شوی',
'mongoloid': 'مربوطه',
'more': 'ډیر',
'more...': 'ډیر...',
'negroid': 'نیګروید',
'new ACL': 'نوی ACL',
'no options available': 'هیڅ اختیار په لاسرسی کی نشته',
'no': 'نه',
'none': 'هیڅ یو',
'obsolete': 'له منځه تللی',
'on %(date)s': 'په ٪ (تاریخ) s',
'or': 'یا',
'other': 'نور',
'overdue': 'رسیدلی تاریخ',
'paid': 'پیسی ورکړل شوی',
'per': 'هر یو',
'records deleted': 'تاریخی له منځه تللی',
'red': 'سور',
'reload': 'له سره بار شوی',
'representation of the Polygon/Line.': 'ننداره د څو ضلعی/ خط.',
'retry': 'کوښښ کول',
'seconds': 'ثانیی',
'see comment': 'نظریه وګوری',
'see more': 'نور وګوری',
'separated from family': 'د کورنی څخه جدا شوی',
'separated': 'جدا شوی ',
'shaved': 'ارښیول شوی',
'short': 'لنډ',
'short<6cm': 'لنډ > 6 سانتی متر',
'sides': 'خوا ګانی',
'sign-up now': 'دننه کیدل اوس',
'single': 'یوازی',
'slim': 'ډنګر',
'source': 'Target',
'straight': 'مستقیم',
'suffered financial losses': 'د مالی د لاسه ورکړی تحمل',
'tall': 'اوږد',
'times (0 = unlimited)': 'وختونه (۰ = نامحدوده)',
'times and it is still not working. We give in. Sorry.': 'وختونه او تر اوسه کار نه ورکوی. مونږه ورکړی. بښنه',
'times': 'وختونه',
'to download a OCR Form.': 'چی د OCR فورم ډاونلود کړی',
'to reset your password': 'چی خپل پاسورد بیا ځای پر ځای کړو',
'tonsure': 'خپل سر ورښیل',
'total': 'مجموعه',
'unknown': 'نامعلومه',
'unlimited': 'نامحدوده',
'updated': 'ننی',
'using default': 'پخوانی استعمالول',
'wavy': 'موج لرونکی',
'white': 'سپین ',
'wider area, longer term, usually contain multiple Activities': 'پراخه ځای، اوږده دوره، معمولا مختلفه فعالیتونه په ځان کی لری',
'widowed': 'کنډه شوی',
'yes': 'هو',
}
| mit |
patrickcurl/ztruck | dj/lib/python2.7/site-packages/django/utils/deprecation.py | 82 | 2585 | import inspect
import warnings
class RemovedInDjango20Warning(PendingDeprecationWarning):
    # Marks features scheduled for removal in Django 2.0. Subclasses
    # PendingDeprecationWarning, so it is hidden by default.
    pass
class RemovedInDjango19Warning(DeprecationWarning):
    # Marks features scheduled for removal in Django 1.9. Subclasses
    # DeprecationWarning, so it is visible by default.
    pass


# Generic alias for "the next release" so call sites do not need updating
# every release cycle.
RemovedInNextVersionWarning = RemovedInDjango19Warning
class warn_about_renamed_method(object):
    """Decorator that emits a deprecation warning whenever a renamed
    method is invoked through its old name.

    Configured with the owning class name, the old and new method names,
    and the warning category to raise on each call.
    """

    def __init__(self, class_name, old_method_name, new_method_name, deprecation_warning):
        self.class_name = class_name
        self.old_method_name = old_method_name
        self.new_method_name = new_method_name
        self.deprecation_warning = deprecation_warning

    def __call__(self, f):
        def deprecated_call(*args, **kwargs):
            # Warn on every invocation; stacklevel 2 attributes the
            # warning to the caller rather than to this wrapper.
            message = "`%s.%s` is deprecated, use `%s` instead." % (
                self.class_name,
                self.old_method_name,
                self.new_method_name,
            )
            warnings.warn(message, self.deprecation_warning, 2)
            return f(*args, **kwargs)

        return deprecated_call
class RenameMethodsBase(type):
    """
    Handles the deprecation paths when renaming a method.

    It does the following:
        1) Define the new method if missing and complain about it.
        2) Define the old method if missing.
        3) Complain whenever an old method is called.

    See #15363 for more details.
    """

    # Sequence of (old_name, new_name, warning_category) triples; concrete
    # metaclasses override this.
    renamed_methods = ()

    def __new__(cls, name, bases, attrs):
        new_class = super(RenameMethodsBase, cls).__new__(cls, name, bases, attrs)

        # Walk the full MRO so renames declared on any ancestor are honored.
        # Note: setattr() below mutates the base classes themselves.
        for base in inspect.getmro(new_class):
            class_name = base.__name__
            for renamed_method in cls.renamed_methods:
                old_method_name = renamed_method[0]
                old_method = base.__dict__.get(old_method_name)
                new_method_name = renamed_method[1]
                new_method = base.__dict__.get(new_method_name)
                deprecation_warning = renamed_method[2]
                # Wrapper that warns when a method is called via its old name.
                wrapper = warn_about_renamed_method(class_name, *renamed_method)

                # Define the new method if missing and complain about it:
                # the class still implements only the old name, so alias the
                # new name to it and wrap the old name with a warning.
                if not new_method and old_method:
                    warnings.warn(
                        "`%s.%s` method should be renamed `%s`." %
                        (class_name, old_method_name, new_method_name),
                        deprecation_warning, 2)
                    setattr(base, new_method_name, old_method)
                    setattr(base, old_method_name, wrapper(old_method))

                # Define the old method as a wrapped call to the new method,
                # so legacy callers keep working but get a warning.
                if not old_method and new_method:
                    setattr(base, old_method_name, wrapper(new_method))

        return new_class
| apache-2.0 |
bqq100/android_kernel_imx | scripts/rt-tester/rt-tester.py | 904 | 5366 | #!/usr/bin/env python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
# Runtime flags; overridden by the command-line option loop further down.
quiet = 0       # -q: suppress progress() output
test = 0        # -t: syntax-check mode - print sysfs paths instead of writing
comments = 0    # -c: echo test-file comments once the first command has run

# Sysfs interface of the in-kernel rt-mutex tester. Tester thread <n> is
# driven through <sysfsprefix><n>/command and observed via .../status.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes
# Symbolic command name -> numeric opcode string written to the command file.
cmd_opcodes = {
    "schedother"    : "1",
    "schedfifo"     : "2",
    "lock"          : "3",
    "locknowait"    : "4",
    "lockint"       : "5",
    "lockintnowait" : "6",
    "lockcont"      : "7",
    "unlock"        : "8",
    "lockbkl"       : "9",
    "unlockbkl"     : "10",
    "signal"        : "11",
    "resetevent"    : "98",
    "reset"         : "99",
}

# Test opcodes
# name -> [status field letter, relation, fixed comparison argument].
# A None argument means the value is taken from the test line instead.
# Field letters match prefixes in the status string; "M" selects a decimal
# digit of the mutex-state value, "O" an opcode, "E" an event counter
# (P/N presumably denote priority fields - see the kernel rt-tester docs).
test_opcodes = {
    "prioeq"      : ["P" , "eq" , None],
    "priolt"      : ["P" , "lt" , None],
    "priogt"      : ["P" , "gt" , None],
    "nprioeq"     : ["N" , "eq" , None],
    "npriolt"     : ["N" , "lt" , None],
    "npriogt"     : ["N" , "gt" , None],
    "unlocked"    : ["M" , "eq" , 0],
    "trylock"     : ["M" , "eq" , 1],
    "blocked"     : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked"      : ["M" , "eq" , 4],
    "opcodeeq"    : ["O" , "eq" , None],
    "opcodelt"    : ["O" , "lt" , None],
    "opcodegt"    : ["O" , "gt" , None],
    "eventeq"     : ["E" , "eq" , None],
    "eventlt"     : ["E" , "lt" , None],
    "eventgt"     : ["E" , "gt" , None],
}
# Print usage information
def usage():
    """Print command-line usage to stdout (Python 2 print statements)."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Print a progress line unless quiet mode (-q) is active.

    NOTE(review): the parameter shadows the builtin `str`; harmless here,
    but worth renaming if this script is touched again.
    """
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status value against a test opcode.

    Args:
        val: raw status value read from the sysfs status file (string).
        top: test-opcode triple [field, relation, fixed-arg] out of
             test_opcodes, e.g. ["M", "eq", 4].
        arg: per-line argument from the test file. For field "M" it selects
             which decimal digit to inspect (0 = rightmost); for "O" it is
             a symbolic command name or numeric opcode; otherwise a plain
             integer to compare against.

    Returns:
        1 if the relation (eq/lt/gt) holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Mutex state: extract decimal digit number `arg`.
        # Floor division (//) keeps the behaviour identical on Python 2
        # and Python 3 - plain `/` would produce a float on 3.x and break
        # the digit extraction.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode field: accept a symbolic command name or a raw number.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)

    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: the given test file, or stdin when none given
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns. Each non-comment line has the colon-separated
# form  command:opcode:thread-id:data
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Echo comment lines only after the first real command (-c)
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test or wait for a status value
        # "t" tests once; "w" polls the status file until the condition holds
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                # Syntax-check mode: show the sysfs path, do not touch it
                print fname
                continue
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress(" " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
WillCh/286proj | dataMover/kafka/system_test/mirror_maker_testsuite/mirror_maker_test.py | 70 | 17661 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# mirror_maker_test.py
# ===================================
import inspect
import logging
import os
import signal
import subprocess
import sys
import time
import traceback
from system_test_env import SystemTestEnv
sys.path.append(SystemTestEnv.SYSTEM_TEST_UTIL_DIR)
from setup_utils import SetupUtils
from replication_utils import ReplicationUtils
import system_test_utils
from testcase_env import TestcaseEnv
# product specific: Kafka
import kafka_system_test_utils
import metrics
class MirrorMakerTest(ReplicationUtils, SetupUtils):
    """Kafka system test for mirror maker.

    For each testcase directory under this suite: start zookeepers, source
    and target brokers and mirror makers, produce messages (optionally
    bouncing the mirror maker between iterations), consume from the target
    cluster, then validate that the consumed data matches what was
    produced and collect logs/metrics.
    """

    # Absolute paths of this module and its testsuite directory; used to
    # discover testcase_<n> sub-directories and their config files.
    testModuleAbsPathName = os.path.realpath(__file__)
    testSuiteAbsPathName = os.path.abspath(os.path.dirname(testModuleAbsPathName))

    def __init__(self, systemTestEnv):
        # SystemTestEnv - provides cluster level environment settings
        # such as entity_id, hostname, kafka_home, java_home which
        # are available in a list of dictionary named
        # "clusterEntityConfigDictList"
        self.systemTestEnv = systemTestEnv

        super(MirrorMakerTest, self).__init__(self)

        # dict to pass user-defined attributes to logger argument: "extra"
        # NOTE(review): this is assigned to a local, yet runTest() logs with
        # `extra=self.d` - presumably a base class sets self.d; confirm.
        d = {'name_of_class': self.__class__.__name__}

    def signal_handler(self, signal, frame):
        """Ctrl+C handler: tear down all remote entities, then exit(1)."""
        self.log_message("Interrupt detected - User pressed Ctrl+c")

        # perform the necessary cleanup here when user presses Ctrl+c and it may be product specific
        self.log_message("stopping all entities - please wait ...")
        kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
        sys.exit(1)

    def runTest(self):
        """Discover all testcase_<n> directories in this suite and run each."""
        # ======================================================================
        # get all testcase directories under this testsuite
        # ======================================================================
        testCasePathNameList = system_test_utils.get_dir_paths_with_prefix(
            self.testSuiteAbsPathName, SystemTestEnv.SYSTEM_TEST_CASE_PREFIX)
        testCasePathNameList.sort()

        replicationUtils = ReplicationUtils(self)

        # =============================================================
        # launch each testcase one by one: testcase_1, testcase_2, ...
        # =============================================================
        for testCasePathName in testCasePathNameList:

            skipThisTestCase = False

            try:
                # ======================================================================
                # A new instance of TestcaseEnv to keep track of this testcase's env vars
                # and initialize some env vars as testCasePathName is available now
                # ======================================================================
                self.testcaseEnv = TestcaseEnv(self.systemTestEnv, self)
                self.testcaseEnv.testSuiteBaseDir = self.testSuiteAbsPathName
                self.testcaseEnv.initWithKnownTestCasePathName(testCasePathName)
                self.testcaseEnv.testcaseArgumentsDict = self.testcaseEnv.testcaseNonEntityDataDict["testcase_args"]

                # ======================================================================
                # SKIP if this case is IN testcase_to_skip.json or NOT IN testcase_to_run.json
                # ======================================================================
                testcaseDirName = self.testcaseEnv.testcaseResultsDict["_test_case_name"]

                if self.systemTestEnv.printTestDescriptionsOnly:
                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
                    continue
                elif self.systemTestEnv.isTestCaseToSkip(self.__class__.__name__, testcaseDirName):
                    self.log_message("Skipping : " + testcaseDirName)
                    skipThisTestCase = True
                    continue
                else:
                    self.testcaseEnv.printTestCaseDescription(testcaseDirName)
                    system_test_utils.setup_remote_hosts_with_testcase_level_cluster_config(self.systemTestEnv, testCasePathName)

                # ============================================================================== #
                # ============================================================================== #
                #            Product Specific Testing Code Starts Here:                          #
                # ============================================================================== #
                # ============================================================================== #

                # initialize self.testcaseEnv with user-defined environment variables (product specific)
                self.testcaseEnv.userDefinedEnvVarDict["zkConnectStr"] = ""
                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = False
                self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"] = False

                # initialize signal handler
                signal.signal(signal.SIGINT, self.signal_handler)

                # TestcaseEnv.testcaseConfigsList initialized by reading testcase properties file:
                #   system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
                self.testcaseEnv.testcaseConfigsList = system_test_utils.get_json_list_data(
                    self.testcaseEnv.testcasePropJsonPathName)

                # clean up data directories specified in zookeeper.properties and kafka_server_<n>.properties
                kafka_system_test_utils.cleanup_data_at_remote_hosts(self.systemTestEnv, self.testcaseEnv)

                # create "LOCAL" log directories for metrics, dashboards for each entity under this testcase
                # for collecting logs from remote machines
                kafka_system_test_utils.generate_testcase_log_dirs(self.systemTestEnv, self.testcaseEnv)

                # TestcaseEnv - initialize producer & consumer config / log file pathnames
                kafka_system_test_utils.init_entity_props(self.systemTestEnv, self.testcaseEnv)

                # generate remote hosts log/config dirs if not exist
                kafka_system_test_utils.generate_testcase_log_dirs_in_remote_hosts(self.systemTestEnv, self.testcaseEnv)

                # generate properties files for zookeeper, kafka, producer, consumer and mirror-maker:
                # 1. copy system_test/<suite_name>_testsuite/config/*.properties to
                #    system_test/<suite_name>_testsuite/testcase_<n>/config/
                # 2. update all properties files in system_test/<suite_name>_testsuite/testcase_<n>/config
                #    by overriding the settings specified in:
                #    system_test/<suite_name>_testsuite/testcase_<n>/testcase_<n>_properties.json
                kafka_system_test_utils.generate_overriden_props_files(self.testSuiteAbsPathName,
                    self.testcaseEnv, self.systemTestEnv)

                # =============================================
                # preparing all entities to start the test
                # =============================================
                self.log_message("starting zookeepers")
                kafka_system_test_utils.start_zookeepers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 2s")
                time.sleep(2)

                self.log_message("starting brokers")
                kafka_system_test_utils.start_brokers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 5s")
                time.sleep(5)

                self.log_message("creating topics")
                kafka_system_test_utils.create_topic_for_producer_performance(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 5s")
                time.sleep(5)

                self.log_message("starting mirror makers")
                kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 10s")
                time.sleep(10)

                # =============================================
                # starting producer
                # =============================================
                self.log_message("starting producer in the background")
                kafka_system_test_utils.start_producer_performance(self.systemTestEnv, self.testcaseEnv, False)
                msgProducingFreeTimeSec = self.testcaseEnv.testcaseArgumentsDict["message_producing_free_time_sec"]
                self.anonLogger.info("sleeping for " + msgProducingFreeTimeSec + " sec to produce some messages")
                time.sleep(int(msgProducingFreeTimeSec))

                # =============================================
                # A while-loop to bounce mirror maker as specified
                # by "num_iterations" in testcase_n_properties.json
                # =============================================
                i = 1
                numIterations = int(self.testcaseEnv.testcaseArgumentsDict["num_iteration"])
                bouncedEntityDownTimeSec = 15
                try:
                    bouncedEntityDownTimeSec = int(self.testcaseEnv.testcaseArgumentsDict["bounced_entity_downtime_sec"])
                except:
                    # optional testcase argument; fall back to the 15s default
                    pass

                while i <= numIterations:
                    self.log_message("Iteration " + str(i) + " of " + str(numIterations))

                    # =============================================
                    # Bounce Mirror Maker if specified in testcase config
                    # =============================================
                    bounceMirrorMaker = self.testcaseEnv.testcaseArgumentsDict["bounce_mirror_maker"]
                    self.log_message("bounce_mirror_maker flag : " + bounceMirrorMaker)
                    if (bounceMirrorMaker.lower() == "true"):
                        clusterConfigList = self.systemTestEnv.clusterEntityConfigDictList
                        mirrorMakerEntityIdList = system_test_utils.get_data_from_list_of_dicts(
                            clusterConfigList, "role", "mirror_maker", "entity_id")
                        stoppedMirrorMakerEntityId = mirrorMakerEntityIdList[0]

                        mirrorMakerPPid = self.testcaseEnv.entityMirrorMakerParentPidDict[stoppedMirrorMakerEntityId]
                        self.log_message("stopping mirror maker : " + mirrorMakerPPid)
                        kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, stoppedMirrorMakerEntityId, mirrorMakerPPid)
                        self.anonLogger.info("sleeping for " + str(bouncedEntityDownTimeSec) + " sec")
                        time.sleep(bouncedEntityDownTimeSec)

                        # starting previously terminated broker
                        self.log_message("starting the previously terminated mirror maker")
                        kafka_system_test_utils.start_mirror_makers(self.systemTestEnv, self.testcaseEnv, stoppedMirrorMakerEntityId)
                        self.anonLogger.info("sleeping for 15s")
                        time.sleep(15)

                    i += 1
                # while loop

                # =============================================
                # tell producer to stop
                # =============================================
                self.testcaseEnv.lock.acquire()
                self.testcaseEnv.userDefinedEnvVarDict["stopBackgroundProducer"] = True
                time.sleep(1)
                self.testcaseEnv.lock.release()
                time.sleep(1)

                # =============================================
                # wait for producer thread's update of
                # "backgroundProducerStopped" to be "True"
                # =============================================
                while 1:
                    self.testcaseEnv.lock.acquire()
                    self.logger.info("status of backgroundProducerStopped : [" + \
                        str(self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]) + "]", extra=self.d)
                    if self.testcaseEnv.userDefinedEnvVarDict["backgroundProducerStopped"]:
                        time.sleep(1)
                        self.testcaseEnv.lock.release()
                        self.logger.info("all producer threads completed", extra=self.d)
                        break
                    time.sleep(1)
                    self.testcaseEnv.lock.release()
                    time.sleep(2)

                self.anonLogger.info("sleeping for 15s")
                time.sleep(15)

                self.anonLogger.info("terminate Mirror Maker")
                # kill -15 (SIGTERM) every running MirrorMaker process on this host
                cmdStr = "ps auxw | grep Mirror | grep -v grep | tr -s ' ' | cut -f2 -d ' ' | xargs kill -15"
                subproc = system_test_utils.sys_call_return_subproc(cmdStr)
                for line in subproc.stdout.readlines():
                    line = line.rstrip('\n')
                    self.anonLogger.info("#### ["+line+"]")
                self.anonLogger.info("sleeping for 15s")
                time.sleep(15)

                # =============================================
                # starting consumer
                # =============================================
                self.log_message("starting consumer in the background")
                kafka_system_test_utils.start_console_consumer(self.systemTestEnv, self.testcaseEnv)
                self.anonLogger.info("sleeping for 10s")
                time.sleep(10)

                # =============================================
                # this testcase is completed - stop all entities
                # =============================================
                self.log_message("stopping all entities")
                for entityId, parentPid in self.testcaseEnv.entityBrokerParentPidDict.items():
                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)

                for entityId, parentPid in self.testcaseEnv.entityZkParentPidDict.items():
                    kafka_system_test_utils.stop_remote_entity(self.systemTestEnv, entityId, parentPid)

                # make sure all entities are stopped
                kafka_system_test_utils.ps_grep_terminate_running_entity(self.systemTestEnv)

                # =============================================
                # collect logs from remote hosts
                # =============================================
                kafka_system_test_utils.collect_logs_from_remote_hosts(self.systemTestEnv, self.testcaseEnv)

                # =============================================
                # validate the data matched and checksum
                # =============================================
                self.log_message("validating data matched")
                kafka_system_test_utils.validate_data_matched(self.systemTestEnv, self.testcaseEnv, replicationUtils)
                kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "source")
                kafka_system_test_utils.validate_broker_log_segment_checksum(self.systemTestEnv, self.testcaseEnv, "target")

                # =============================================
                # draw graphs
                # =============================================
                metrics.draw_all_graphs(self.systemTestEnv.METRICS_PATHNAME,
                                        self.testcaseEnv,
                                        self.systemTestEnv.clusterEntityConfigDictList)

                # build dashboard, one for each role
                metrics.build_all_dashboards(self.systemTestEnv.METRICS_PATHNAME,
                                             self.testcaseEnv.testCaseDashboardsDir,
                                             self.systemTestEnv.clusterEntityConfigDictList)

            except Exception as e:
                self.log_message("Exception while running test {0}".format(e))
                traceback.print_exc()
                self.testcaseEnv.validationStatusDict["Test completed"] = "FAILED"

            finally:
                # always tear down remote processes for cases that actually ran
                if not skipThisTestCase and not self.systemTestEnv.printTestDescriptionsOnly:
                    self.log_message("stopping all entities - please wait ...")
                    kafka_system_test_utils.stop_all_remote_running_processes(self.systemTestEnv, self.testcaseEnv)
| bsd-2-clause |
heyavery/lopenr | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sjisprober.py | 1777 | 3764 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Charset prober for Shift_JIS-encoded Japanese text.

    Combines three signals:
      * a byte-level coding state machine that validates SJIS sequences,
      * a character-distribution analyzer (frequency statistics), and
      * a contextual analyzer over adjacent characters, which also decides
        the exact charset-name variant to report.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        # State machine that walks the raw bytes and validates SJIS encoding.
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        # Frequency-based analyzer over completed characters.
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        # Context analyzer; also supplies the charset name (see below).
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        """Reset all internal state so a new byte stream can be analyzed."""
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        # Delegated to the context analyzer, which picks the SJIS variant name.
        return self._mContextAnalyzer.get_charset_name()

    def feed(self, aBuf):
        """Feed a chunk of bytes to the prober and return the detection state.

        Drives the coding state machine byte by byte; whenever a complete
        character is recognized (state eStart), the character's bytes are
        handed to both statistical analyzers.  May be called repeatedly with
        successive chunks: ``self._mLastChar`` carries the final byte of the
        previous chunk so characters split across chunk boundaries are still
        analyzed.
        """
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                # Byte sequence is illegal for SJIS: rule this charset out.
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                # The state machine saw a sequence unique to SJIS.
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A full character just completed; charLen is its byte length.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    # Character straddles the chunk boundary: reconstruct it
                    # from the carried-over byte(s) in _mLastChar.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    # NOTE(review): the context slice (i+1-charLen : i+3-charLen)
                    # is offset by one byte from the distribution slice
                    # (i-1 : i+1); this matches upstream chardet — presumably the
                    # context analyzer wants the character without its lead byte.
                    # Verify against jpcntx.SJISContextAnalysis before changing.
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)

        # Remember the last byte of this chunk for the next feed() call.
        self._mLastChar[0] = aBuf[aLen - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: once enough data has been seen and confidence is high,
            # commit to this charset without consuming more input.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        """Return the higher of the context and distribution confidences."""
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
JuliaPackageMirrors/MarketData.jl | docs/source/conf.py | 1 | 8411 | # -*- coding: utf-8 -*-
#
# marketdatajl documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 22 18:54:01 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# Third-party theme/extension packages: juliadoc supplies the Julia domain,
# help builder and the 'julia' HTML theme; sphinx_rtd_theme is kept on the
# theme path as a fallback.
import juliadoc
import sphinx_rtd_theme

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['juliadoc.julia',
              'juliadoc.jlhelp']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'marketdatajl'
copyright = u'2015, Multiple'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.2'
# The full version, including alpha/beta/rc tags.
release = '0.2.2'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# Use the Julia domain (provided by juliadoc) and Julia syntax highlighting
# by default, since this documents a Julia package.
primary_domain = 'jl'
highlight_language = 'julia'

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'julia'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [juliadoc.get_theme_dir(),
                   sphinx_rtd_theme.get_html_theme_path()]

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
# juliadoc supplies the sidebar layout used across Julia package docs.
html_sidebars = juliadoc.default_sidebars()

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'marketdatajldoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'marketdatajl.tex', u'marketdatajl Documentation',
     u'Multiple', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'marketdatajl', u'marketdatajl Documentation',
     [u'Multiple'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'marketdatajl', u'marketdatajl Documentation',
     u'Multiple', 'marketdatajl', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.