repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
bcl/pykickstart
|
pykickstart/handlers/rhel3.py
|
3
|
3887
|
#
# Chris Lumens <clumens@redhat.com>
#
# Copyright 2007 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat
# trademarks that are incorporated in the source code or documentation are not
# subject to the GNU General Public License and may only be used or replicated
# with the express permission of Red Hat, Inc.
#
__all__ = ["RHEL3Handler"]
from pykickstart import commands
from pykickstart.base import BaseHandler
from pykickstart.version import RHEL3
class RHEL3Handler(BaseHandler):
    """Kickstart handler for Red Hat Enterprise Linux 3.

    Maps each kickstart command name (and data-object name) to the class
    that parses it.  Most entries reuse the Fedora Core 3 (FC3_*)
    implementations; "mouse" is the only RHEL3-specific command class here.
    """
    # Kickstart grammar version implemented by this handler.
    version = RHEL3

    # Command name -> parser class.  Several aliases intentionally share a
    # class: "halt"/"poweroff"/"reboot"/"shutdown" -> FC3_Reboot, and
    # "cmdline"/"graphical"/"text" -> FC3_DisplayMode.
    commandMap = {
        "auth": commands.authconfig.FC3_Authconfig,
        "authconfig": commands.authconfig.FC3_Authconfig,
        "autopart": commands.autopart.FC3_AutoPart,
        "autostep": commands.autostep.FC3_AutoStep,
        "bootloader": commands.bootloader.FC3_Bootloader,
        "cdrom": commands.cdrom.FC3_Cdrom,
        "clearpart": commands.clearpart.FC3_ClearPart,
        "cmdline": commands.displaymode.FC3_DisplayMode,
        "device": commands.device.FC3_Device,
        "deviceprobe": commands.deviceprobe.FC3_DeviceProbe,
        "driverdisk": commands.driverdisk.FC3_DriverDisk,
        "firewall": commands.firewall.FC3_Firewall,
        "firstboot": commands.firstboot.FC3_Firstboot,
        "graphical": commands.displaymode.FC3_DisplayMode,
        "halt": commands.reboot.FC3_Reboot,
        "harddrive": commands.harddrive.FC3_HardDrive,
        "ignoredisk": commands.ignoredisk.FC3_IgnoreDisk,
        "install": commands.upgrade.FC3_Upgrade,
        "interactive": commands.interactive.FC3_Interactive,
        "keyboard": commands.keyboard.FC3_Keyboard,
        "lang": commands.lang.FC3_Lang,
        "langsupport": commands.langsupport.FC3_LangSupport,
        "lilo": commands.bootloader.FC3_Lilo,
        "lilocheck": commands.lilocheck.FC3_LiloCheck,
        "logvol": commands.logvol.FC3_LogVol,
        "method": commands.method.FC3_Method,
        "monitor": commands.monitor.FC3_Monitor,
        "mouse": commands.mouse.RHEL3_Mouse,
        "network": commands.network.FC3_Network,
        "nfs": commands.nfs.FC3_NFS,
        "part": commands.partition.FC3_Partition,
        "partition": commands.partition.FC3_Partition,
        "poweroff": commands.reboot.FC3_Reboot,
        "raid": commands.raid.FC3_Raid,
        "reboot": commands.reboot.FC3_Reboot,
        "rootpw": commands.rootpw.FC3_RootPw,
        "shutdown": commands.reboot.FC3_Reboot,
        "skipx": commands.skipx.FC3_SkipX,
        "text": commands.displaymode.FC3_DisplayMode,
        "timezone": commands.timezone.FC3_Timezone,
        "upgrade": commands.upgrade.FC3_Upgrade,
        "url": commands.url.FC3_Url,
        "vnc": commands.vnc.FC3_Vnc,
        "volgroup": commands.volgroup.FC3_VolGroup,
        "xconfig": commands.xconfig.FC3_XConfig,
        "zerombr": commands.zerombr.FC3_ZeroMbr,
    }

    # Data-object name -> class.  NetworkData uses the RHEL4 variant —
    # presumably an intentional cross-version reuse; TODO confirm.
    dataMap = {
        "DriverDiskData": commands.driverdisk.FC3_DriverDiskData,
        "LogVolData": commands.logvol.FC3_LogVolData,
        "NetworkData": commands.network.RHEL4_NetworkData,
        "PartData": commands.partition.FC3_PartData,
        "RaidData": commands.raid.FC3_RaidData,
        "VolGroupData": commands.volgroup.FC3_VolGroupData,
        "ZFCPData": commands.zfcp.FC3_ZFCPData,
    }
|
gpl-2.0
|
thomashaw/SecGen
|
modules/utilities/unix/ctf/metactf/files/repository/src_angr/solutions/17_angr_arbitrary_jump/scaffold17.py
|
3
|
5173
|
import angr
import claripy
import sys
def main(argv):
    """CTF scaffold (17_angr_arbitrary_jump): exploit an unconstrained state
    to redirect execution to print_good.

    NOTE(review): this is a teaching template.  The '???' placeholders and
    the 'Reimplement me!' stubs must be filled in by the student, and
    'print solution' near the end is Python-2 syntax — the file does not
    run as-is.
    """
    path_to_binary = argv[1]
    project = angr.Project(path_to_binary)
    initial_state = ???

    # An under-constrained (unconstrained) state occurs when there are too many
    # possible branches from a single instruction. This occurs, among other ways,
    # when the instruction pointer (on x86, eip) is completely symbolic, meaning
    # that user input can control the address of code the computer executes.
    # For example, imagine the following pseudo assembly:
    #
    # mov user_input, eax
    # jmp eax
    #
    # The value of what the user entered dictates the next instruction. This
    # is an unconstrained state. It wouldn't usually make sense for the execution
    # engine to continue. (Where should the program jump to if eax could be
    # anything?) Normally, when Angr encounters an unconstrained state, it throws
    # it out. In our case, we want to exploit the unconstrained state to jump to
    # a location of our choosing. We will get to how to disable Angr's default
    # behavior later. For now, test if a state is vulnerable by checking if we
    # can set the instruction pointer to the address of print_good in the binary.
    # (!)
    def check_vulnerable(state):
        # Reimplement me!
        return False

    # The save_unconstrained=True parameter specifies to Angr to not throw out
    # unconstrained states. Instead, it will move them to the list called
    # 'simulation.unconstrained'.
    simulation = project.factory.simgr(initial_state, save_unconstrained=True)

    # Explore will not work for us, since the method specified with the 'find'
    # parameter will not be called on an unconstrained state. Instead, we want to
    # explore the binary ourselves.
    # To get started, construct an exit condition to know when we've found a
    # solution. We will later be able to move states from the unconstrained list
    # to the simulation.found list. Alternatively, you can create a boolean value
    # that serves the same purpose.
    def has_found_solution():
        return len(simulation.found) > 0

    # Check if there are still unconstrained states left to check. Once we
    # determine a given unconstrained state is not exploitable, we can throw it
    # out. Use the simulation.unconstrained list.
    # (!)
    def has_unconstrained_to_check():
        # Reimplement me!
        pass

    # The list simulation.active is a list of all states that can be explored
    # further.
    # (!)
    def has_active():
        # Reimplement me!
        pass

    while (has_active() or has_unconstrained_to_check()) and (not has_found_solution()):
        # Iterate through all unconstrained states and check them.
        # (!)
        for unconstrained_state in ???:
            # Check if the unconstrained state is exploitable.
            # (!)
            if ???:
                # Found an exploit, exit the while loop and keep unconstrained_state as
                # the solution. The way the loops is currently set up, you should move
                # the exploitable unconstrained state to the 'found' stash.
                # A 'stash' should be a string that corresponds to a list that stores
                # all the states that the state group keeps. Values include:
                #  'active' = states that can be stepped
                #  'deadended' = states that have exited the program
                #  'errored' = states that encountered an error with Angr
                #  'unconstrained' = states that are unconstrained
                #  'found' = solutions
                #  anything else = whatever you want, perhaps you want a 'not_needed',
                #                  you can call it whatever you want
                # Moves anything in the stash 'from_stash' to the 'to_stash' if the
                # function should_move evaluates to true.
                # Reimplement this entire block of code.
                # (!)
                # def should_move(state):
                #     # Reimplement me if you decide to use me
                #     return False
                # simulation.move(from_stash, to_stash, filter_func=should_move)
                # # For example, the following moves everything in 'active' to
                # # 'not_needed' except if the state is in keep_states
                # keep_states = [ ... ]
                # def should_move(state):
                #     return state in keep_states
                # simulation.move('active', 'not_needed', filter_func=should_move)
                pass
            else:  # unconstrained state is not exploitable
                # Move the unconstrained_state that you tested that doesn't work to a
                # different stash, perhaps 'not_needed'.
                # Reimplement me.
                # (!)
                pass

        # Advance the simulation.
        simulation.step()

    if simulation.found:
        solution_state = simulation.found[0]

        # Ensure that every printed byte is within the acceptable ASCII range (A..Z)
        for byte in solution_state.posix.files[sys.stdin.fileno()].all_bytes().chop(bits=8):
            solution_state.add_constraints(byte >= ???, byte <= ???)

        # Constrain the instruction pointer to target the print_good function and
        # then solve for the user input (recall that this is
        # 'solution_state.posix.dumps(sys.stdin.fileno())')
        # (!)
        ...

        solution = ???
        print solution
    else:
        raise Exception('Could not find the solution')


if __name__ == '__main__':
    main(sys.argv)
|
gpl-3.0
|
Phosphenius/battle-snakes
|
src/skinpacker.py
|
1
|
2068
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Script for packing and unpacking skin textures
"""
import argparse
from pygame import image, Surface, Rect, transform
from pygame.locals import SRCALPHA
from utils import add_vecs
# Source rectangles in the packed sheet: each snake part is a 10x10 px tile.
HEAD = Rect(0, 0, 10, 10)
STRAIGHT1 = Rect(20, 20, 10, 10)
STRAIGHT2 = Rect(30, 20, 10, 10)
TAIL = Rect(20, 0, 10, 10)
TURN = Rect(0, 20, 10, 10)
# Blit offset (px) applied for each rotation angle when packing the four
# rotated copies of a tile into a 2x2 grid of 10 px cells.
ROT_OFFSET = {0: (0, 0), -90: (10, 0), -180: (10, 10), -270: (0, 10)}
def main():
    """Pack a 10x50 vertical strip of skin tiles into a rotation sheet,
    or (with -u) unpack a sheet back into the vertical strip.

    Reads the image at ``src`` and writes the converted image to ``dst``.
    """
    parser = argparse.ArgumentParser(description='Pack skin textures')
    parser.add_argument('src')
    parser.add_argument('dst')
    # FIX: the original used nargs='?' with type=bool; bool() on any
    # non-empty string is True, so '-u False' still unpacked.  A plain
    # store_true flag is the intended interface ('-u' alone is unchanged).
    parser.add_argument('-u', dest='unpack', action='store_true',
                        help='unpack a packed sheet instead of packing')
    args = parser.parse_args()

    src_img = image.load(args.src)

    if args.unpack:
        # Copy each part's rectangle out of the packed sheet into a 10x50
        # vertical strip: head, straight1, straight2, tail, turn.
        dst_img = Surface((10, 50), flags=SRCALPHA)
        dst_img.blit(src_img, (0, 0), HEAD)
        dst_img.blit(src_img, (0, 10), STRAIGHT1)
        dst_img.blit(src_img, (0, 20), STRAIGHT2)
        dst_img.blit(src_img, (0, 30), TAIL)
        dst_img.blit(src_img, (0, 40), TURN)
    else:
        dst_img = Surface((40, 40), flags=SRCALPHA)
        single_tile = Surface((10, 10), flags=SRCALPHA)

        # These parts get all four rotations packed around their base rect.
        seq = ((HEAD, (0, 0)), (TAIL, (0, 30)), (TURN, (0, 40)))
        for tile, tile_pos in seq:
            single_tile.fill((0, 0, 0, 0))
            single_tile.blit(src_img, (0, 0), Rect(tile_pos, (10, 10)))
            for rot, offset in list(ROT_OFFSET.items()):
                pos = add_vecs(tile.topleft, offset)
                dst_img.blit(transform.rotate(single_tile, rot), pos)

        # Straight pieces only need the original plus a -90 degree copy.
        for tile, tile_pos in ((STRAIGHT1, (0, 10)),
                               (STRAIGHT2, (0, 20))):
            single_tile.fill((0, 0, 0, 0))
            single_tile.blit(src_img, (0, 0), Rect(tile_pos, (10, 10)))
            dst_img.blit(single_tile, tile)
            pos = add_vecs(tile.topleft, (0, 10))
            dst_img.blit(transform.rotate(single_tile, -90), pos)

    image.save(dst_img, args.dst)


if __name__ == '__main__':
    main()
|
mit
|
NMGRL/pychron
|
pychron/pipeline/plot/overlays/mean_indicator_overlay.py
|
2
|
8405
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.abstract_overlay import AbstractOverlay
from chaco.plot_label import PlotLabel
from chaco.scatterplot import render_markers
from traits.api import Color, Instance, Str, Float, Int, Any
# ============= standard library imports ========================
# ============= local library imports ==========================
# from pychron.pipeline.plot import LabelMoveTool
from pychron.pipeline.plot.point_move_tool import LabelMoveTool
class MovableMixin:
    """Mixin that tracks a draggable screen position.

    ``current_screen_point`` is the computed position, while
    ``altered_screen_point`` (when set) records a user-moved position and
    takes precedence.  ``ox``/``oy`` remember the position at the start of
    a drag; ``offset_x``/``offset_y`` accumulate the drag delta.
    """
    current_screen_point = None
    altered_screen_point = None
    delta_screen_point = None
    ox = None
    oy = None
    offset_x = 0
    offset_y = 0

    def update_offset(self, dx, dy):
        """Accumulate a drag delta; capture the origin on the first move."""
        if self.ox is None:
            self.ox = self.x
            self.oy = self.y
        self.offset_x = self.offset_x + dx
        self.offset_y = self.offset_y + dy

    def get_current_point(self):
        """Return the effective position: altered point if set, otherwise
        the current screen point, otherwise the raw (x, y) attributes."""
        for candidate in (self.altered_screen_point,
                          self.current_screen_point):
            if candidate is not None:
                return candidate
        return (self.x, self.y)
try:
    class XYPlotLabel(PlotLabel, MovableMixin):
        """Movable plot label positioned via screen-space anchor traits.

        Assigning ``sx``/``sy`` repositions the label (with horizontal
        clamping to the component's right edge — see ``_sx_changed``).
        """
        sx = Float
        sy = Float

        def do_layout(self):
            """ Tells this component to do layout.
            Overrides PlotComponent.
            """
            if self.component is None:
                self._layout_as_component()
            return

        def hittest(self, pt):
            # True if pt falls within one preferred-size box of the label
            # position.
            x, y = pt
            w, h = self.get_preferred_size()
            return abs(x - self.x) < w and abs(y - self.y) < h

        def _sx_changed(self):
            # Offset by half the label width plus padding; clamp so the
            # label does not run past the component's right edge (x2).
            lw = self.get_preferred_size()[0] / 2.0
            x = self.sx + lw + 5
            x2 = self.component.x2
            if x + lw > x2:
                x = x2 - lw - 3
            self.x = x
            self.current_screen_point = (self.x, self.y)

        def _sy_changed(self):
            # Offset the label 10 px from the anchor y.
            self.y = self.sy + 10
            self.current_screen_point = (self.x, self.y)

        def set_altered(self):
            # Freeze the current position as a user-moved ("altered") point.
            self.altered_screen_point = (self.x, self.y)

except TypeError:
    # documentation auto doc hack
    class XYPlotLabel:
        pass
def render_vertical_marker(gc, points, color, line_width, outline_color):
    """Draw a short vertical tick centered on the first point in ``points``.

    The tick spans 5 px above and below the point, stroked with
    ``outline_color`` at ``line_width`` and filled with ``color``.
    """
    cx, cy = points[0]
    half = 5
    with gc:
        gc.set_line_width(line_width)
        gc.set_stroke_color(outline_color)
        gc.set_fill_color(color)
        gc.begin_path()
        gc.move_to(cx, cy - half)
        gc.line_to(cx, cy + half)
        gc.draw_path()
def render_error_bar(gc, x1, x2, y, color, line_width=1, end_caps=True):
    """Draw a horizontal error bar from ``x1`` to ``x2`` at height ``y``.

    ``end_caps`` may be a number (cap half-length in px), True (use the
    default of 3), or falsy to draw no caps.
    """
    with gc:
        gc.set_line_width(line_width)
        # FIX: the original called set_stroke_color(color) twice in a row;
        # one call is sufficient.
        gc.set_stroke_color(color)
        gc.begin_path()
        gc.move_to(x1, y)
        gc.line_to(x2, y)
        gc.draw_path()
        if end_caps:
            # FIX: bool is a subclass of int, so the original let a bare
            # True slip through as cap length 1; treat any non-numeric
            # (including True) as "use the default length".
            if isinstance(end_caps, bool) or \
                    not isinstance(end_caps, (float, int)):
                end_caps = 3
            render_end_cap(gc, x1, y, length=end_caps)
            render_end_cap(gc, x2, y, length=end_caps)
def render_end_cap(gc, x, y, length=3):
    """Draw a vertical cap centered at (x, y), extending ``length`` px
    above and below the point."""
    with gc:
        half = length
        gc.begin_path()
        gc.move_to(x, y - half)
        gc.line_to(x, y + half)
        gc.draw_path()
try:
    class MeanIndicatorOverlay(AbstractOverlay, MovableMixin):
        """Overlay drawing a mean marker with an error bar and a movable
        text label.

        The marker is placed at data-space ``x`` (mapped to screen by the
        component); the error bar spans ``error * max(1, nsigma)`` on each
        side.
        """
        color = Color
        label = Instance(PlotLabel)
        text = Str
        x = Float            # data-space x of the mean
        error = Float        # half-width of the error bar (data units)
        nsigma = Int         # sigma multiplier applied to ``error``
        marker = Str('vertical')
        end_cap_length = Int(4)
        label_tool = Any

        def clear(self):
            # Forget any user-dragged position.
            self.altered_screen_point = None

        def hittest(self, pt, tol=5):
            # True if pt is within tol px of the current marker position.
            # NOTE(review): returns None (not False) when there is no
            # current point.
            x, y = pt
            if self.get_current_point():
                gx, gy = self.get_current_point()
                return abs(gx - x) < tol and abs(gy - y) < tol

        def _text_changed(self):
            # Lazily build the label (plus its move tool) on first text
            # assignment; afterwards just update the text.
            label = self.label
            if label is None:
                label = XYPlotLabel(component=self.component,
                                    font=self.font,
                                    text=self.text,
                                    color=self.color,
                                    id='{}_label'.format(self.id))
                self.label = label
                self.overlays.append(label)
                tool = LabelMoveTool(component=label)
                self.tools.append(tool)
                self.label_tool = tool
            else:
                label.text = self.text

        def _color_changed(self):
            # Normalize the trait color's 0-255 RGBA channels to 0.0-1.0
            # for the kiva graphics context.
            color = self.color
            self._color = [x / 255. for x in (color.red(), color.green(), color.blue(), color.alpha())]

        def overlay(self, other_component, gc, view_bounds=None, mode="normal"):
            """Render the marker and error bar, clipped to the component,
            then let child overlays (the label) draw themselves."""
            with gc:
                oc = other_component
                gc.clip_to_rect(oc.x, oc.y, oc.x2, oc.y2)
                points = self._gather_data()
                marker = self.marker
                color = self._color
                line_width = 1
                outline_color = self._color
                if marker != 'vertical':
                    marker_size = 3
                    render_markers(gc, points, marker, marker_size,
                                   color, line_width, outline_color)
                else:
                    render_vertical_marker(gc, points,
                                           color, line_width, outline_color)
                x, y = self.get_current_point()
                # Error bar half-width in data space, scaled by nsigma.
                e = self.error * max(1, self.nsigma)
                p1, p2 = self.component.map_screen([(self.x - e, 0), (self.x + e, 0)])
                render_error_bar(gc, p1[0], p2[0], y,
                                 self._color,
                                 end_caps=self.end_cap_length)
            for o in self.overlays:
                o.overlay(other_component, gc, view_bounds=view_bounds, mode=mode)

        def _gather_data(self):
            # Map the mean's data-space x to screen; keep the label's
            # anchor in sync unless the user has dragged it.
            comp = self.component
            x = comp.map_screen([(self.x, 0)])[0, 0]
            if self.altered_screen_point is None:
                if self.label:
                    if not self.label.altered_screen_point:
                        self.label.sx = x
                        self.label.sy = self.y
                self.current_screen_point = (x, self.y)
                return [(x, self.y)]
            else:
                if self.label:
                    if not self.label.altered_screen_point:
                        self.label.sx, self.label.sy = self.altered_screen_point
                return [(x, self.altered_screen_point[1])]

        def set_x(self, x):
            """Move the mean to data-space ``x`` and update the cached
            screen position (preserving any dragged y)."""
            self.x = x
            comp = self.component
            x = comp.map_screen([(self.x, 0)])[0, 0]
            if self.label:
                if not self.label.altered_screen_point:
                    self.label.sx = x
                    self.label.sy = self.y
            if self.altered_screen_point:
                self.altered_screen_point = (x, self.altered_screen_point[1])
            else:
                self.current_screen_point = (x, self.y)

except TypeError:
    # documentation auto doc hack
    class MeanIndicatorOverlay:
        pass
# ============= EOF =============================================
|
apache-2.0
|
mzdaniel/oh-mainline
|
vendor/packages/django-model-utils/model_utils/models.py
|
2
|
4347
|
import warnings
from datetime import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import FieldDoesNotExist
from django.core.exceptions import ImproperlyConfigured
from model_utils.managers import manager_from, InheritanceCastMixin, \
QueryManager
from model_utils.fields import AutoCreatedField, AutoLastModifiedField, \
StatusField, MonitorField
class InheritanceCastModel(models.Model):
    """
    An abstract base class that provides a ``real_type`` FK to ContentType.

    For use in trees of inherited models, to be able to downcast
    parent instances to their child types.

    Pending deprecation; use InheritanceManager instead.
    """
    real_type = models.ForeignKey(ContentType, editable=False, null=True)
    objects = manager_from(InheritanceCastMixin)

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "InheritanceCastModel is pending deprecation. "
            "Use InheritanceManager instead.",
            PendingDeprecationWarning,
            stacklevel=2)
        super(InheritanceCastModel, self).__init__(*args, **kwargs)

    def save(self, *args, **kwargs):
        # Record the concrete class only on first save (no pk assigned yet).
        if not self.id:
            self.real_type = self._get_real_type()
        super(InheritanceCastModel, self).save(*args, **kwargs)

    def _get_real_type(self):
        # ContentType of the instance's concrete (leaf) class.
        return ContentType.objects.get_for_model(type(self))

    def cast(self):
        # Re-fetch this row as an instance of its concrete subclass.
        return self.real_type.get_object_for_this_type(pk=self.pk)

    class Meta:
        abstract = True
class TimeStampedModel(models.Model):
    """
    An abstract base class model that provides self-updating
    ``created`` and ``modified`` fields.
    """
    # Field behavior is implemented by the model_utils field classes.
    created = AutoCreatedField(_('created'))
    modified = AutoLastModifiedField(_('modified'))

    class Meta:
        abstract = True
class TimeFramedModel(models.Model):
    """
    An abstract base class model that provides ``start``
    and ``end`` fields to record a timeframe.
    """
    # Both ends are optional, so open-ended timeframes are representable;
    # the 'timeframed' manager is installed by add_timeframed_query_manager()
    # below via the class_prepared signal.
    start = models.DateTimeField(_('start'), null=True, blank=True)
    end = models.DateTimeField(_('end'), null=True, blank=True)

    class Meta:
        abstract = True
class StatusModel(models.Model):
    """
    An abstract base class model with a ``status`` field that
    automatically uses a ``STATUS`` class attribute of choices, a
    ``status_changed`` date-time field that records when ``status``
    was last modified, and an automatically-added manager for each
    status that returns objects with that status only.
    """
    # The per-status managers are installed by add_status_query_managers()
    # below, connected to the class_prepared signal.
    status = StatusField(_('status'))
    status_changed = MonitorField(_('status changed'), monitor='status')

    class Meta:
        abstract = True
def add_status_query_managers(sender, **kwargs):
    """
    class_prepared hook: attach one QueryManager per ``STATUS`` choice to
    any StatusModel subclass, each filtering on its status value.
    """
    if not issubclass(sender, StatusModel):
        return
    for value, name in getattr(sender, 'STATUS', ()):
        # A real model field with the status's display name would clash
        # with the generated manager, so treat that as a config error.
        conflicting = True
        try:
            sender._meta.get_field(name)
        except FieldDoesNotExist:
            conflicting = False
        if conflicting:
            raise ImproperlyConfigured("StatusModel: Model '%s' has a field "
                                       "named '%s' which conflicts with a "
                                       "status of the same name."
                                       % (sender.__name__, name))
        sender.add_to_class(value, QueryManager(status=value))
def add_timeframed_query_manager(sender, **kwargs):
    """
    Add a QueryManager for a specific timeframe.

    Installs a ``timeframed`` manager on every TimeFramedModel subclass
    that returns only rows whose (start, end) window contains "now".
    """
    if not issubclass(sender, TimeFramedModel):
        return
    try:
        sender._meta.get_field('timeframed')
        raise ImproperlyConfigured("Model '%s' has a field named "
                                   "'timeframed' which conflicts with "
                                   "the TimeFramedModel manager."
                                   % sender.__name__)
    except FieldDoesNotExist:
        pass
    # datetime.now is passed as a callable (no parentheses) so "now" is
    # evaluated each time the queryset runs, not at class-preparation time.
    # NULL start/end means that side of the window is unbounded.
    sender.add_to_class('timeframed', QueryManager(
        (models.Q(start__lte=datetime.now) | models.Q(start__isnull=True)) &
        (models.Q(end__gte=datetime.now) | models.Q(end__isnull=True))
    ))


# Run both manager-installing hooks for every model as it is prepared.
models.signals.class_prepared.connect(add_status_query_managers)
models.signals.class_prepared.connect(add_timeframed_query_manager)
|
agpl-3.0
|
rr326/servi
|
servi/config.py
|
1
|
6717
|
import os
import os.path
import logging
from logging import warning as warn
import collections
import yaml
from jinja2 import Environment, DictLoader
from servi.exceptions import MasterNotFound, ServiError
'''
Global configuration for servi files
Use as import config as c
Note - this will also read in additional variables (and overrides) from
SERVIFILE
Proper dir structure
servi installation -
eg: pyvenv/py3.4/lib/python3.4/site-packages/servi-0.1-py3.4.egg/servi
Project
...\masterdir
\servi
\servi # Kinda ugly that it has the same name, but helpful
for argparse
\servi_templates
'''
# Root of the installed servi package (parent of this module's directory).
SERVI_DIR = os.path.normpath(os.path.join(os.path.dirname(__file__), '..'))
# Built-in template tree shipped with servi.
TMPL_DIR_SITE = \
    os.path.normpath(os.path.join(SERVI_DIR, 'servi_templates'))
BOX_DIR = os.path.abspath(os.path.join(SERVI_DIR, 'servi_boxes'))
# These must be initialized and then set here as c.MASTER_DIR =xxx
MASTER_DIR = None
MANIFEST_FILE = "servi_data.json"
VERSION_FILE = "TEMPLATE_VERSION.json"
SERVIFILE = "Servifile.yml"
SERVIFILE_GLOBAL = "Servifile_globals.yml"
# Per-user global config: ~/Servifile_globals.yml
SERVIFILE_GLOBAL_FULL = os.path.expanduser(os.path.join('~', SERVIFILE_GLOBAL))
# Sentinel names used as the `source` argument of pathfor()/getconfig().
TEMPLATE = 'template'
MASTER = 'master'
MISSING_HASH = 'FILE NOT FOUND'
# The following must be set in Servifile.yml
SERVI_IGNORE_FILES = []
DIFFTOOL = 'git diff'
DEFAULT_LOG_LEVEL = logging.INFO
# Variables that should be set by Servifile.yml and Servifile_globals.yml
# (load_user_config() injects their values into this module's globals).
HOSTS = None
MAIN_USERNAME = None
MAIN_RSA_KEY_FILE = None
LOCAL_DIR = None
SITE_SUFFIX = None
#############################################################################
#############################################################################
#############################################################################
LOOKUP_FAILED_MESSAGE = 'Environment variable not found'


def lookup(ltype, arg1):
    """Mimic the 'env' portion of Ansible's lookup() inside Servifile
    templates.

    Returns os.environ[arg1], or LOOKUP_FAILED_MESSAGE when the variable
    is unset.  Raises ServiError for any lookup type other than 'env'
    (case/whitespace-insensitive).
    """
    if type(ltype) is not str or ltype.strip().lower() != 'env':
        # FIX: the original concatenated adjacent string literals without
        # separating spaces and never called .format(), so the message was
        # garbled and '{0}' appeared literally in the output.
        raise ServiError('Found "lookup" function that servi does not '
                         'understand ({0}). Currently servi only processes '
                         'lookup("env", variable") - which mimics a portion '
                         'of ansibles lookup function.'.format(ltype))
    retval = os.environ.get(arg1)
    if retval is None:
        retval = LOOKUP_FAILED_MESSAGE
    return retval
def setup_jinja(env=None, template_text=None):
    """Return a Jinja2 environment with the custom ``lookup`` global installed.

    If ``env`` is None, a new environment is built whose loader serves
    ``template_text`` under the name SERVIFILE; otherwise the given
    environment is reused and only the global is (re)attached.
    """
    if env is None:
        env = Environment(loader=DictLoader({SERVIFILE: template_text}))
    env.globals['lookup'] = lookup
    return env
def set_master_dir(set_dir_to):
    """Set the module-level MASTER_DIR to ``set_dir_to``.

    NOTE(review): the original docstring described ancestor-searching, but
    this function only assigns; use find_master_dir() for the search.
    """
    global MASTER_DIR
    MASTER_DIR = set_dir_to
# TODO - remove this function
def load_user_config():
    """
    Reads and processes Servifile.yml, adding all variables to this modules
    globals()

    For Servifile_Globals, Servifile:
    Step 1: Render the file as a Jinja2 template
            (with custom function: lookup('env', envvar) )
    Step 2: Load as a yaml doc
    Step 3: Add to this module's globals()

    Note - Servifile.yml can essentially update or override Servifile_globals
    but it CAN NOT delete a key from Servifile_globals
    ie: combined >= servifile_globals
    """
    # Global config
    global_config = {}
    if not os.path.exists(SERVIFILE_GLOBAL_FULL):
        warn('No global servifile found in {0}'.format(SERVIFILE_GLOBAL_FULL))
    else:
        with open(SERVIFILE_GLOBAL_FULL) as f:
            servi_raw = f.read()
        global_config = process_config(servi_raw)

    # Project config — requires MASTER_DIR to have been set beforehand.
    if MASTER_DIR:
        user_config = getconfig(
            SERVIFILE, TEMPLATE, MASTER, TMPL_DIR_SITE, MASTER_DIR)
    else:
        warn('Getting config but MASTER_DIR is empty.')
        user_config = {}

    # Project values win over globals, but no keys are ever removed.
    combined_config = deep_update(global_config, user_config)

    # Publish every key as a module-level attribute of this config module.
    for k, v in combined_config.items():
        globals()[k] = v
    return global_config, user_config, combined_config
def find_master_dir(start_dir, fail_ok=False):
    """Locate the project root: the nearest directory at or above
    ``start_dir`` containing a Servifile.yml.

    Returns the absolute path.  When nothing is found, returns None if
    ``fail_ok`` is true, otherwise raises MasterNotFound.
    """
    found = find_ancestor_servifile(start_dir)
    if found:
        return os.path.abspath(found)
    if fail_ok:
        return None
    raise MasterNotFound()
def find_ancestor_servifile(starting_dir):
    # Convenience wrapper: search upward for the Servifile.yml marker.
    return find_ancestor_with(starting_dir, SERVIFILE)
def find_ancestor_with(starting_dir, target):
    """Return the first directory at or above ``starting_dir`` containing
    ``target`` (a file or directory name), as an absolute path.

    Returns None if nothing is found.  The filesystem root '/' itself is
    never checked (POSIX-only walk).
    """
    current = os.path.abspath(starting_dir)
    while current != '/':
        if os.path.exists(os.path.join(current, target)):
            return current
        parent = os.path.join(current, '..')
        current = os.path.abspath(os.path.normpath(parent))
    return None
def servi_file_exists_in(path):
    # True if `path` itself contains a Servifile.yml (no ancestor search).
    return os.path.exists(os.path.join(path, SERVIFILE))


def global_servi_file_exists():
    # True if the per-user global Servifile (~/Servifile_globals.yml) exists.
    return os.path.exists(SERVIFILE_GLOBAL_FULL)
"""
This is an ugly, parameterized version of pathfor, and a getconfig() which
relies on it. I need it since the other pathfor uses config parameters
(which this bootstraps).
Only use this in the config module.
After that, use commands.utils.utils.pathfor()
"""
def pathfor(fname, source, template, master, template_dir, master_dir):
    """Resolve ``fname`` against the template or master directory.

    ``source`` must equal either ``template`` or ``master`` (sentinel
    strings supplied by the caller); the matching directory is used as the
    base.  Returns a normalized path.
    """
    assert source in [template, master]
    base_dir = template_dir if source == template else master_dir
    return os.path.normpath(os.path.join(base_dir, fname))
def getconfig(fname, template, master, template_dir, master_dir):
    """Read ``fname`` and return its processed (Jinja2 + YAML) contents.

    NOTE(review): ``master`` is passed as pathfor's ``source`` argument,
    so the file is always read from ``master_dir`` — confirm this is
    intentional.
    """
    with open(pathfor(fname, master,
                      template, master, template_dir, master_dir)) as f:
        servi_raw = f.read()
    return process_config(servi_raw)
def process_config(raw_text):
    """Render ``raw_text`` as a Jinja2 template (with the custom ``lookup``
    global), then parse the result as YAML and return the data."""
    env = setup_jinja(env=None, template_text=raw_text)
    tmpl = env.get_template(SERVIFILE)
    rendered = tmpl.render()
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary Python objects from tagged input; yaml.safe_load is the
    # recommended call for config files.
    data = yaml.load(rendered)
    return data
def deep_update(orig_dict, new_dict):
    """Recursively merge ``new_dict`` into ``orig_dict`` and return it.

    eg: {a: {a1: 1}} , {a: {a2: 2}} --> {a: {a1: 1, a2: 2}}
    Note - this means new_dict >= orig_dict
    http://stackoverflow.com/a/3233356/1400991

    ``orig_dict`` is mutated in place and also returned.
    """
    # FIX: the ABC aliases were removed from the top-level collections
    # module in Python 3.10; collections.abc is the supported location.
    # Imported locally so this change is self-contained.
    from collections.abc import Mapping
    for key, val in new_dict.items():
        if isinstance(val, Mapping):
            # Merge nested mappings instead of replacing them wholesale.
            tmp = deep_update(orig_dict.get(key, {}), val)
            orig_dict[key] = tmp
        else:
            orig_dict[key] = new_dict[key]
    return orig_dict
|
mit
|
StryKaizer/Brew
|
djangoproject/brew/migrations/0007_auto__chg_field_mashingtemplog_degrees.py
|
1
|
2586
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: change MashingTempLog.degrees from
    Decimal(max_digits=5, decimal_places=2) to Float (and back)."""

    def forwards(self, orm):
        # Changing field 'MashingTempLog.degrees'
        db.alter_column('brew_mashingtemplog', 'degrees', self.gf('django.db.models.fields.FloatField')())

    def backwards(self, orm):
        # Changing field 'MashingTempLog.degrees'
        db.alter_column('brew_mashingtemplog', 'degrees', self.gf('django.db.models.fields.DecimalField')(max_digits=5, decimal_places=2))

    # Frozen ORM snapshot used by South to reconstruct model state at this
    # point in migration history.
    models = {
        'brew.batch': {
            'Meta': {'object_name': 'Batch'},
            'brew': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.Brew']"}),
            'brewing_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'number': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
        },
        'brew.brew': {
            'Meta': {'object_name': 'Brew'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'brew.mashingschemeitem': {
            'Meta': {'object_name': 'MashingSchemeItem'},
            'brew': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.Brew']"}),
            'degrees': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'minutes': ('django.db.models.fields.CharField', [], {'max_length': '3'})
        },
        'brew.mashingtemplog': {
            'Meta': {'object_name': 'MashingTempLog'},
            'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.Batch']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'degrees': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'brew.variable': {
            'Meta': {'object_name': 'Variable'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['brew']
|
isc
|
Foxfanmedium/python_training
|
OnlineCoursera/mail_ru/Python_1/Week_3/playground/env/Lib/site-packages/dateutil/tz/_factories.py
|
19
|
1434
|
from datetime import timedelta
class _TzSingleton(type):
def __init__(cls, *args, **kwargs):
cls.__instance = None
super(_TzSingleton, cls).__init__(*args, **kwargs)
def __call__(cls):
if cls.__instance is None:
cls.__instance = super(_TzSingleton, cls).__call__()
return cls.__instance
class _TzFactory(type):
def instance(cls, *args, **kwargs):
"""Alternate constructor that returns a fresh instance"""
return type.__call__(cls, *args, **kwargs)
class _TzOffsetFactory(_TzFactory):
    """Metaclass caching tzoffset-style instances keyed on
    (name, offset-in-seconds)."""

    def __init__(cls, *args, **kwargs):
        # FIX: forward to type.__init__ like the sibling _TzSingleton does;
        # the original silently dropped the super call.
        super(_TzOffsetFactory, cls).__init__(*args, **kwargs)
        cls.__instances = {}

    def __call__(cls, name, offset):
        # Normalize timedelta offsets to seconds so equal offsets given in
        # different forms share one cache key.
        if isinstance(offset, timedelta):
            key = (name, offset.total_seconds())
        else:
            key = (name, offset)

        instance = cls.__instances.get(key, None)
        if instance is None:
            # setdefault ensures only one instance survives if two callers
            # race past the lookup above.
            instance = cls.__instances.setdefault(key,
                                                  cls.instance(name, offset))
        return instance
class _TzStrFactory(_TzFactory):
    """Metaclass caching tzstr-style instances keyed on
    (string, posix_offset)."""

    def __init__(cls, *args, **kwargs):
        # FIX: forward to type.__init__ like the sibling _TzSingleton does;
        # the original silently dropped the super call.
        super(_TzStrFactory, cls).__init__(*args, **kwargs)
        cls.__instances = {}

    def __call__(cls, s, posix_offset=False):
        key = (s, posix_offset)

        instance = cls.__instances.get(key, None)
        if instance is None:
            # setdefault ensures only one instance survives if two callers
            # race past the lookup above.
            instance = cls.__instances.setdefault(key,
                                                  cls.instance(s, posix_offset))
        return instance
|
apache-2.0
|
evilhero/mylar
|
lib/requests/packages/chardet/langbulgarianmodel.py
|
2965
|
12784
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
# Language-model bundle for Bulgarian text in the ISO-8859-5 encoding,
# consumed by chardet's single-byte charset prober.
Latin5BulgarianModel = {
  'charToOrderMap': Latin5_BulgarianCharToOrderMap,  # byte value -> frequency-order index
  'precedenceMatrix': BulgarianLangModel,  # bigram frequency classes (values 0-3)
  'mTypicalPositiveRatio': 0.969392,  # share covered by the first 512 sequences (see table header)
  'keepEnglishLetter': False,  # presumably: drop ASCII letters when sampling -- confirm in prober
  'charsetName': "ISO-8859-5"
}
# Same Bulgarian language model, paired with the windows-1251 byte mapping.
Win1251BulgarianModel = {
  'charToOrderMap': win1251BulgarianCharToOrderMap,  # byte value -> frequency-order index
  'precedenceMatrix': BulgarianLangModel,  # shared bigram frequency classes (values 0-3)
  'mTypicalPositiveRatio': 0.969392,  # share covered by the first 512 sequences (see table header)
  'keepEnglishLetter': False,  # presumably: drop ASCII letters when sampling -- confirm in prober
  'charsetName': "windows-1251"
}
# flake8: noqa
|
gpl-3.0
|
mattcongy/itshop
|
docker-images/taigav2/taiga-back/taiga/front/sitemaps/epics.py
|
2
|
1994
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db.models import Q
from django.apps import apps
from taiga.front.templatetags.functions import resolve
from .base import Sitemap
class EpicsSitemap(Sitemap):
    """Sitemap section for epics that anonymous visitors are allowed to see."""
    def items(self):
        """Return the queryset of publicly visible, non-blocked epics."""
        epic_model = apps.get_model("epics", "Epic")
        # Get epics of public projects OR private projects if anon user can view them
        queryset = epic_model.objects.filter(Q(project__is_private=False) |
                                             Q(project__is_private=True,
                                               project__anon_permissions__contains=["view_epics"]))
        # Exclude blocked projects
        queryset = queryset.filter(project__blocked_code__isnull=True)
        # Project data is needed (location() reads obj.project.slug)
        queryset = queryset.select_related("project")
        return queryset
    def location(self, obj):
        # Resolve the front-end URL for this epic from its project slug and ref.
        return resolve("epic", obj.project.slug, obj.ref)
    def lastmod(self, obj):
        # Timestamp for the sitemap <lastmod> entry.
        return obj.modified_date
    def changefreq(self, obj):
        # Advertised change frequency for crawlers.
        return "daily"
    def priority(self, obj):
        # Fixed, relatively low crawl priority for epic pages.
        return 0.4
|
mit
|
hsaputra/tensorflow
|
tensorflow/python/keras/_impl/keras/datasets/cifar100.py
|
13
|
2130
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CIFAR100 small image classification dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.datasets.cifar import load_batch
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
def load_data(label_mode='fine'):
  """Loads CIFAR100 dataset.

  Arguments:
      label_mode: one of "fine", "coarse". Selects which label set
          (`fine_labels` or `coarse_labels`) is read from the batches.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Raises:
      ValueError: in case of invalid `label_mode`.
  """
  if label_mode not in ['fine', 'coarse']:
    # Fix: the original message read '"fine" "coarse"' (missing comma).
    raise ValueError('label_mode must be one of "fine", "coarse".')

  dirname = 'cifar-100-python'
  origin = 'https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
  # Downloads (and caches) the archive; returns the extracted directory.
  path = get_file(dirname, origin=origin, untar=True)

  fpath = os.path.join(path, 'train')
  x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')

  fpath = os.path.join(path, 'test')
  x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')

  # Labels come back flat; reshape to column vectors of shape (n, 1).
  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    # Batches are channels-first (the transpose implies NCHW on disk);
    # move the channel axis last for channels_last backends.
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
|
apache-2.0
|
endlessm/chromium-browser
|
third_party/catapult/telemetry/telemetry/internal/backends/chrome/fuchsia_browser_finder.py
|
1
|
3039
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds Fuchsia browsers that can be started and controlled by telemetry."""
from telemetry.core import fuchsia_interface
from telemetry.core import platform
from telemetry.internal.backends.chrome import fuchsia_browser_backend
from telemetry.internal.browser import browser
from telemetry.internal.browser import possible_browser
from telemetry.internal.platform import fuchsia_device
class UnsupportedExtensionException(Exception):
  """Raised when browser options request extensions, which Fuchsia browsers reject."""
  pass
class PossibleFuchsiaBrowser(possible_browser.PossibleBrowser):
  """A Fuchsia browser that telemetry can start and control."""
  def __init__(self, browser_type, finder_options, fuchsia_platform):
    # finder_options is accepted for interface compatibility but unused.
    del finder_options
    super(PossibleFuchsiaBrowser, self).__init__(browser_type, 'fuchsia', True)
    self._platform = fuchsia_platform
    self._platform_backend = (
        fuchsia_platform._platform_backend) # pylint: disable=protected-access
  def __repr__(self):
    return 'PossibleFuchsiaBrowser(app_type=%s)' % self.browser_type
  @property
  def browser_directory(self):
    # Not applicable for this browser; always None.
    return None
  @property
  def profile_directory(self):
    # Not applicable for this browser; always None.
    return None
  def _InitPlatformIfNeeded(self):
    pass
  def _GetPathsForOsPageCacheFlushing(self):
    # OS page-cache flushing is not implemented for Fuchsia.
    raise NotImplementedError()
  def Create(self):
    """Start the browser process."""
    browser_backend = fuchsia_browser_backend.FuchsiaBrowserBackend(
        self._platform_backend, self._browser_options,
        self.browser_directory, self.profile_directory)
    try:
      return browser.Browser(
          browser_backend, self._platform_backend, startup_args=(),
          find_existing=False)
    except Exception:
      # Tear the backend down if Browser construction fails, then re-raise.
      browser_backend.Close()
      raise
  def CleanUpEnvironment(self):
    if self._browser_options is None:
      return  # No environment to clean up.
    try:
      self._TearDownEnvironment()
    finally:
      # Mark the environment as cleaned up even if teardown raised.
      self._browser_options = None
  def SupportsOptions(self, browser_options):
    # Extensions are unsupported; fail loudly rather than silently ignoring.
    if len(browser_options.extensions_to_load) > 0:
      raise UnsupportedExtensionException(
          'Fuchsia browsers do not support extensions.')
    return True
  def UpdateExecutableIfNeeded(self):
    # Updating the browser is currently handled in the Chromium repository
    # instead of Catapult.
    pass
  @property
  def last_modification_time(self):
    # Sentinel: no modification-time tracking for this browser.
    return -1
def SelectDefaultBrowser(possible_browsers):
  """Return the first 'web-engine-shell' browser, or None if none exists."""
  matches = (candidate for candidate in possible_browsers
             if candidate.browser_type == 'web-engine-shell')
  return next(matches, None)
def FindAllBrowserTypes():
  # All browser type names known for Fuchsia, as declared by fuchsia_interface.
  return fuchsia_interface.FUCHSIA_BROWSERS
def FindAllAvailableBrowsers(finder_options, device):
  """Finds all available Fuchsia browsers."""
  browsers = []
  # Only Fuchsia devices can host these browsers; anything else yields [].
  if not isinstance(device, fuchsia_device.FuchsiaDevice):
    return browsers
  fuchsia_platform = platform.GetPlatformForDevice(device, finder_options)
  # Currently only the web-engine-shell variant is offered.
  browsers.extend([
      PossibleFuchsiaBrowser(
          'web-engine-shell', finder_options, fuchsia_platform)
  ])
  return browsers
|
bsd-3-clause
|
jni/networkx
|
examples/graph/knuth_miles.py
|
36
|
2994
|
#!/usr/bin/env python
"""
An example using networkx.Graph().
miles_graph() returns an undirected graph over the 128 US cities from
the datafile miles_dat.txt. The cities each have location and population
data. The edges are labeled with the distance between the two cities.
This example is described in Section 1.1 in Knuth's book [1,2].
References.
-----------
[1] Donald E. Knuth,
"The Stanford GraphBase: A Platform for Combinatorial Computing",
ACM Press, New York, 1993.
[2] http://www-cs-faculty.stanford.edu/~knuth/sgb.html
"""
__author__ = """Aric Hagberg (hagberg@lanl.gov)"""
# Copyright (C) 2004-2006 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import networkx as nx
def miles_graph():
    """ Return the cities example graph in miles_dat.txt
        from the Stanford GraphBase.

    The returned Graph carries two extra dict attributes, G.position and
    G.population, keyed by city name.
    """
    # open file miles_dat.txt.gz (or miles_dat.txt)
    import gzip
    # Bug fix: 're' was previously imported only under the __main__ guard,
    # so importing this module and calling miles_graph() raised NameError.
    import re

    # Hoisted out of the loop; matches lines that start with a distance number.
    numfind = re.compile(r"^\d+")

    G = nx.Graph()
    G.position = {}
    G.population = {}

    cities = []
    # 'with' ensures the file handle is closed (it previously leaked).
    with gzip.open('knuth_miles.txt.gz', 'r') as fh:
        for line in fh.readlines():
            line = line.decode()
            if line.startswith("*"):  # skip comments
                continue
            if numfind.match(line):  # this line is distances
                dist = line.split()
                for d in dist:
                    # cities is most-recent-first, so index i walks backwards
                    # through previously seen cities.
                    G.add_edge(city, cities[i], weight=int(d))
                    i = i + 1
            else:  # this line is a city, position, population
                i = 1
                (city, coordpop) = line.split("[")
                cities.insert(0, city)
                (coord, pop) = coordpop.split("]")
                (y, x) = coord.split(",")
                G.add_node(city)
                # assign position - flip x axis for matplotlib, shift origin
                G.position[city] = (-int(x) + 7500, int(y) - 3000)
                G.population[city] = float(pop) / 1000.0
    return G
if __name__ == '__main__':
    import networkx as nx
    import re
    import sys
    G=miles_graph()
    print("Loaded miles_dat.txt containing 128 cities.")
    print("digraph has %d nodes with %d edges"\
          %(nx.number_of_nodes(G),nx.number_of_edges(G)))
    # make new graph of cities, with an edge if less than 300 miles between them
    H=nx.Graph()
    for v in G:
        H.add_node(v)
    for (u,v,d) in G.edges(data=True):
        if d['weight'] < 300:
            H.add_edge(u,v)
    # draw with matplotlib/pylab
    try:
        import matplotlib.pyplot as plt
        plt.figure(figsize=(8,8))
        # with nodes colored by degree sized by population
        node_color=[float(H.degree(v)) for v in H]
        nx.draw(H,G.position,
                node_size=[G.population[v] for v in H],
                node_color=node_color,
                with_labels=False)
        # scale the axes equally
        plt.xlim(-5000,500)
        plt.ylim(-2000,3500)
        plt.savefig("knuth_miles.png")
    except:
        # Bare except deliberately skips drawing when matplotlib is missing
        # or plotting fails; the graph itself has already been reported.
        pass
|
bsd-3-clause
|
2014cdbg3/2015cd_midterm
|
static/Brython3.1.1-20150328-091302/Lib/imp.py
|
637
|
9839
|
"""This module provides the components needed to build your own __import__
function. Undocumented functions are obsolete.
In most cases it is preferred you consider using the importlib module's
functionality over this module.
"""
# (Probably) need to stay in _imp
from _imp import (lock_held, acquire_lock, release_lock,
get_frozen_object, is_frozen_package,
init_builtin, init_frozen, is_builtin, is_frozen,
_fix_co_filename)
try:
from _imp import load_dynamic
except ImportError:
# Platform doesn't support dynamic loading.
load_dynamic = None
# Directly exposed by this module
from importlib._bootstrap import new_module
from importlib._bootstrap import cache_from_source, source_from_cache
from importlib import _bootstrap
#fixme brython
#from importlib import machinery
import importlib.machinery as machinery
import os
import sys
import tokenize
import warnings
# DEPRECATED
# Module "type codes" returned in find_module()'s third tuple element and
# dispatched on by load_module() below.
SEARCH_ERROR = 0      # unused in this module
PY_SOURCE = 1         # .py source file -> load_source()
PY_COMPILED = 2       # bytecode file -> load_compiled()
C_EXTENSION = 3       # extension module -> load_dynamic()
PY_RESOURCE = 4       # historical; unused in this module
PKG_DIRECTORY = 5     # package directory -> load_package()
C_BUILTIN = 6         # built-in module -> init_builtin()
PY_FROZEN = 7         # frozen module -> init_frozen()
PY_CODERESOURCE = 8   # historical; unused in this module
IMP_HOOK = 9          # historical; unused in this module
def get_magic():
    """Return the magic number for .pyc or .pyo files."""
    # NOTE(review): reads the private _bootstrap._MAGIC_BYTES attribute --
    # presumably provided by this project's patched importlib; confirm.
    return _bootstrap._MAGIC_BYTES
def get_tag():
    """Return the magic tag for .pyc or .pyo files."""
    implementation = sys.implementation
    return implementation.cache_tag
def get_suffixes():
    """Return (suffix, mode, type-code) triples for importable file kinds.

    Deprecated; the importlib.machinery suffix constants replace this.
    """
    warnings.warn('imp.get_suffixes() is deprecated; use the constants '
                  'defined on importlib.machinery instead',
                  DeprecationWarning, 2)
    # Order matters: extensions, then source, then bytecode (as before).
    triples = []
    triples.extend((s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES)
    triples.extend((s, 'U', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES)
    triples.extend((s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES)
    return triples
class NullImporter:
    """Null import object: rejects empty or directory paths, finds nothing."""

    def __init__(self, path):
        # Refuse paths this importer cannot meaningfully handle.
        if path == '':
            raise ImportError('empty pathname', path='')
        if os.path.isdir(path):
            raise ImportError('existing directory', path=path)

    def find_module(self, fullname):
        """Always returns None."""
        return None
class _HackedGetData:
    """Compatibility support for 'file' arguments of various load_*()
    functions.

    Mixed in ahead of a real loader class so its get_data() is consulted
    first in the MRO; a caller-supplied open file object is then used
    instead of re-reading the path from disk.
    """
    def __init__(self, fullname, path, file=None):
        # Cooperative __init__: forwards to the sibling loader base class.
        super().__init__(fullname, path)
        self.file = file
    def get_data(self, path):
        """Gross hack to contort loader to deal w/ load_*()'s bad API."""
        if self.file and path == self.path:
            if not self.file.closed:
                file = self.file
            else:
                # The caller's file was already closed; reopen the same path.
                self.file = file = open(self.path, 'r')
            with file:
                # Technically should be returning bytes, but
                # SourceLoader.get_code() just passed what is returned to
                # compile() which can handle str. And converting to bytes would
                # require figuring out the encoding to decode to and
                # tokenize.detect_encoding() only accepts bytes.
                return file.read()
        else:
            # Any other path: defer to the real loader's get_data().
            return super().get_data(path)
class _LoadSourceCompatibility(_HackedGetData, _bootstrap.SourceFileLoader):
    """Compatibility support for implementing load_source().

    _HackedGetData is listed first so its get_data() honours an explicit
    'file' argument before SourceFileLoader reads from disk.
    """
    #brython fix me
    pass
def load_source(name, pathname, file=None):
    """**DEPRECATED**
    Load a source module from *pathname* (optionally reading from an
    already-open *file*), register it in sys.modules under *name*, and
    return it.
    """
    msg = ('imp.load_source() is deprecated; use '
           'importlib.machinery.SourceFileLoader(name, pathname).load_module()'
           ' instead')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadSourceCompatibility(name, pathname, file).load_module(name)
    # Fetch from sys.modules rather than trusting load_module's return value.
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourceFileLoader(name, pathname)
    return module
class _LoadCompiledCompatibility(_HackedGetData,
                                 _bootstrap.SourcelessFileLoader):
    """Compatibility support for implementing load_compiled().

    _HackedGetData is listed first so its get_data() honours an explicit
    'file' argument before SourcelessFileLoader reads from disk.
    """
    #brython fix me
    pass
def load_compiled(name, pathname, file=None):
    """**DEPRECATED**
    Load a bytecode module from *pathname* (optionally reading from an
    already-open *file*), register it in sys.modules under *name*, and
    return it.
    """
    msg = ('imp.load_compiled() is deprecated; use '
           'importlib.machinery.SourcelessFileLoader(name, pathname).'
           'load_module() instead ')
    warnings.warn(msg, DeprecationWarning, 2)
    _LoadCompiledCompatibility(name, pathname, file).load_module(name)
    # Fetch from sys.modules rather than trusting load_module's return value.
    module = sys.modules[name]
    # To allow reloading to potentially work, use a non-hacked loader which
    # won't rely on a now-closed file object.
    module.__loader__ = _bootstrap.SourcelessFileLoader(name, pathname)
    return module
def load_package(name, path):
    """**DEPRECATED**
    Load a package from the directory (or __init__ file) *path*, register
    it in sys.modules under *name*, and return the module.

    Raises ValueError if *path* is a directory with no __init__ file.
    """
    msg = ('imp.load_package() is deprecated; use either '
           'importlib.machinery.SourceFileLoader() or '
           'importlib.machinery.SourcelessFileLoader() instead')
    warnings.warn(msg, DeprecationWarning, 2)
    if os.path.isdir(path):
        extensions = (machinery.SOURCE_SUFFIXES[:] +
                      machinery.BYTECODE_SUFFIXES[:])
        for extension in extensions:
            # Bug fix: the original joined onto the *previous* candidate
            # ('pkg/__init__.py/__init__.pyc'), so only the first suffix was
            # ever probed against the real directory.
            init_path = os.path.join(path, '__init__' + extension)
            if os.path.exists(init_path):
                path = init_path
                break
        else:
            raise ValueError('{!r} is not a package'.format(path))
    return _bootstrap.SourceFileLoader(name, path).load_module(name)
def load_module(name, file, filename, details):
    """**DEPRECATED**
    Load a module, given information returned by find_module().
    The module name must include the full package name, if any.
    Dispatches on the type code in *details* to the matching load_*()
    helper; raises ValueError for bad modes or a missing file object and
    ImportError for unknown type codes.
    """
    suffix, mode, type_ = details
    with warnings.catch_warnings():
        # The delegated load_*() helpers emit DeprecationWarning; silence
        # them since the caller already chose this deprecated entry point.
        warnings.simplefilter('ignore')
        # Only read modes ('r'/'U', no '+') are meaningful here.
        if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
            raise ValueError('invalid file open mode {!r}'.format(mode))
        elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
            msg = 'file object required for import (type code {})'.format(type_)
            raise ValueError(msg)
        elif type_ == PY_SOURCE:
            return load_source(name, filename, file)
        elif type_ == PY_COMPILED:
            return load_compiled(name, filename, file)
        elif type_ == C_EXTENSION and load_dynamic is not None:
            # Extension modules must be read in binary mode.
            if file is None:
                with open(filename, 'rb') as opened_file:
                    return load_dynamic(name, filename, opened_file)
            else:
                return load_dynamic(name, filename, file)
        elif type_ == PKG_DIRECTORY:
            return load_package(name, filename)
        elif type_ == C_BUILTIN:
            return init_builtin(name)
        elif type_ == PY_FROZEN:
            return init_frozen(name)
        else:
            msg = "Don't know how to import {} (type code {})".format(name, type_)
            raise ImportError(msg, name=name)
def find_module(name, path=None):
    """**DEPRECATED**
    Search for a module.
    If path is omitted or None, search for a built-in, frozen or special
    module and continue search in sys.path. The module name cannot
    contain '.'; to search for a submodule of a package, pass the
    submodule name and the package's __path__.

    Returns a ``(file, file_path, (suffix, mode, type_code))`` triple
    suitable for passing to load_module(); raises ImportError if nothing
    matches.
    """
    if not isinstance(name, str):
        raise TypeError("'name' must be a str, not {}".format(type(name)))
    elif not isinstance(path, (type(None), list)):
        # Backwards-compatibility
        # Bug fix: the message previously reported type(name) and called the
        # argument 'list'; it now describes the offending 'path' argument.
        raise RuntimeError("'path' must be None or a list, "
                           "not {}".format(type(path)))

    if path is None:
        if is_builtin(name):
            return None, None, ('', '', C_BUILTIN)
        elif is_frozen(name):
            return None, None, ('', '', PY_FROZEN)
        else:
            path = sys.path

    for entry in path:
        package_directory = os.path.join(entry, name)
        # Package check: a directory containing an __init__ source/bytecode.
        for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
            package_file_name = '__init__' + suffix
            file_path = os.path.join(package_directory, package_file_name)
            if os.path.isfile(file_path):
                return None, package_directory, ('', '', PKG_DIRECTORY)
        with warnings.catch_warnings():
            # get_suffixes() warns about its own deprecation; suppress it.
            warnings.simplefilter('ignore')
            for suffix, mode, type_ in get_suffixes():
                file_name = name + suffix
                file_path = os.path.join(entry, file_name)
                if os.path.isfile(file_path):
                    break
            else:
                continue
        break  # Break out of outer loop when breaking out of inner loop.
    else:
        raise ImportError(_bootstrap._ERR_MSG.format(name), name=name)

    encoding = None
    if mode == 'U':
        # Universal-newline source: detect the PEP 263 coding cookie so the
        # returned file object decodes correctly.
        with open(file_path, 'rb') as file:
            encoding = tokenize.detect_encoding(file.readline)[0]
    file = open(file_path, mode, encoding=encoding)
    return file, file_path, (suffix, mode, type_)
_RELOADING = {}
def reload(module):
"""Reload the module and return it.
The module must have been successfully imported before.
"""
if not module or type(module) != type(sys):
raise TypeError("reload() argument must be module")
name = module.__name__
if name not in sys.modules:
msg = "module {} not in sys.modules"
raise ImportError(msg.format(name), name=name)
if name in _RELOADING:
return _RELOADING[name]
_RELOADING[name] = module
try:
parent_name = name.rpartition('.')[0]
if parent_name and parent_name not in sys.modules:
msg = "parent {!r} not in sys.modules"
raise ImportError(msg.format(parent_name), name=parent_name)
module.__loader__.load_module(name)
# The module may have replaced itself in sys.modules!
return sys.modules[module.__name__]
finally:
try:
del _RELOADING[name]
except KeyError:
pass
|
gpl-2.0
|
mlperf/training_results_v0.5
|
v0.5.0/google/cloud_v3.8/gnmt-tpuv3-8/code/gnmt/model/staging/models/rough/mask_rcnn/object_detection/shape_utils.py
|
22
|
2359
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow as tf
def assert_shape_equal(shape_a, shape_b):
  """Asserts that shape_a and shape_b are equal.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
  both_static = (all(isinstance(dim, int) for dim in shape_a) and
                 all(isinstance(dim, int) for dim in shape_b))
  if not both_static:
    # At least one dimension is only known at graph-execution time.
    return tf.assert_equal(shape_a, shape_b)
  if shape_a != shape_b:
    raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
  return tf.no_op()
def combined_static_and_dynamic_shape(tensor):
  """Returns a list containing static and dynamic values for the dimensions.

  Returns a list of static and dynamic values for shape dimensions. This is
  useful to preserve static shapes when available in reshape operation.

  Args:
    tensor: A tensor of any type.

  Returns:
    A list of size tensor.shape.ndims containing integers or a scalar tensor.
  """
  dynamic_shape = tf.shape(tensor)
  # Keep each statically-known dimension; substitute the dynamic value
  # only where the static shape is None.
  return [dim if dim is not None else dynamic_shape[index]
          for index, dim in enumerate(tensor.shape.as_list())]
|
apache-2.0
|
Salat-Cx65/python-for-android
|
python3-alpha/python3-src/Lib/asynchat.py
|
53
|
12133
|
# -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
r"""A class supporting chat-style (command/response) protocols.
This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).
The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.
for example:
Say you build an async nntp client using this class. At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting. Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""
import socket
import asyncore
from collections import deque
def buffer(obj, start=None, stop=None):
    """Return obj[start:stop], validating that obj supports the buffer API.

    Replacement for the removed Python 2 built-in buffer(); start/stop
    default to the whole object.  Raises TypeError (via memoryview) for
    objects that do not support the buffer protocol.
    """
    # if memoryview objects gain slicing semantics,
    # this function will change for the better
    # memoryview used for the TypeError
    memoryview(obj)
    # Fixed: identity comparison with None ('== None' is non-idiomatic and
    # can misbehave with objects overriding __eq__).
    if start is None:
        start = 0
    if stop is None:
        stop = len(obj)
    return obj[start:stop]
class async_chat(asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults
    ac_in_buffer_size = 4096
    ac_out_buffer_size = 4096

    # we don't want to enable the use of encoding by default, because that is a
    # sign of an application bug that we don't want to pass silently
    use_encoding = 0
    encoding = 'latin1'

    def __init__(self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = b''

        # we use a list here rather than cStringIO for a few reasons...
        # del lst[:] is faster than sio.truncate(0)
        # lst = [] is faster than sio.truncate(0)
        # cStringIO will be gaining unicode support in py3k, which
        # will negatively affect the performance of bytes compared to
        # a ''.join() equivalent
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__(self, sock, map)

    def collect_incoming_data(self, data):
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        # Default accumulator used by subclasses that just gather bytes.
        self.incoming.append(data)

    def _get_data(self):
        # Join and clear the accumulated chunks.
        d = b''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator(self, term):
        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
        if isinstance(term, str) and self.use_encoding:
            term = bytes(term, self.encoding)
        self.terminator = term

    def get_terminator(self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read(self):
        try:
            data = self.recv(self.ac_in_buffer_size)
        except socket.error:
            self.handle_error()
            return

        if isinstance(data, str) and self.use_encoding:
            # BUGFIX: the original called bytes(str, self.encoding), passing
            # the *type* str instead of the received data, which raised
            # TypeError whenever use_encoding was enabled.
            data = bytes(data, self.encoding)
        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).
        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data(self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data(self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data(self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string (source of subtle bugs)
                        self.collect_incoming_data(self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end(self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data(self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data(self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def handle_write(self):
        self.initiate_send()

    def handle_close(self):
        self.close()

    def push(self, data):
        # Split oversized payloads so each fifo entry fits one send buffer.
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in range(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer(self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable(self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.
        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable(self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done(self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                data = buffer(first, 0, obs)
            except TypeError:
                # Not a bytes-like object: treat it as a producer.
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            if isinstance(data, str) and self.use_encoding:
                data = bytes(data, self.encoding)

            # send the data
            try:
                num_sent = self.send(data)
            except socket.error:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers(self):
        # Emergencies only!
        self.ac_in_buffer = b''
        del self.incoming[:]
        self.producer_fifo.clear()
class simple_producer:
    """Producer that yields *data* in chunks of at most *buffer_size* bytes."""

    def __init__(self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more(self):
        """Return the next chunk; empty bytes once the data is exhausted."""
        limit = self.buffer_size
        if len(self.data) <= limit:
            # Remaining data fits in one chunk: hand it all back.
            chunk, self.data = self.data, b''
        else:
            chunk, self.data = self.data[:limit], self.data[limit:]
        return chunk
class fifo:
    """Deque-backed FIFO queue kept for backward compatibility."""

    def __init__(self, list=None):
        # 'list' kept as the parameter name for API compatibility,
        # even though it shadows the builtin.
        self.list = deque(list) if list else deque()

    def __len__(self):
        return len(self.list)

    def is_empty(self):
        return not self.list

    def first(self):
        """Peek at the oldest element without removing it."""
        return self.list[0]

    def push(self, data):
        self.list.append(data)

    def pop(self):
        """Return (1, item) for the oldest item, or (0, None) when empty."""
        if not self.list:
            return (0, None)
        return (1, self.list.popleft())
# Given 'haystack', see if any prefix of 'needle' is at its end. This
# assumes an exact match has already been checked. Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# f_p_a_e ("qwerty\r\n", "\r\n") => <undefined>
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# new python: 28961/s
# old python: 18307/s
# re: 12820/s
# regex: 14035/s
def find_prefix_at_end(haystack, needle):
    """Return the length of the longest proper prefix of *needle* that
    appears at the end of *haystack* (0 if none).

    Assumes an exact match of the whole needle has already been ruled out.
    """
    for size in range(len(needle) - 1, 0, -1):
        if haystack.endswith(needle[:size]):
            return size
    return 0
|
apache-2.0
|
jomolinare/crate-web
|
src/web/utils.py
|
1
|
1865
|
# vim: set fileencodings=utf-8
# -*- coding: utf-8 -*-
__docformat__ = "reStructuredText"
import re
import hashlib
import logging
from StringIO import StringIO
from datetime import datetime
from markdown2 import markdown
from django.utils.html import strip_tags
from django.utils.text import Truncator
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
logger = logging.getLogger(__name__)
HEADER_RE = r'(\w+)\:\s([\S\s]*)\n'
def toDict(conf, posts):
    """Convert raw post records into a list of plain dicts for serialization.

    Each entry gets an md5-of-url id, a site-qualified permalink and a
    25-word plain-text excerpt rendered from the markdown body.
    """
    site = conf.get('site', '')
    entries = []
    for post in posts:
        rendered = markdown(force_unicode(post['raw_body']), safe_mode=True)
        excerpt = Truncator(strip_tags(rendered)).words(25)
        entries.append(dict(
            id = hashlib.md5(post['url']).hexdigest(),
            title = post['title'],
            date = post['date'],
            tags = post['tags'],
            category = post['category'],
            permalink = '{0}{1}'.format(site, post['url']),
            content = u'',
            excerpt = excerpt,
            author = post['author'],
        ))
    return entries
def parseDate(date_str=None):
    """Parse *date_str* in one of three accepted formats.

    Tries 'yyyy-mm-ddThh:mm', then 'yyyy-mm-dd', then
    'yyyy/mm/dd hh:mm:ss'.  Falls back to the current time (with a
    warning) when nothing matches or date_str is empty/None.
    """
    known_formats = (
        (r'^\d{4}\-\d{2}\-\d{2}T\d{2}:\d{2}$', '%Y-%m-%dT%H:%M'),
        (r'^\d{4}\-\d{2}\-\d{2}', '%Y-%m-%d'),
        (r'^\d{4}\/\d{2}\/\d{2} \d{2}:\d{2}:\d{2}', '%Y/%m/%d %H:%M:%S'),
    )
    if date_str:
        for pattern, fmt in known_formats:
            if re.match(pattern, date_str):
                return datetime.strptime(date_str, fmt)
    logger.warning("Date format not correct, should be 'yyyy-mm-dd', 'yyyy-mm-ddThh:mm' or 'yyyy/mm/dd hh:mm:ss'\n{0}".format(date_str))
    return datetime.now()
def parsePost(post):
    """Split a raw post into a (headers, body) tuple.

    Leading lines matching HEADER_RE ("Key: value") are collected into a
    dict keyed by the lower-cased header name; the rest of the text is
    returned as the body.  Note the first non-header line is consumed by
    the iterator and is therefore NOT included in the body.
    """
    headers = {}
    # NOTE(review): post.data() presumably returns the raw post text as a
    # (py2) str -- confirm against the caller.
    fn = StringIO(post.data())
    for line in fn:
        res = re.match(HEADER_RE, line)
        if res:
            header, value = res.groups()
            headers[header.lower()] = value
        else:
            # First non-header line terminates the header section.
            break
    body = fn.read()
    return (headers, body)
|
apache-2.0
|
eneldoserrata/marcos_openerp
|
addons/hr_expense/__openerp__.py
|
47
|
2927
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Expense Management',
'version': '1.0',
'category': 'Human Resources',
'sequence': 29,
'summary': 'Expenses Validation, Invoicing',
'description': """
Manage expenses by Employees
============================
This application allows you to manage your employees' daily expenses. It gives you access to your employees’ fee notes and give you the right to complete and validate or refuse the notes. After validation it creates an invoice for the employee.
Employee can encode their own expenses and the validation flow puts it automatically in the accounting after validation by managers.
The whole flow is implemented as:
---------------------------------
* Draft expense
* Confirmation of the sheet by the employee
* Validation by his manager
* Validation by the accountant and receipt creation
This module also uses analytic accounting and is compatible with the invoice on timesheet module so that you are able to automatically re-invoice your customers' expenses if your work by project.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/hr_expenses_analysis.jpeg', 'images/hr_expenses.jpeg'],
'depends': ['hr', 'account_voucher', 'account_accountant'],
'data': [
'security/ir.model.access.csv',
'hr_expense_data.xml',
'hr_expense_sequence.xml',
'hr_expense_workflow.xml',
'hr_expense_view.xml',
'hr_expense_report.xml',
'process/hr_expense_process.xml',
'security/ir_rule.xml',
'report/hr_expense_report_view.xml',
'board_hr_expense_view.xml',
'hr_expense_installer_view.xml',
],
'demo': ['hr_expense_demo.xml'],
'test': [
'test/expense_demo.yml',
'test/expense_process.yml',
],
'installable': True,
'auto_install': False,
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
jankoslavic/numpy
|
numpy/distutils/extension.py
|
162
|
3043
|
"""distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts.
Overridden to support f2py.
"""
from __future__ import division, absolute_import, print_function
import sys
import re
from distutils.extension import Extension as old_Extension
# Python 3 removed basestring; alias it so the isinstance check below
# works on both major versions.
if sys.version_info[0] >= 3:
    basestring = str

# Recognize C++ and Fortran/f2py source files by extension (case-insensitive).
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match


class Extension(old_Extension):
    """distutils Extension subclass extended for f2py/Fortran support."""

    def __init__(self, name, sources,
                 include_dirs=None,
                 define_macros=None,
                 undef_macros=None,
                 library_dirs=None,
                 libraries=None,
                 runtime_library_dirs=None,
                 extra_objects=None,
                 extra_compile_args=None,
                 extra_link_args=None,
                 export_symbols=None,
                 swig_opts=None,
                 depends=None,
                 language=None,
                 f2py_options=None,
                 module_dirs=None,
                 extra_f77_compile_args=None,
                 extra_f90_compile_args=None,
                 ):
        # Pass an empty sources list to the base class so its assertion
        # that all sources are strings is bypassed; set them afterwards.
        old_Extension.__init__(self, name, [],
                               include_dirs,
                               define_macros,
                               undef_macros,
                               library_dirs,
                               libraries,
                               runtime_library_dirs,
                               extra_objects,
                               extra_compile_args,
                               extra_link_args,
                               export_symbols)
        self.sources = sources

        # Python 2.4 distutils new features
        self.swig_opts = swig_opts or []
        # swig_opts is assumed to be a list; tolerate (but warn about)
        # a plain string.
        if isinstance(self.swig_opts, basestring):
            import warnings
            warnings.warn("swig_opts is specified as a string instead of a list",
                          SyntaxWarning)
            self.swig_opts = self.swig_opts.split()

        # Python 2.3 distutils new features
        self.depends = depends or []
        self.language = language

        # numpy_distutils features
        self.f2py_options = f2py_options or []
        self.module_dirs = module_dirs or []
        self.extra_f77_compile_args = extra_f77_compile_args or []
        self.extra_f90_compile_args = extra_f90_compile_args or []

    def has_cxx_sources(self):
        """True when any source file has a C++ extension."""
        return any(cxx_ext_re(str(source)) for source in self.sources)

    def has_f2py_sources(self):
        """True when any source file is Fortran or an f2py .pyf signature."""
        return any(fortran_pyf_ext_re(source) for source in self.sources)
# class Extension
|
bsd-3-clause
|
veger/ansible
|
lib/ansible/plugins/action/async_status.py
|
26
|
2115
|
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
    """Action plugin backing the async_status module."""

    _VALID_ARGS = frozenset(('jid', 'mode'))

    def run(self, tmp=None, task_vars=None):
        """Check on (or clean up) a previously started async task."""
        results = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect

        task_args = self._task.args
        if "jid" not in task_args:
            raise AnsibleError("jid is required")

        # Honour a legacy ANSIBLE_ASYNC_DIR environment entry if present;
        # otherwise fall back to the shell plugin's async_dir option.
        legacy_env = [e for e in self._task.environment if
                      "ANSIBLE_ASYNC_DIR" in e]
        if legacy_env:
            async_dir = legacy_env[0]['ANSIBLE_ASYNC_DIR']
            self._display.deprecated(
                "Setting the async dir from the environment keyword "
                "ANSIBLE_ASYNC_DIR is deprecated. Set the async_dir "
                "shell option instead", "2.12")
        else:
            try:
                async_dir = self._connection._shell.get_option('async_dir')
            except KeyError:
                # 3rd-party shell plugins may not define async_dir.
                async_dir = "~/.ansible_async"

        module_args = dict(jid=task_args["jid"],
                           mode=task_args.get("mode", "status"),
                           _async_dir=async_dir)
        status = self._execute_module(task_vars=task_vars,
                                      module_args=module_args)
        return merge_hash(results, status)
|
gpl-3.0
|
domenkozar/nixops
|
nixops/backends/virtualbox.py
|
1
|
13850
|
# -*- coding: utf-8 -*-
import os
import sys
import time
import shutil
import stat
from nixops.backends import MachineDefinition, MachineState
import nixops.known_hosts
sata_ports = 8
class VirtualBoxDefinition(MachineDefinition):
    """Definition of a VirtualBox machine."""

    @classmethod
    def get_type(cls):
        return "virtualbox"

    def __init__(self, xml):
        MachineDefinition.__init__(self, xml)
        vbox_attrs = xml.find("attrs/attr[@name='virtualbox']/attrs")
        assert vbox_attrs is not None
        self.memory_size = vbox_attrs.find("attr[@name='memorySize']/int").get("value")
        self.headless = vbox_attrs.find("attr[@name='headless']/bool").get("value") == "true"

        def parse_disk(node):
            # Each disk attrset carries a SATA port, a size and a base image.
            return {'port': int(node.find("attrs/attr[@name='port']/int").get("value")),
                    'size': int(node.find("attrs/attr[@name='size']/int").get("value")),
                    'baseImage': node.find("attrs/attr[@name='baseImage']/string").get("value")}

        self.disks = dict(
            (node.get("name"), parse_disk(node))
            for node in vbox_attrs.findall("attr[@name='disks']/attrs/attr"))
class VirtualBoxState(MachineState):
    """State of a VirtualBox machine.

    Drives a local VirtualBox VM through the VBoxManage CLI: creation,
    disk management, start/stop and state polling.
    """
    @classmethod
    def get_type(cls):
        return "virtualbox"
    # Persisted state attributes (stored in the deployment database).
    state = nixops.util.attr_property("state", MachineState.MISSING, int) # override
    private_ipv4 = nixops.util.attr_property("privateIpv4", None)
    disks = nixops.util.attr_property("virtualbox.disks", {}, 'json')
    _client_private_key = nixops.util.attr_property("virtualbox.clientPrivateKey", None)
    _client_public_key = nixops.util.attr_property("virtualbox.clientPublicKey", None)
    _headless = nixops.util.attr_property("virtualbox.headless", False, bool)
    sata_controller_created = nixops.util.attr_property("virtualbox.sataControllerCreated", False, bool)
    # Obsolete.
    disk = nixops.util.attr_property("virtualbox.disk", None)
    disk_attached = nixops.util.attr_property("virtualbox.diskAttached", False, bool)
    def __init__(self, depl, name, id):
        MachineState.__init__(self, depl, name, id)
        self._disk_attached = False
    @property
    def resource_id(self):
        # The VirtualBox VM name doubles as the resource identifier.
        return self.vm_id
    def get_ssh_name(self):
        # SSH goes through the host-only network address.
        assert self.private_ipv4
        return self.private_ipv4
    def get_ssh_private_key_file(self):
        return self._ssh_private_key_file or self.write_ssh_private_key(self._client_private_key)
    def get_ssh_flags(self):
        # Host keys change whenever the VM is recreated, so don't check them.
        return ["-o", "StrictHostKeyChecking=no", "-i", self.get_ssh_private_key_file()]
    def get_physical_spec(self):
        # Extra NixOS configuration injected into the machine's spec.
        return [' require = [ <nixops/virtualbox-image-nixops.nix> ];']
    def address_to(self, m):
        """Return the address to reach machine *m* from this one."""
        if isinstance(m, VirtualBoxState):
            return m.private_ipv4
        return MachineState.address_to(self, m)
    def has_really_fast_connection(self):
        # Local VM: copying closures over SSH is cheap.
        return True
    def _get_vm_info(self):
        '''Return the output of ‘VBoxManage showvminfo’ in a dictionary.'''
        lines = self._logged_exec(
            ["VBoxManage", "showvminfo", "--machinereadable", self.vm_id],
            capture_stdout=True, check=False).splitlines()
        # We ignore the exit code, because it may be 1 while the VM is
        # shutting down (even though the necessary info is returned on
        # stdout).
        if len(lines) == 0:
            raise Exception("unable to get info on VirtualBox VM ‘{0}’".format(self.name))
        vminfo = {}
        for l in lines:
            (k, v) = l.split("=", 1)
            vminfo[k] = v
        return vminfo
    def _get_vm_state(self):
        '''Return the state ("running", etc.) of a VM.'''
        vminfo = self._get_vm_info()
        if 'VMState' not in vminfo:
            raise Exception("unable to get state of VirtualBox VM ‘{0}’".format(self.name))
        return vminfo['VMState'].replace('"', '')
    def _start(self):
        """Start the VM, clearing the old IP and pushing the client key first."""
        self._logged_exec(
            ["VBoxManage", "guestproperty", "set", self.vm_id, "/VirtualBox/GuestInfo/Net/1/V4/IP", ''])
        self._logged_exec(
            ["VBoxManage", "guestproperty", "set", self.vm_id, "/VirtualBox/GuestInfo/Charon/ClientPublicKey", self._client_public_key])
        self._logged_exec(["VBoxManage", "startvm", self.vm_id] +
                          (["--type", "headless"] if self._headless else []))
        self.state = self.STARTING
    def _update_ip(self):
        """Read the guest-reported IP address, if any, into private_ipv4."""
        res = self._logged_exec(
            ["VBoxManage", "guestproperty", "get", self.vm_id, "/VirtualBox/GuestInfo/Net/1/V4/IP"],
            capture_stdout=True).rstrip()
        # Output is either "Value: <ip>" or an error/no-value message.
        if res[0:7] != "Value: ": return
        self.private_ipv4 = res[7:]
    def _update_disk(self, name, state):
        """Persist disk *name*'s state dict; a state of None removes it."""
        disks = self.disks
        if state == None:
            disks.pop(name, None)
        else:
            disks[name] = state
        self.disks = disks
    def _wait_for_ip(self):
        """Poll until the guest reports an IP address."""
        self.log_start("waiting for IP address...")
        while True:
            self._update_ip()
            if self.private_ipv4 != None: break
            time.sleep(1)
            self.log_continue(".")
        self.log_end(" " + self.private_ipv4)
        # Drop any stale known_hosts entry for the (possibly reused) IP.
        nixops.known_hosts.remove(self.private_ipv4)
    def create(self, defn, check, allow_reboot, allow_recreate):
        """Create/update the VM to match *defn*: VM, SATA controller, disks,
        keys and VM settings, then start it and wait for an IP."""
        assert isinstance(defn, VirtualBoxDefinition)
        if self.state != self.UP or check: self.check()
        self.set_common_state(defn)
        if not self.vm_id:
            self.log("creating VirtualBox VM...")
            vm_id = "nixops-{0}-{1}".format(self.depl.uuid, self.name)
            self._logged_exec(["VBoxManage", "createvm", "--name", vm_id, "--ostype", "Linux", "--register"])
            self.vm_id = vm_id
            self.state = self.STOPPED
        # Backwards compatibility.
        if self.disk:
            with self.depl._db:
                self._update_disk("disk1", {"created": True, "path": self.disk,
                                            "attached": self.disk_attached,
                                            "port": 0})
                self.disk = None
                self.sata_controller_created = self.disk_attached
                self.disk_attached = False
        # Create the SATA controller.
        if not self.sata_controller_created:
            self._logged_exec(
                ["VBoxManage", "storagectl", self.vm_id,
                 "--name", "SATA", "--add", "sata", "--sataportcount", str(sata_ports),
                 "--bootable", "on", "--hostiocache", "on"])
            self.sata_controller_created = True
        vm_dir = os.environ['HOME'] + "/VirtualBox VMs/" + self.vm_id
        if not os.path.isdir(vm_dir):
            raise Exception("can't find directory of VirtualBox VM ‘{0}’".format(self.name))
        # Create missing disks.
        for disk_name, disk_def in defn.disks.items():
            disk_state = self.disks.get(disk_name, {})
            if not disk_state.get('created', False):
                self.log("creating disk ‘{0}’...".format(disk_name))
                disk_path = "{0}/{1}.vdi".format(vm_dir, disk_name)
                base_image = disk_def.get('baseImage')
                if base_image:
                    # Clone an existing disk image.
                    if base_image == "drv":
                        # FIXME: move this to deployment.py.
                        base_image = self._logged_exec(
                            ["nix-build"]
                            + self.depl._eval_flags(self.depl.nix_exprs) +
                            ["--arg", "checkConfigurationOptions", "false",
                             "-A", "nodes.{0}.config.deployment.virtualbox.disks.{1}.baseImage".format(self.name, disk_name),
                             "-o", "{0}/vbox-image-{1}".format(self.depl.tempdir, self.name)],
                            capture_stdout=True).rstrip()
                    self._logged_exec(["VBoxManage", "clonehd", base_image, disk_path])
                else:
                    # Create an empty disk.
                    if disk_def['size'] <= 0:
                        raise Exception("size of VirtualBox disk ‘{0}’ must be positive".format(disk_name))
                    self._logged_exec(["VBoxManage", "createhd", "--filename", disk_path, "--size", str(disk_def['size'])])
                    disk_state['size'] = disk_def['size']
                disk_state['created'] = True
                disk_state['path'] = disk_path
                self._update_disk(disk_name, disk_state)
            if not disk_state.get('attached', False):
                self.log("attaching disk ‘{0}’...".format(disk_name))
                if disk_def['port'] >= sata_ports:
                    raise Exception("SATA port number {0} of disk ‘{1}’ exceeds maximum ({2})".format(disk_def['port'], disk_name, sata_ports))
                for disk_name2, disk_state2 in self.disks.items():
                    if disk_name != disk_name2 and disk_state2.get('attached', False) and \
                            disk_state2['port'] == disk_def['port']:
                        raise Exception("cannot attach disks ‘{0}’ and ‘{1}’ to the same SATA port on VirtualBox machine ‘{2}’".format(disk_name, disk_name2, self.name))
                self._logged_exec(
                    ["VBoxManage", "storageattach", self.vm_id,
                     "--storagectl", "SATA", "--port", str(disk_def['port']), "--device", "0",
                     "--type", "hdd", "--medium", disk_state['path']])
                disk_state['attached'] = True
                disk_state['port'] = disk_def['port']
                self._update_disk(disk_name, disk_state)
        # FIXME: warn about changed disk attributes (like size).  Or
        # even better, handle them (e.g. resize existing disks).
        # Destroy obsolete disks.
        for disk_name, disk_state in self.disks.items():
            if disk_name not in defn.disks:
                if not self.depl.logger.confirm("are you sure you want to destroy disk ‘{0}’ of VirtualBox instance ‘{1}’?".format(disk_name, self.name)):
                    raise Exception("not destroying VirtualBox disk ‘{0}’".format(disk_name))
                self.log("destroying disk ‘{0}’".format(disk_name))
                if disk_state.get('attached', False):
                    # FIXME: only do this if the device is actually
                    # attached (and remove check=False).
                    self._logged_exec(
                        ["VBoxManage", "storageattach", self.vm_id,
                         "--storagectl", "SATA", "--port", str(disk_state['port']), "--device", "0",
                         "--type", "hdd", "--medium", "none"], check=False)
                    disk_state['attached'] = False
                    disk_state.pop('port')
                    self._update_disk(disk_name, disk_state)
                if disk_state['created']:
                    self._logged_exec(
                        ["VBoxManage", "closemedium", "disk", disk_state['path'], "--delete"])
                self._update_disk(disk_name, None)
        if not self._client_private_key:
            (self._client_private_key, self._client_public_key) = nixops.util.create_key_pair()
        if not self.started:
            self._logged_exec(
                ["VBoxManage", "modifyvm", self.vm_id,
                 "--memory", defn.memory_size, "--vram", "10",
                 "--nictype1", "virtio", "--nictype2", "virtio",
                 "--nic2", "hostonly", "--hostonlyadapter2", "vboxnet0",
                 "--nestedpaging", "off"])
            self._headless = defn.headless
            self._start()
        if not self.private_ipv4 or check:
            self._wait_for_ip()
    def destroy(self, wipe=False):
        """Power off (if needed) and unregister/delete the VM.

        Returns False when the user declines the confirmation prompt.
        """
        if not self.vm_id: return True
        if not self.depl.logger.confirm("are you sure you want to destroy VirtualBox VM ‘{0}’?".format(self.name)): return False
        self.log("destroying VirtualBox VM...")
        if self._get_vm_state() == 'running':
            self._logged_exec(["VBoxManage", "controlvm", self.vm_id, "poweroff"], check=False)
        while self._get_vm_state() not in ['poweroff', 'aborted']:
            time.sleep(1)
        self.state = self.STOPPED
        time.sleep(1) # hack to work around "machine locked" errors
        self._logged_exec(["VBoxManage", "unregistervm", "--delete", self.vm_id])
        return True
    def stop(self):
        """Cleanly shut down the guest and wait until it powers off."""
        if self._get_vm_state() != 'running': return
        self.log_start("shutting down... ")
        self.run_command("systemctl poweroff", check=False)
        self.state = self.STOPPING
        while True:
            state = self._get_vm_state()
            self.log_continue("[{0}] ".format(state))
            if state == 'poweroff': break
            time.sleep(1)
        self.log_end("")
        self.state = self.STOPPED
        self.ssh_master = None
    def start(self):
        """Start the VM and wait for SSH, warning if the IP changed."""
        if self._get_vm_state() == 'running': return
        self.log("restarting...")
        prev_ipv4 = self.private_ipv4
        self._start()
        self._wait_for_ip()
        if prev_ipv4 != self.private_ipv4:
            self.warn("IP address has changed, you may need to run ‘nixops deploy’")
        self.wait_for_ssh(check=True)
    def _check(self, res):
        """Fill in *res* (a check result) from the actual VM state."""
        if not self.vm_id:
            res.exists = False
            return
        state = self._get_vm_state()
        res.exists = True
        #self.log("VM state is ‘{0}’".format(state))
        if state == "poweroff" or state == "aborted":
            res.is_up = False
            self.state = self.STOPPED
        elif state == "running":
            res.is_up = True
            self._update_ip()
            MachineState._check(self, res)
        else:
            # Transient states (starting, saving, ...) map to UNKNOWN.
            self.state = self.UNKNOWN
|
lgpl-3.0
|
dursk/django
|
django/contrib/gis/geos/prototypes/threadsafe.py
|
529
|
2859
|
import threading
from django.contrib.gis.geos.libgeos import (
CONTEXT_PTR, error_h, lgeos, notice_h,
)
class GEOSContextHandle(object):
    """Python wrapper around a per-thread GEOS context handle."""

    def __init__(self):
        # Create the context, registering the module's notice and
        # error handlers.
        self.ptr = lgeos.initGEOS_r(notice_h, error_h)

    def __del__(self):
        # lgeos may already be gone during interpreter shutdown.
        if not self.ptr or not lgeos:
            return
        lgeos.finishGEOS_r(self.ptr)
# Defining a thread-local object and creating an instance
# to hold a reference to GEOSContextHandle for this thread.
class GEOSContext(threading.local):
    # Per-thread GEOS context handle; created lazily on first use.
    handle = None
thread_context = GEOSContext()
class GEOSFunc(object):
    """Callable wrapper for a GEOS C function.

    When libgeos exposes a reentrant variant (the same name with an
    '_r' suffix), that variant is used and the per-thread context
    handle is injected as its first argument.
    """

    def __init__(self, func_name):
        try:
            # Prefer the thread-safe variant, which takes an extra
            # context-handle parameter.
            self.cfunc = getattr(lgeos, func_name + '_r')
        except AttributeError:
            # No '_r' variant available; use the plain function.
            self.cfunc = getattr(lgeos, func_name)
            self.threaded = False
        else:
            self.threaded = True
            # Keep a reference to thread_context so it is not
            # garbage-collected before an attempt to call this object.
            self.thread_context = thread_context

    def __call__(self, *args):
        if not self.threaded:
            return self.cfunc(*args)
        ctx = self.thread_context
        # Lazily initialize the context handle for this thread.
        if not ctx.handle:
            ctx.handle = GEOSContextHandle()
        return self.cfunc(ctx.handle.ptr, *args)

    def __str__(self):
        return self.cfunc.__name__

    # argtypes property
    def _get_argtypes(self):
        return self.cfunc.argtypes

    def _set_argtypes(self, argtypes):
        if self.threaded:
            # Reentrant functions take the context handle first.
            self.cfunc.argtypes = [CONTEXT_PTR] + list(argtypes)
        else:
            self.cfunc.argtypes = argtypes

    argtypes = property(_get_argtypes, _set_argtypes)

    # restype property
    def _get_restype(self):
        return self.cfunc.restype

    def _set_restype(self, restype):
        self.cfunc.restype = restype

    restype = property(_get_restype, _set_restype)

    # errcheck property
    def _get_errcheck(self):
        return self.cfunc.errcheck

    def _set_errcheck(self, errcheck):
        self.cfunc.errcheck = errcheck

    errcheck = property(_get_errcheck, _set_errcheck)
|
bsd-3-clause
|
barachka/odoo
|
addons/purchase/report/purchase_report.py
|
49
|
6707
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# Please note that these reports are not multi-currency !!!
#
from openerp.osv import fields,osv
from openerp import tools
class purchase_report(osv.osv):
    """Read-only reporting model backed by a SQL view over purchase
    orders and their lines (one row per grouped order-line set).

    NOTE: these reports are not multi-currency (see file header).
    """
    _name = "purchase.report"
    _description = "Purchases Orders"
    # _auto = False: no table is created; init() builds the view instead.
    _auto = False
    _columns = {
        'date': fields.date('Order Date', readonly=True, help="Date on which this document has been created"),
        'state': fields.selection([('draft', 'Request for Quotation'),
                                   ('confirmed', 'Waiting Supplier Ack'),
                                   ('approved', 'Approved'),
                                   ('except_picking', 'Shipping Exception'),
                                   ('except_invoice', 'Invoice Exception'),
                                   ('done', 'Done'),
                                   ('cancel', 'Cancelled')],'Order Status', readonly=True),
        'product_id':fields.many2one('product.product', 'Product', readonly=True),
        'picking_type_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
        'location_id': fields.many2one('stock.location', 'Destination', readonly=True),
        'partner_id':fields.many2one('res.partner', 'Supplier', readonly=True),
        'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', readonly=True),
        'date_approve':fields.date('Date Approved', readonly=True),
        'expected_date':fields.date('Expected Date', readonly=True),
        'validator' : fields.many2one('res.users', 'Validated By', readonly=True),
        'product_uom' : fields.many2one('product.uom', 'Reference Unit of Measure', required=True),
        'company_id':fields.many2one('res.company', 'Company', readonly=True),
        'user_id':fields.many2one('res.users', 'Responsible', readonly=True),
        # delay: days between order date and approval (see view SQL).
        'delay':fields.float('Days to Validate', digits=(16,2), readonly=True),
        'delay_pass':fields.float('Days to Deliver', digits=(16,2), readonly=True),
        'quantity': fields.float('Quantity', readonly=True),
        'price_total': fields.float('Total Price', readonly=True),
        'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
        'negociation': fields.float('Purchase-Standard Price', readonly=True, group_operator="avg"),
        'price_standard': fields.float('Products Value', readonly=True, group_operator="sum"),
        'nbr': fields.integer('# of Lines', readonly=True),
        'category_id': fields.many2one('product.category', 'Category', readonly=True)
    }
    _order = 'date desc, price_total desc'
    def init(self, cr):
        """(Re)create the purchase_report SQL view.

        Quantities are normalized to the product template's reference
        UoM via u.factor / u2.factor; standard prices come from the
        'standard_price' ir_property of the product template.
        """
        tools.sql.drop_view_if_exists(cr, 'purchase_report')
        cr.execute("""
            create or replace view purchase_report as (
                select
                    min(l.id) as id,
                    s.date_order as date,
                    s.state,
                    s.date_approve,
                    s.minimum_planned_date as expected_date,
                    s.dest_address_id,
                    s.pricelist_id,
                    s.validator,
                    s.picking_type_id as picking_type_id,
                    s.partner_id as partner_id,
                    s.create_uid as user_id,
                    s.company_id as company_id,
                    l.product_id,
                    t.categ_id as category_id,
                    t.uom_id as product_uom,
                    s.location_id as location_id,
                    sum(l.product_qty/u.factor*u2.factor) as quantity,
                    extract(epoch from age(s.date_approve,s.date_order))/(24*60*60)::decimal(16,2) as delay,
                    extract(epoch from age(l.date_planned,s.date_order))/(24*60*60)::decimal(16,2) as delay_pass,
                    count(*) as nbr,
                    sum(l.price_unit*l.product_qty)::decimal(16,2) as price_total,
                    avg(100.0 * (l.price_unit*l.product_qty) / NULLIF(ip.value_float*l.product_qty/u.factor*u2.factor, 0.0))::decimal(16,2) as negociation,
                    sum(ip.value_float*l.product_qty/u.factor*u2.factor)::decimal(16,2) as price_standard,
                    (sum(l.product_qty*l.price_unit)/NULLIF(sum(l.product_qty/u.factor*u2.factor),0.0))::decimal(16,2) as price_average
                from purchase_order_line l
                    join purchase_order s on (l.order_id=s.id)
                        left join product_product p on (l.product_id=p.id)
                            left join product_template t on (p.product_tmpl_id=t.id)
                            LEFT JOIN ir_property ip ON (ip.name='standard_price' AND ip.res_id=CONCAT('product.template,',t.id) AND ip.company_id=s.company_id)
                    left join product_uom u on (u.id=l.product_uom)
                    left join product_uom u2 on (u2.id=t.uom_id)
                group by
                    s.company_id,
                    s.create_uid,
                    s.partner_id,
                    u.factor,
                    s.location_id,
                    l.price_unit,
                    s.date_approve,
                    l.date_planned,
                    l.product_uom,
                    s.minimum_planned_date,
                    s.pricelist_id,
                    s.validator,
                    s.dest_address_id,
                    l.product_id,
                    t.categ_id,
                    s.date_order,
                    s.state,
                    s.picking_type_id,
                    u.uom_type,
                    u.category_id,
                    t.uom_id,
                    u.id,
                    u2.factor
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
fuziontech/sentry
|
src/sentry/utils/samples.py
|
2
|
2278
|
"""
sentry.utils.samples
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os.path
from sentry.constants import DATA_ROOT, PLATFORM_ROOTS, PLATFORM_TITLES
from sentry.event_manager import EventManager
from sentry.utils import json
def load_data(platform, default=None):
    """Return the sample event payload for ``platform``.

    Tries ``platform`` first, then ``default``; returns None when no
    sample JSON file exists for either. The loop deliberately rebinds
    ``platform`` so that the candidate which matched is the one used
    to annotate the payload below.
    """
    data = None
    for platform in (platform, default):
        if platform is None:
            continue
        json_path = os.path.join(
            DATA_ROOT, 'samples', '%s.json' % (platform.encode('utf-8'),))
        if os.path.exists(json_path):
            with open(json_path) as fp:
                data = json.loads(fp.read())
            break
    if data is None:
        return
    # Overlay fixed, recognizable demo values onto the sample payload.
    data.update({
        'platform': platform,
        'message': 'This is an example %s exception' % (
            PLATFORM_TITLES.get(platform, platform.title()),),
        'sentry.interfaces.User': {
            "username": "getsentry",
            "id": "1671",
            "email": "foo@example.com"
        },
        'extra': {
            'session': {
                'foo': 'bar',
            },
            'results': [1, 2, 3, 4, 5],
            'emptyList': [],
            'emptyMap': {},
        },
        'modules': {
            'my.package': '1.0.0',
        },
        'sentry.interfaces.Http': {
            "cookies": {},
            "url": "http://example.com/foo",
            "headers": {
                "Referer": "http://example.com",
                "Content-Type": "application/json",
                "User-Agent": "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.72 Safari/537.36"
            },
            "env": {},
            "query_string": "foo=bar",
            "data": '{"hello": "world"}',
            "method": "GET"
        },
    })
    return data
def create_sample_event(project, platform=None, default=None, **kwargs):
    """Create and save an example event for *project*.

    Falls back to the project's own platform when none is given and
    returns None when no sample payload is available. Extra keyword
    arguments override fields of the sample payload before saving.
    """
    if not platform:
        platform = project.platform
    if not platform and not default:
        return
    # Map platform aliases onto their root platform name.
    platform = PLATFORM_ROOTS.get(platform, platform)
    data = load_data(platform, default)
    if not data:
        return
    data.update(kwargs)
    manager = EventManager(data)
    manager.normalize()
    # NOTE(review): raw=True presumably bypasses further server-side
    # processing of the payload — confirm against EventManager.save.
    return manager.save(project.id, raw=True)
|
bsd-3-clause
|
Willyham/tchannel-python
|
tchannel/messages/ping_request.py
|
1
|
1385
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
from .. import rw
from .base import BaseMessage
from .types import Types
class PingRequestMessage(BaseMessage):
    """Initiate a ping request.

    The message carries no body of its own (see ping_req_rw below);
    only the message_type distinguishes it on the wire.
    """
    message_type = Types.PING_REQ
# Reader/writer for the (empty) ping-request body.
ping_req_rw = rw.instance(PingRequestMessage)  # no body
|
mit
|
tammoippen/nest-simulator
|
pynest/nest/__init__.py
|
8
|
7112
|
# -*- coding: utf-8 -*-
#
# __init__.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Initializer of PyNEST.
"""
import sys
import os
# This is a workaround for readline import errors encountered with Anaconda
# Python running on Ubuntu, when invoked from the terminal
# "python -c 'import nest'"
if 'linux' in sys.platform and 'Anaconda' in sys.version:
import readline
# This is a workaround to avoid segmentation faults when importing
# scipy *after* nest. See https://github.com/numpy/numpy/issues/2521
try:
import scipy
except:
pass
# Make MPI-enabled NEST import properly. The underlying problem is that the
# shared object pynestkernel dynamically opens other libraries that open
# yet other libraries.
try:
# Python 3.3 and later has flags in os
sys.setdlopenflags(os.RTLD_NOW | os.RTLD_GLOBAL)
except AttributeError:
# Python 2.6 and 2.7 have flags in ctypes, but RTLD_NOW may only
# be available in dl or DLFCN and is required at least under
# Ubuntu 14.04. The latter two are not available under OSX,
# but OSX does not have and does not need RTLD_NOW. We therefore
# first try dl and DLFCN, then ctypes just for OSX.
try:
import dl
sys.setdlopenflags(dl.RTLD_GLOBAL | dl.RTLD_NOW)
except (ImportError, AttributeError):
try:
import DLFCN
sys.setdlopenflags(DLFCN.RTLD_GLOBAL | DLFCN.RTLD_NOW)
except (ImportError, AttributeError):
import ctypes
try:
sys.setdlopenflags(ctypes.RTLD_GLOBAL | ctypes.RTLD_NOW)
except AttributeError:
# We must test this last, since it is the only case without
# RTLD_NOW (OSX)
sys.setdlopenflags(ctypes.RTLD_GLOBAL)
from . import pynestkernel as _kernel # noqa
from .lib import hl_api_helper as hl_api # noqa
# The single SLI engine instance backing every PyNEST call.
engine = _kernel.NESTEngine()
# Short aliases for the SLI stack operations; also exposed through
# hl_api so the high-level API modules can reach them.
sli_push = hl_api.sps = engine.push
sli_pop = hl_api.spp = engine.pop
hl_api.pcd = engine.push_connection_datums
hl_api.kernel = _kernel
# Set to True by init(); guards against double initialization.
initialized = False
def catching_sli_run(cmd):
    """Send a command string to the NEST kernel to be executed, catch
    SLI errors and re-raise them in Python.
    Parameters
    ----------
    cmd : str
        The SLI command to be executed.
    Raises
    ------
    NESTError
        SLI errors are bubbled to the Python API as NESTErrors.
    """
    # On Python 3 strings are already text, so both helpers are no-ops;
    # on Python 2 they convert between unicode and UTF-8 bytes.
    if sys.version_info >= (3, ):
        def encode(s):
            return s
        def decode(s):
            return s
    else:
        def encode(s):
            return s.encode('utf-8')
        def decode(s):
            return s.decode('utf-8')
    # 'runprotected' leaves a success flag on the SLI stack; on failure
    # the error name, message and offending command name follow it.
    engine.run('{%s} runprotected' % decode(cmd))
    if not sli_pop():
        errorname = sli_pop()
        message = sli_pop()
        commandname = sli_pop()
        # Reset the SLI stack before raising so the engine stays usable.
        engine.run('clear')
        errorstring = '%s in %s%s' % (errorname, commandname, message)
        raise _kernel.NESTError(encode(errorstring))
sli_run = hl_api.sr = catching_sli_run
def sli_func(s, *args, **kwargs):
    """Convenience function for executing an SLI command s with
    arguments args.
    This executes the SLI sequence:
    ``sli_push(args); sli_run(s); y=sli_pop()``
    Parameters
    ----------
    s : str
        Function to call
    *args
        Arbitrary number of arguments to pass to the SLI function
    **kwargs
        namespace : str
            The sli code is executed in the given SLI namespace.
        litconv : bool
            Convert string args beginning with / to literals.
    Returns
    -------
    The function may have multiple return values. The number of return values
    is determined by the SLI function that was called.
    Raises
    ------
    _kernel.NESTError
        If a keyword argument other than 'namespace' or 'litconv' is given.
    Examples
    --------
    r,q = sli_func('dup rollu add',2,3)
    r = sli_func('add',2,3)
    r = sli_func('add pop',2,3)
    l = sli_func('CreateLayer', {...}, namespace='topology')
    """
    # check for namespace
    slifun = 'sli_func'  # version not converting to literals
    if 'namespace' in kwargs:
        s = kwargs['namespace'] + ' using ' + s + ' endusing'
    elif 'litconv' in kwargs:
        if kwargs['litconv']:
            slifun = 'sli_func_litconv'
    elif len(kwargs) > 0:
        # Bug fix: the error object used to be constructed but never
        # raised, silently accepting invalid keyword arguments.
        raise _kernel.NESTError(
            "'namespace' and 'litconv' are the only valid keyword arguments.")
    sli_push(args)   # push array of arguments on SLI stack
    sli_push(s)      # push command string
    sli_run(slifun)  # SLI support code to execute s on args
    r = sli_pop()    # return value is an array
    if len(r) == 1:  # 1 return value is no tuple
        return r[0]
    if len(r) != 0:
        return r
hl_api.sli_func = sli_func
def init(argv):
    """Initializes NEST.
    Parameters
    ----------
    argv : list
        Command line arguments, passed to the NEST kernel
    Raises
    ------
    _kernel.NESTError
        If NEST was already initialized or initialization fails.
    """
    global initialized
    if initialized:
        # (The unreachable 'return' that followed this raise has been
        # removed.)
        raise _kernel.NESTError("NEST already initialized.")
    # "--quiet" suppresses the welcome banner and must not be passed
    # on to the kernel.
    quiet = False
    if argv.count("--quiet"):
        quiet = True
        argv.remove("--quiet")
    initialized |= engine.init(argv, __path__[0])
    if initialized:
        if not quiet:
            engine.run("pywelcome")
        # Dirty hack to get tab-completion for models in IPython.
        try:
            __IPYTHON__
        except NameError:
            pass
        else:
            try:
                import keyword
                keyword.kwlist += Models()
            except ImportError:
                pass
    else:
        # Bug fix: previously the error object was created but never
        # raised, so a failed initialization went completely unnoticed.
        # Also fixes the "Initiatization" typo in the message.
        raise _kernel.NESTError("Initialization of NEST failed.")
def test():
    """Run the complete PyNEST unit-test suite with debug output."""
    from . import tests
    import unittest
    # Enable debug output for the duration of the run, restoring the
    # previous setting afterwards.
    saved_debug = hl_api.get_debug()
    hl_api.set_debug(True)
    unittest.TextTestRunner(verbosity=2).run(tests.suite())
    hl_api.set_debug(saved_debug)
from .pynestkernel import * # noqa
from .lib.hl_api_helper import * # noqa
# We search through the subdirectory "lib" of the "nest" module
# directory and import the content of all Python files therein into
# the global namespace. This makes the API functions of PyNEST itself
# and those of extra modules available to the user.
for name in os.listdir(os.path.join(os.path.dirname(__file__), "lib")):
    if name.endswith(".py") and not name.startswith('__'):
        # Equivalent to "from .lib.<module> import *" for each file.
        exec("from .lib.{0} import *".format(name[:-3]))
# Initialize NEST at import time unless the user explicitly defers it
# (e.g. to pass custom kernel arguments) via this environment variable.
if 'DELAY_PYNEST_INIT' not in os.environ:
    init(sys.argv)
|
gpl-2.0
|
sivaramakrishnansr/ryu
|
ryu/lib/packet/linux.py
|
38
|
2264
|
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import struct
from . import packet_base
from . import vlan
from . import mpls
from . import ether_types as ether
from ryu.lib import addrconv
class linuxcooked(packet_base.PacketBase):
    """Parser/serializer base for the Linux "cooked" (SLL) header."""

    _PACK_STR = '!HHH8sH'
    _MIN_LEN = struct.calcsize(_PACK_STR)

    def __init__(self, pkt_type, arphrd_type, address_length, address,
                 proto_type):
        super(linuxcooked, self).__init__()
        self.pkt_type = pkt_type
        self.arphrd_type = arphrd_type
        self.address_length = address_length
        self.address = address
        self.proto_type = proto_type

    @classmethod
    def parser(cls, buf):
        # Decode the fixed-size header fields in a single pass.
        fields = struct.unpack_from(cls._PACK_STR, buf)
        pkt_type, arphrd_type, address_length, address, proto_type = fields
        parsed = cls(pkt_type, arphrd_type, address_length, address,
                     proto_type)
        # Hand back (header, payload parser class, remaining bytes).
        payload_cls = linuxcooked.get_packet_type(proto_type)
        return (parsed, payload_cls, buf[linuxcooked._MIN_LEN:])

    @classmethod
    def get_packet_type(cls, type_):
        """Map a protocol type value to its registered payload parser.

        Values at or below 1500 (0x05DC) are IEEE 802.3 length fields
        rather than EtherTypes, so they are collapsed onto the IEEE
        802.3 key before the lookup.
        """
        if type_ <= ether.ETH_TYPE_IEEE802_3:
            type_ = ether.ETH_TYPE_IEEE802_3
        return cls._TYPES.get(type_)
# copy vlan _TYPES
# Reuse vlan's ethertype -> parser table as a starting point, then
# register the payload parsers this header can carry.
linuxcooked._TYPES = vlan.vlan._TYPES
linuxcooked.register_packet_type(vlan.vlan, ether.ETH_TYPE_8021Q)
linuxcooked.register_packet_type(vlan.svlan, ether.ETH_TYPE_8021AD)
linuxcooked.register_packet_type(mpls.mpls, ether.ETH_TYPE_MPLS)
|
apache-2.0
|
Naeka/vosae-app
|
www/timeline/api/resources/base.py
|
1
|
1627
|
# -*- coding:Utf-8 -*-
from tastypie import fields as base_fields
from tastypie_mongoengine import fields
from core.api.utils import TenantResource
from timeline.models import TimelineEntry
from timeline.api.doc import HELP_TEXT
__all__ = (
'TimelineEntryBaseResource',
)
class TimelineEntryBaseResource(TenantResource):
    """Base read-only API resource for timeline entries.

    Declares the fields shared by all timeline entry resources and
    filters the queryset by the requesting user's permissions.
    """
    # Name of the module the entry originates from.
    module = base_fields.CharField(
        attribute='module',
        help_text=HELP_TEXT['timeline_entry']['module']
    )
    datetime = base_fields.DateTimeField(
        attribute='datetime',
        help_text=HELP_TEXT['timeline_entry']['datetime']
    )
    # Denormalized display name, resolved via issuer.get_full_name().
    issuer_name = base_fields.CharField(
        attribute='issuer__get_full_name',
        help_text=HELP_TEXT['entity_saved']['contact'],
        null=True,
    )
    issuer = fields.ReferenceField(
        to='core.api.resources.VosaeUserResource',
        attribute='issuer',
        blank=True,
        null=True,
        help_text=HELP_TEXT['timeline_entry']['issuer']
    )
    class Meta(TenantResource.Meta):
        object_class = TimelineEntry
        excludes = ('tenant', 'access_permission', 'see_permission')
        # Read-only resource: only GET is exposed.
        list_allowed_methods = ('get',)
        detail_allowed_methods = ('get',)
    def get_object_list(self, request):
        """Restrict entries to those the requesting user may access and see."""
        object_list = super(TimelineEntryBaseResource, self).get_object_list(request)
        if request and getattr(request, 'vosae_user', None):
            return object_list.filter(
                access_permission__in=request.vosae_user.permissions.access_perms,
                see_permission__in=request.vosae_user.permissions.see_perms
            )
        return object_list
|
agpl-3.0
|
acfogarty/espressopp
|
contrib/mpi4py/mpi4py-2.0.0/test/test_datatype.py
|
8
|
16240
|
from mpi4py import MPI
import mpiunittest as unittest
# Predefined C datatypes.
datatypes_c = [
    MPI.CHAR, MPI.WCHAR,
    MPI.SIGNED_CHAR, MPI.SHORT, MPI.INT, MPI.LONG,
    MPI.UNSIGNED_CHAR, MPI.UNSIGNED_SHORT, MPI.UNSIGNED, MPI.UNSIGNED_LONG,
    MPI.LONG_LONG, MPI.UNSIGNED_LONG_LONG,
    MPI.FLOAT, MPI.DOUBLE, MPI.LONG_DOUBLE,
    ]
# C99 boolean, fixed-width integer and complex datatypes.
datatypes_c99 = [
    MPI.C_BOOL,
    MPI.INT8_T, MPI.INT16_T, MPI.INT32_T, MPI.INT64_T,
    MPI.UINT8_T, MPI.UINT16_T, MPI.UINT32_T, MPI.UINT64_T,
    MPI.C_COMPLEX, MPI.C_FLOAT_COMPLEX,
    MPI.C_DOUBLE_COMPLEX, MPI.C_LONG_DOUBLE_COMPLEX,
    ]
# Fortran datatypes.
datatypes_f = [
    MPI.CHARACTER, MPI.LOGICAL, MPI.INTEGER,
    MPI.REAL, MPI.DOUBLE_PRECISION,
    MPI.COMPLEX, MPI.DOUBLE_COMPLEX,
    ]
# Fortran 90 size-specific datatypes.
datatypes_f90 = [
    MPI.LOGICAL1, MPI.LOGICAL2, MPI.LOGICAL4, MPI.LOGICAL8,
    MPI.INTEGER1, MPI.INTEGER2, MPI.INTEGER4, MPI.INTEGER8, MPI.INTEGER16,
    MPI.REAL2, MPI.REAL4, MPI.REAL8, MPI.REAL16,
    MPI.COMPLEX4, MPI.COMPLEX8, MPI.COMPLEX16, MPI.COMPLEX32,
    ]
# MPI-specific datatypes.
datatypes_mpi = [
    MPI.PACKED, MPI.BYTE, MPI.AINT, MPI.OFFSET,
    ]
datatypes = []
datatypes += datatypes_c
datatypes += datatypes_c99
datatypes += datatypes_f
datatypes += datatypes_f90
datatypes += datatypes_mpi
# Drop datatypes this MPI implementation does not provide.
datatypes = [t for t in datatypes if t != MPI.DATATYPE_NULL]
# Maps an expected combiner onto the combiner actually reported by the
# vendor; populated by the vendor-specific fixups at module bottom.
combiner_map = {}
class TestDatatype(unittest.TestCase):
    """Exercise MPI datatype queries and the derived-datatype
    constructors against every available predefined datatype.
    """
    def testBoolEqNe(self):
        for dtype in datatypes:
            self.assertTrue (not not dtype)
            self.assertTrue (dtype == MPI.Datatype(dtype))
            self.assertFalse(dtype != MPI.Datatype(dtype))
    def testGetExtent(self):
        for dtype in datatypes:
            lb, ext = dtype.Get_extent()
            self.assertEqual(dtype.lb, lb)
            self.assertEqual(dtype.ub, lb+ext)
            self.assertEqual(dtype.extent, ext)
    def testGetSize(self):
        for dtype in datatypes:
            size = dtype.Get_size()
            self.assertTrue(dtype.size, size)
    def testGetTrueExtent(self):
        for dtype in datatypes:
            try:
                lb, ext = dtype.Get_true_extent()
                self.assertEqual(dtype.true_lb, lb)
                self.assertEqual(dtype.true_ub, lb+ext)
                self.assertEqual(dtype.true_extent, ext)
            except NotImplementedError:
                return
    def testGetEnvelope(self):
        # Predefined datatypes must report the COMBINER_NAMED envelope
        # with no integers, addresses or datatypes attached.
        for dtype in datatypes:
            try:
                envelope = dtype.Get_envelope()
            except NotImplementedError:
                return
            if ('LAM/MPI' == MPI.get_vendor()[0] and
                "COMPLEX" in dtype.name): continue
            ni, na, nd, combiner = envelope
            self.assertEqual(combiner, MPI.COMBINER_NAMED)
            self.assertEqual(ni, 0)
            self.assertEqual(na, 0)
            self.assertEqual(nd, 0)
            self.assertEqual(dtype.envelope, envelope)
            self.assertEqual(dtype.combiner, combiner)
            self.assertTrue(dtype.is_named)
            self.assertTrue(dtype.is_predefined)
            otype = dtype.decode()
            self.assertTrue(dtype is otype)
    def check_datatype_contents(self, oldtype, factory, newtype):
        # Verify envelope/contents consistency of a derived datatype
        # and that decode()/re-construction round-trips.
        try:
            envelope = newtype.Get_envelope()
            contents = newtype.Get_contents()
        except NotImplementedError:
            return
        ni, na, nd, combiner = envelope
        i, a, d = contents
        self.assertEqual(ni, len(i))
        self.assertEqual(na, len(a))
        self.assertEqual(nd, len(d))
        self.assertTrue(combiner != MPI.COMBINER_NAMED)
        self.assertEqual(newtype.envelope, envelope)
        self.assertEqual(newtype.contents, contents)
        self.assertEqual(newtype.combiner, combiner)
        self.assertFalse(newtype.is_named)
        if combiner in (MPI.COMBINER_F90_INTEGER,
                        MPI.COMBINER_F90_REAL,
                        MPI.COMBINER_F90_COMPLEX,):
            self.assertTrue(newtype.is_predefined)
        else:
            self.assertFalse(newtype.is_predefined)
        # Derive the expected combiner symbol from the factory name,
        # mapped through the vendor-specific combiner_map.
        name = factory.__name__
        NAME = name.replace('Create_', '').upper()
        symbol = getattr(MPI, 'COMBINER_' + NAME)
        if symbol == MPI.UNDEFINED: return
        if combiner_map is None: return
        symbol = combiner_map.get(symbol, symbol)
        if symbol is None: return
        self.assertEqual(symbol, combiner)
        decoded = newtype.decode()
        oldtype, constructor, kargs = decoded
        constructor = 'Create_' + constructor.lower()
        newtype2 = getattr(oldtype, constructor)(**kargs)
        decoded2 = newtype2.decode()
        self.assertEqual(decoded[1], decoded2[1])
        self.assertEqual(decoded[2], decoded2[2])
        if combiner not in (MPI.COMBINER_F90_INTEGER,
                            MPI.COMBINER_F90_REAL,
                            MPI.COMBINER_F90_COMPLEX,):
            self.assertFalse(newtype2.is_predefined)
            newtype2.Free()
        else:
            self.assertTrue(newtype2.is_predefined)
    def check_datatype(self, oldtype, factory, *args):
        # Build a datatype via factory, check it before and after
        # Commit(), and free it unless it is a predefined F90 type.
        try:
            if isinstance(oldtype, MPI.Datatype):
                newtype = factory(oldtype, *args)
            else:
                newtype = factory(*args)
        except NotImplementedError:
            return
        self.check_datatype_contents(oldtype, factory, newtype)
        newtype.Commit()
        self.check_datatype_contents(oldtype, factory, newtype)
        combiner = newtype.Get_envelope()[-1]
        if combiner not in (MPI.COMBINER_F90_INTEGER,
                            MPI.COMBINER_F90_REAL,
                            MPI.COMBINER_F90_COMPLEX,):
            newtype.Free()
    def testDup(self):
        for dtype in datatypes:
            factory = MPI.Datatype.Dup
            self.check_datatype(dtype, factory)
    def testCreateContiguous(self):
        for dtype in datatypes:
            for count in range(5):
                factory = MPI.Datatype.Create_contiguous
                args = (count, )
                self.check_datatype(dtype, factory, *args)
    def testCreateVector(self):
        for dtype in datatypes:
            for count in range(5):
                for blocklength in range(5):
                    for stride in range(5):
                        factory = MPI.Datatype.Create_vector
                        args = (count, blocklength, stride)
                        self.check_datatype(dtype, factory, *args)
    def testCreateHvector(self):
        for dtype in datatypes:
            for count in range(5):
                for blocklength in range(5):
                    for stride in range(5):
                        factory = MPI.Datatype.Create_hvector
                        args = (count, blocklength, stride)
                        self.check_datatype(dtype, factory, *args)
    def testCreateIndexed(self):
        for dtype in datatypes:
            for block in range(5):
                blocklengths = list(range(block, block+5))
                displacements = [0]
                for b in blocklengths[:-1]:
                    stride = displacements[-1] + b * dtype.extent + 1
                    displacements.append(stride)
                factory = MPI.Datatype.Create_indexed
                args = (blocklengths, displacements)
                self.check_datatype(dtype, factory, *args)
                #args = (block, displacements) XXX
                #self.check_datatype(dtype, factory, *args) XXX
    def testCreateIndexedBlock(self):
        for dtype in datatypes:
            for block in range(5):
                blocklengths = list(range(block, block+5))
                displacements = [0]
                for b in blocklengths[:-1]:
                    stride = displacements[-1] + b * dtype.extent + 1
                    displacements.append(stride)
                factory = MPI.Datatype.Create_indexed_block
                args = (block, displacements)
                self.check_datatype(dtype, factory, *args)
    def testCreateHindexed(self):
        for dtype in datatypes:
            for block in range(5):
                blocklengths = list(range(block, block+5))
                displacements = [0]
                for b in blocklengths[:-1]:
                    stride = displacements[-1] + b * dtype.extent + 1
                    displacements.append(stride)
                factory = MPI.Datatype.Create_hindexed
                args = (blocklengths, displacements)
                self.check_datatype(dtype, factory, *args)
                #args = (block, displacements) XXX
                #self.check_datatype(dtype, factory, *args) XXX
    def testCreateHindexedBlock(self):
        for dtype in datatypes:
            for block in range(5):
                displacements = [0]
                for i in range(5):
                    stride = displacements[-1] + block * dtype.extent + 1
                    displacements.append(stride)
                factory = MPI.Datatype.Create_hindexed_block
                args = (block, displacements)
                self.check_datatype(dtype, factory, *args)
    def testCreateStruct(self):
        for dtype1 in datatypes:
            for dtype2 in datatypes:
                dtypes = (dtype1, dtype2)
                blocklengths = (2, 3)
                displacements = [0]
                for dtype in dtypes[:-1]:
                    stride = displacements[-1] + dtype.extent
                    displacements.append(stride)
                factory = MPI.Datatype.Create_struct
                args = (blocklengths, displacements, dtypes)
                self.check_datatype(dtypes, factory, *args)
    def testCreateSubarray(self):
        for dtype in datatypes:
            for ndim in range(1, 5):
                for size in range(1, 5):
                    for subsize in range(1, size):
                        for start in range(size-subsize):
                            for order in [MPI.ORDER_C,
                                          MPI.ORDER_FORTRAN,
                                          MPI.ORDER_F,
                                          ]:
                                sizes = [size] * ndim
                                subsizes = [subsize] * ndim
                                starts = [start] * ndim
                                factory = MPI.Datatype.Create_subarray
                                args = sizes, subsizes, starts, order
                                self.check_datatype(dtype, factory, *args)
    def testCreateDarray(self):
        for dtype in datatypes:
            for ndim in range(1, 3+1):
                for size in (4, 8, 9, 27):
                    for rank in (0, size-1):
                        for dist in [MPI.DISTRIBUTE_BLOCK, MPI.DISTRIBUTE_CYCLIC]:
                            for order in [MPI.ORDER_C, MPI.ORDER_F]:
                                gsizes = [size]*ndim
                                distribs = [dist]*ndim
                                dargs = [MPI.DISTRIBUTE_DFLT_DARG]*ndim
                                psizes = MPI.Compute_dims(size, [0]*ndim)
                                factory = MPI.Datatype.Create_darray
                                args = size, rank, gsizes, distribs, dargs, psizes, order
                                self.check_datatype(dtype, factory, *args)
    def testCreateF90Integer(self):
        for r in (1, 2, 4):
            factory = MPI.Datatype.Create_f90_integer
            args = (r,)
            self.check_datatype(None, factory, *args)
    def testCreateF90RealSingle(self):
        (p, r) = (6, 30)
        factory = MPI.Datatype.Create_f90_real
        args = (p, r)
        self.check_datatype(None, factory, *args)
    def testCreateF90RealDouble(self):
        (p, r) = (15, 300)
        factory = MPI.Datatype.Create_f90_real
        args = (p, r)
        self.check_datatype(None, factory, *args)
    def testCreateF90ComplexSingle(self):
        (p, r) = (6, 30)
        factory = MPI.Datatype.Create_f90_complex
        args = (p, r)
        self.check_datatype(None, factory, *args)
    def testCreateF90ComplexDouble(self):
        (p, r) = (15, 300)
        factory = MPI.Datatype.Create_f90_complex
        args = (p, r)
        self.check_datatype(None, factory, *args)
    # Byte sizes expected to be matchable per typeclass; trimmed by the
    # vendor-specific fixups at module bottom where unsupported.
    match_size_integer = [1, 2, 4, 8]
    match_size_real = [4, 8]
    match_size_complex = [8, 16]
    def testMatchSize(self):
        typeclass = MPI.TYPECLASS_INTEGER
        for size in self.match_size_integer:
            datatype = MPI.Datatype.Match_size(typeclass, size)
            self.assertEqual(size, datatype.size)
        typeclass = MPI.TYPECLASS_REAL
        for size in self.match_size_real:
            datatype = MPI.Datatype.Match_size(typeclass, size)
            self.assertEqual(size, datatype.size)
        typeclass = MPI.TYPECLASS_COMPLEX
        for size in self.match_size_complex:
            datatype = MPI.Datatype.Match_size(typeclass, size)
            self.assertEqual(size, datatype.size)
    def testCreateResized(self):
        for dtype in datatypes:
            for lb in range(-10, 10):
                for extent in range(1, 10):
                    factory = MPI.Datatype.Create_resized
                    args = lb, extent
                    self.check_datatype(dtype, factory, *args)
    def testGetSetName(self):
        for dtype in datatypes:
            try:
                name = dtype.Get_name()
                self.assertTrue(name)
                dtype.Set_name(name)
                self.assertEqual(name, dtype.Get_name())
            except NotImplementedError:
                return
    def testCommit(self):
        for dtype in datatypes:
            dtype.Commit()
class TestGetAddress(unittest.TestCase):
    """Check MPI.Get_address against buffer addresses reported by the
    stdlib array module and (when available) NumPy.
    """
    def testGetAddress(self):
        try:
            from array import array
            location = array('i', range(10))
            bufptr, _ = location.buffer_info()
            addr = MPI.Get_address(location)
            self.assertEqual(addr, bufptr)
        except ImportError:
            pass
        try:
            from numpy import asarray
            location = asarray(range(10), dtype='i')
            bufptr, _ = location.__array_interface__['data']
            addr = MPI.Get_address(location)
            self.assertEqual(addr, bufptr)
        except ImportError:
            pass
import sys
# Vendor- and version-specific adjustments: some MPI implementations
# report non-standard combiners or lack datatypes/features, so the
# affected tests and datatypes are mapped away or removed below.
name, version = MPI.get_vendor()
if name == 'LAM/MPI':
    combiner_map[MPI.COMBINER_INDEXED_BLOCK] = MPI.COMBINER_INDEXED
elif name == 'MPICH1':
    # MPICH1 combiners are unreliable; None disables the check.
    combiner_map[MPI.COMBINER_VECTOR] = None
    combiner_map[MPI.COMBINER_HVECTOR] = None
    combiner_map[MPI.COMBINER_INDEXED] = None
    combiner_map[MPI.COMBINER_HINDEXED_BLOCK] = None
    for t in datatypes_f: datatypes.remove(t)
elif MPI.Get_version() < (2,0):
    combiner_map = None
if MPI.Get_version() < (2,0):
    # Match_size is an MPI-2 feature.
    del TestDatatype.testMatchSize
if name == 'Open MPI':
    del TestDatatype.testCreateF90RealSingle
    del TestDatatype.testCreateF90RealDouble
    del TestDatatype.testCreateF90ComplexSingle
    del TestDatatype.testCreateF90ComplexDouble
    # Zero-size CHARACTER indicates Fortran support is absent.
    if MPI.CHARACTER.Get_size() == 0:
        del TestDatatype.testMatchSize
        for dtype in datatypes_f + datatypes_f90:
            if dtype and dtype in datatypes:
                datatypes.remove(dtype)
    if version < (1,8,2) and MPI.VERSION == 3:
        del TestDatatype.testCreateHindexedBlock
    if (1,6,0) < version < (1,7,0):
        del TestDatatype.match_size_complex[:]
    if version < (1,5,2):
        for t in datatypes_f90[-4:]:
            if t != MPI.DATATYPE_NULL:
                datatypes.remove(t)
if sys.platform.startswith('win'):
    del TestDatatype.testCommit
    del TestDatatype.testDup
    del TestDatatype.testCreateResized
    if name == 'Microsoft MPI':
        del TestDatatype.testCreateF90RealSingle
        del TestDatatype.testCreateF90RealDouble
        del TestDatatype.testCreateF90ComplexSingle
        del TestDatatype.testCreateF90ComplexDouble
if name == 'Platform MPI':
    combiner_map[MPI.COMBINER_INDEXED_BLOCK] = MPI.COMBINER_INDEXED
    combiner_map[MPI.COMBINER_DARRAY] = MPI.COMBINER_STRUCT
    combiner_map[MPI.COMBINER_SUBARRAY] = MPI.COMBINER_STRUCT
    del TestDatatype.match_size_complex[:]
if sys.version_info[0] >=3:
    del TestGetAddress
if __name__ == '__main__':
    unittest.main()
|
gpl-3.0
|
lfckop/graphite-web
|
webapp/tests/test_finders.py
|
39
|
1517
|
import random
import time
from django.test import TestCase
from graphite.intervals import Interval, IntervalSet
from graphite.node import LeafNode, BranchNode
from graphite.storage import Store, get_finder
class FinderTest(TestCase):
    """Verify that Store works with a custom finder implementation."""
    def test_custom_finder(self):
        store = Store(finders=[get_finder('tests.test_finders.DummyFinder')])
        # 'foo' resolves to a single node.
        nodes = list(store.find("foo"))
        self.assertEqual(len(nodes), 1)
        self.assertEqual(nodes[0].path, 'foo')
        # 'bar.*' expands to the ten leaves produced by DummyFinder.
        nodes = list(store.find('bar.*'))
        self.assertEqual(len(nodes), 10)
        node = nodes[0]
        self.assertEqual(node.path.split('.')[0], 'bar')
        # DummyReader returns one point per 10-second step.
        time_info, series = node.fetch(100, 200)
        self.assertEqual(time_info, (100, 200, 10))
        self.assertEqual(len(series), 10)
class DummyReader(object):
    """Reader stub producing random series data at 10-second steps."""
    __slots__ = ('path',)
    def __init__(self, path):
        self.path = path
    def fetch(self, start_time, end_time):
        # Floor division so npoints stays an int on Python 3 as well;
        # the old true division made range() raise TypeError there
        # (identical result for int inputs under Python 2).
        npoints = (end_time - start_time) // 10
        return (start_time, end_time, 10), [
            random.choice([None, 1, 2, 3]) for i in range(npoints)
        ]
    def get_intervals(self):
        # Advertise data availability for the last hour.
        return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
    """Finder fixture: one branch for 'foo', ten leaves for 'bar.*'."""

    def find_nodes(self, query):
        if query.pattern == 'foo':
            yield BranchNode('foo')

        elif query.pattern == 'bar.*':
            # range() instead of the Python 2-only xrange(): identical
            # iteration behaviour, but also works under Python 3.
            for i in range(10):
                path = 'bar.{0}'.format(i)
                yield LeafNode(path, DummyReader(path))
|
apache-2.0
|
skidzo/sympy
|
sympy/stats/error_prop.py
|
19
|
3119
|
"""Tools for arithmetic error propogation."""
from __future__ import print_function, division
from itertools import repeat, combinations
from sympy import S, Symbol, Add, Mul, simplify, Pow, exp
from sympy.stats.symbolic_probability import RandomSymbol, Variance, Covariance
_arg0_or_var = lambda var: var.args[0] if len(var.args) > 0 else var
def variance_prop(expr, consts=(), include_covar=False):
    r"""Symbolically propagates variance (`\sigma^2`) for expressions.

    This is computed as seen in [1]_.

    Parameters
    ==========
    expr : Expr
        A sympy expression to compute the variance for.
    consts : sequence of Symbols, optional
        Represents symbols that are known constants in the expr,
        and thus have zero variance. All symbols not in consts are
        assumed to be variant.
    include_covar : bool, optional
        Flag for whether or not to include covariances, default=False.

    Returns
    =======
    var_expr : Expr
        An expression for the total variance of the expr.
        The variance for the original symbols (e.g. x) are represented
        via instance of the Variance symbol (e.g. Variance(x)).

    Examples
    ========
    >>> from sympy import symbols, exp
    >>> from sympy.stats.error_prop import variance_prop
    >>> x, y = symbols('x y')
    >>> variance_prop(x + y)
    Variance(x) + Variance(y)
    >>> variance_prop(x * y)
    x**2*Variance(y) + y**2*Variance(x)
    >>> variance_prop(exp(2*x))
    4*exp(4*x)*Variance(x)

    References
    ==========
    .. [1] https://en.wikipedia.org/wiki/Propagation_of_uncertainty
    """
    args = expr.args
    if len(args) == 0:
        # Leaf node: constants contribute no variance; bare symbols are
        # wrapped as random symbols with a symbolic Variance term.
        if expr in consts:
            return S(0)
        elif isinstance(expr, RandomSymbol):
            return Variance(expr).doit()
        elif isinstance(expr, Symbol):
            return Variance(RandomSymbol(expr)).doit()
        else:
            return S(0)
    nargs = len(args)
    # Recurse into every sub-expression first.
    var_args = list(map(variance_prop, args, repeat(consts, nargs),
                        repeat(include_covar, nargs)))
    if isinstance(expr, Add):
        # var(x + y) = var(x) + var(y) [+ 2*cov(x, y)]
        var_expr = Add(*var_args)
        if include_covar:
            terms = [2 * Covariance(_arg0_or_var(x), _arg0_or_var(y)).doit() \
                     for x, y in combinations(var_args, 2)]
            var_expr += Add(*terms)
    elif isinstance(expr, Mul):
        # Relative variances add for a product:
        # var(f)/f**2 = sum(var(a)/a**2) [+ cov terms]
        terms = [v/a**2 for a, v in zip(args, var_args)]
        var_expr = simplify(expr**2 * Add(*terms))
        if include_covar:
            terms = [2*Covariance(_arg0_or_var(x), _arg0_or_var(y)).doit()/(a*b) \
                     for (a, b), (x, y) in zip(combinations(args, 2),
                                               combinations(var_args, 2))]
            var_expr += Add(*terms)
    elif isinstance(expr, Pow):
        # var(x**b) = var(x) * (b * x**b / x)**2; exponent variance ignored.
        b = args[1]
        v = var_args[0] * (expr * b / args[0])**2
        var_expr = simplify(v)
    elif isinstance(expr, exp):
        # var(exp(x)) = var(x) * exp(x)**2
        var_expr = simplify(var_args[0] * expr**2)
    else:
        # unknown how to proceed, return variance of whole expr.
        var_expr = Variance(expr)
    return var_expr
|
bsd-3-clause
|
KevinNJ/Projects
|
Sallen Key Solver/SallenKey_Design.py
|
1
|
5404
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
import random
import matplotlib.pyplot as plt
import scipy.signal as sig
from itertools import product
from misc import common_part_values, metric_prefix
from anneal import Annealer
# Setup optimization targets
target_q = 0.707  # 1/sqrt(2) - Which is the maximally flat response
target_freq = 500  # Hz
target_atten = -40  # dB
# Candidate component values from misc.common_part_values()
# (presumably standard resistor/capacitor series -- TODO confirm).
rvalues, cvalues = common_part_values()
def f0(system):
    """Return the natural frequency (Hz) of the Sallen-Key system."""
    c1, c2, r1, r2 = system
    tau = math.sqrt(c1 * r1 * c2 * r2)
    return 1.0 / (2.0 * math.pi * tau)
def q(system):
    """Return the quality factor Q of the system."""
    c1, c2, r1, r2 = system
    numerator = math.sqrt(c1 * c2 * r1 * r2)
    denominator = c2 * (r1 + r2)
    return numerator / denominator
def frf(system):
    """Return the Frequency Response Function of the system.

    Returns a function of frequency (Hz) that evaluates to the complex
    frequency response at that frequency, e.g. ``frf(system)(10)``.
    """
    c1, c2, r1, r2 = system

    def response(f):
        w = 2 * math.pi * f
        gain = 1 / (c1 * c2 * r1 * r2)
        denom = gain + (r1 + r2) / (c1 * r1 * r2) * 1j * w - w ** 2
        return gain / denom

    return response
def dB(x):
    """Return the magnitude of *x* (may be complex) in decibels."""
    magnitude = abs(x)
    return 20 * math.log10(magnitude)
def stepResponse(system):
    """Return (t, y): the unit-step response of the system's transfer fn."""
    c1, c2, r1, r2 = system
    w0_squared = 1 / (c1 * c2 * r1 * r2)
    damping = (r1 + r2) / (c1 * r1 * r2)
    numerator = w0_squared
    denominator = (1, damping, w0_squared)
    return sig.step((numerator, denominator))
def energy(system):
    """Return the optimization cost of *system* (lower is better, 0 ideal).

    Sums four normalized penalty terms: attenuation error at the target
    frequency, Q-factor error, and (down-weighted) mismatch between the
    two capacitors and the two resistors.

    Fix: the original also computed ``f0_ = f0(system)`` without ever
    using it; the dead call is removed.
    """
    frf_ = frf(system)
    q_ = q(system)
    c1, c2, r1, r2 = system
    e = 0
    # percent error off frequency @ attenuation
    e += abs(target_atten - dB(frf_(target_freq))) / abs(target_atten)
    # percent error off ideal Q value
    e += abs(target_q - q_) / abs(target_q)
    # percent difference in capacitor values (weighted 10%)
    e += abs(c1 - c2) / abs((c1 + c2) / 2) * 0.1
    # percent difference in resistor values (weighted 10%)
    e += abs(r1 - r2) / abs((r1 + r2) / 2) * 0.1
    return e
def move(system):
    """Randomly change one component value of *system* in place.

    Chooses one of the four components and replaces it with a random
    entry from the matching pool (capacitors for slots 0-1, resistors
    for slots 2-3). The random-call sequence matches the original:
    one randrange for the slot, one randrange for the pool index.
    """
    pools = (cvalues, cvalues, rvalues, rvalues)
    component = random.randrange(0, 4)
    pool = pools[component]
    index = random.randrange(0, len(pool))
    system[component] = pool[index]
if __name__ == '__main__':
    # set up simulated annealing algorithm
    units = ('F', 'F', u'Ω', u'Ω')  # units of the values in the system
    # Start from the smallest capacitor/resistor values in each pool.
    initial_system = [cvalues[0], cvalues[0], rvalues[0], rvalues[0]]
    annealer = Annealer(energy, move)
    # Auto-derive an annealing schedule from a short (0.1 min) trial run.
    schedule = annealer.auto(initial_system, minutes=0.1)

    # run simulated annealing algorithm and compute properties of the final system
    final_system, error = annealer.anneal(initial_system, schedule['tmax'], schedule['tmin'], schedule['steps'], updates=100)
    final_frf = frf(final_system)
    final_f0 = f0(final_system)
    final_q = q(final_system)
    # Human-readable component values, e.g. "4.7 nF" (via metric_prefix).
    final_vals = [metric_prefix(*s) for s in zip(final_system, units)]
    print 'Soln: (%s), Remaining Energy: %s' % (', '.join(final_vals), error)

    # calculate data for graphs
    freqs = range(1000000)  # response from 0 Hz to 1 MHz
    response = [dB(final_frf(f)) for f in freqs]
    # Marker points: natural frequency and the target-attenuation frequency.
    natural = final_f0, dB(final_frf(final_f0))
    target = target_freq, dB(final_frf(target_freq))
    step_freqs, step_response = stepResponse(final_system)

    plt.figure()

    # bode response plot
    ax = plt.subplot(2, 1, 1)
    plt.semilogx(freqs, response)
    plt.semilogx(natural[0], natural[1], 'r+', ms=10)
    plt.annotate('Natural Freq: (%.2f Hz, %.2f dB) ' % natural, xy=natural, xytext=(10, 10), textcoords='offset points')
    plt.semilogx(target[0], target[1], 'r+', ms=10)
    plt.annotate('target attenuation: (%.2f Hz, %.2f dB)' % target, xy=target, xytext=(10, 10), textcoords='offset points')
    plt.title('Bode Plot (F0: %.2f Hz, Q-Factor: %.2f)\n' % (final_f0, final_q) + 'Soln: (%s)' % ', '.join(final_vals))
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Gain [dB]')
    # Clamp the top of the y-axis to +20 dB so resonance peaks don't
    # dominate the plot.
    lims = list(ax.get_ylim())
    lims[1] = 20
    plt.ylim(lims)

    # step response plot
    plt.subplot(2, 1, 2)
    plt.plot(step_freqs, step_response)
    plt.title('Step Response')
    plt.xlabel('Time (s)')
    plt.ylabel('Response (v)')

    plt.show()
"""
References:
[1] http://en.wikipedia.org/wiki/Sallen%E2%80%93Key_topology
[2] http://en.wikipedia.org/wiki/Q_factor
[3] http://sim.okawa-denshi.jp/en/OPstool.php
[4] http://www.falstad.com/circuit/
"""
|
mit
|
popazerty/blackhole-vuplus
|
tools/svg2skin.py
|
57
|
2482
|
#!/usr/bin/python
# don't expect too much.
# this is a really simple&stupid svg parser, which will use rectangles
# and text fields to produce <widget> snippets for a skin.
# use object "id" fields for source names if you want.
# extracting font information is buggy.
# if you want text fields, please use flow text regions, instead of simple
# text. otherwise, width and height are unknown.
#
# tested only with a single inkscape-generated SVG.
import sys
import os
import string
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
def getattrs(attrs, *names):
    """Return the named attributes from *attrs* as floats, in order."""
    return [float(attrs[n]) for n in names]
def parsedict(attrs):
    """Parse an SVG style string ("key:val;key:val") into a dict.

    Fixes two issues in the original:
    - empty/None input returned a *list* while non-empty input returned
      a dict; now the return type is consistently a dict (callers only
      do membership tests and lookups, so this is compatible),
    - a value containing ':' raised ValueError from the 2-way split;
      partition keeps everything after the first colon as the value.
    """
    if not attrs:
        return {}
    result = {}
    for item in attrs.split(';'):
        key, _, val = item.partition(':')
        result[key] = val
    return result
def px(value):
    """Round an SVG length such as '12.3px' to the nearest integer."""
    number = float(value[:-2])  # strip the trailing unit suffix
    return int(number + .5)
def contains(box_o, box_i):
    """Return True when box_o encloses box_i (element-wise comparison)."""
    lower_ok = box_o[0] <= box_i[0] and box_o[1] <= box_i[1]
    upper_ok = box_o[2] >= box_i[2] and box_o[3] >= box_i[3]
    return lower_ok and upper_ok
class parseXML(ContentHandler):
    """SAX handler emitting a skin <widget> line per SVG rect/text element.

    Used in two passes: with ``find_bbox`` set it only records the
    outermost rectangle as the screen bounding box; with it cleared it
    prints one widget per element, offset by that bounding box.
    """

    def __init__(self):
        self.isPointsElement, self.isReboundsElement = 0, 0
        self.bbox = None        # (x, y, width, height) of the outermost rect
        self.find_bbox = False  # pass selector, toggled by the caller
        self.flow = None        # pending flowRoot id; overrides the next rect's id

    def startElement(self, name, attrs):
        if self.find_bbox:
            # Pass 1: keep the rect that encloses the current candidate.
            if name != "rect":
                return
            box = getattrs(attrs, "x", "y", "width", "height")
            if not self.bbox or contains(box, self.bbox):
                self.bbox = box
            return
        if name == "rect":
            (x, y, width, height) = getattrs(attrs, "x", "y", "width", "height")
            # Positions are made relative to the screen bounding box.
            x -= self.bbox[0]
            y -= self.bbox[1]
            id = attrs["id"]
            if self.flow:
                # Inkscape flow text: the region rect carries the geometry,
                # the preceding flowRoot carries the meaningful id.
                id = self.flow
                self.flow = None
            styles = parsedict(attrs.get("style", ""))
        elif name == "text":
            (x, y) = getattrs(attrs, "x", "y")
            x -= self.bbox[0]
            y -= self.bbox[1]
            # Plain <text> has no geometry beyond the anchor point
            # (see the header comment: width/height are unknown).
            width, height = 0, 0
            styles = parsedict(attrs["style"])
            id = attrs["id"]
        elif name == "flowRoot":
            # Remember the id; the flowRegion's rect follows.
            self.flow = attrs["id"]
            return
        else:
            return
        if "font-size" in styles:
            font = ' font="Regular;%d"' % px(styles["font-size"])
        else:
            font = ""
        print """\t\t<widget source="%s" render="Label" position="%d,%d" size="%d,%d" %s />""" % (id, x, y, width, height, font)
# Pass 1: scan the SVG once just to find the outermost rectangle, which
# becomes the screen bounding box.
parser = make_parser()
contentHandler = parseXML()
parser.setContentHandler(contentHandler)
contentHandler.find_bbox = True
parser.parse(sys.argv[1])
bboxi = tuple([int(x) for x in contentHandler.bbox])
contentHandler.find_bbox = False
print '\t<screen name="" position="%d,%d" size="%d,%d" title="">' % bboxi
# Pass 2: re-parse and emit one <widget> line per rect/text element.
parser.parse(sys.argv[1])
print '\t</screen>'
|
gpl-2.0
|
glogiotatidis/mozillians-new
|
vendor-local/lib/python/celery/tests/test_task/__init__.py
|
14
|
31806
|
from __future__ import absolute_import
from __future__ import with_statement
from datetime import datetime, timedelta
from functools import wraps
from celery import task
from celery.app import app_or_default
from celery.task import task as task_dec
from celery.exceptions import RetryTaskError
from celery.execute import send_task
from celery.result import EagerResult
from celery.schedules import crontab, crontab_parser, ParseException
from celery.utils import uuid
from celery.utils.timeutils import parse_iso8601
from celery.tests.utils import Case, with_eager_tasks, WhateverIO
def return_True(*args, **kwargs):
    """Always return True, ignoring all arguments.

    A module-level function rather than a closure/lambda because task
    run functions are pickled.
    """
    return True
return_True_task = task_dec()(return_True)
def raise_exception(self, **kwargs):
    """Raise a generic Exception mentioning the receiver's class."""
    message = "%s error" % self.__class__
    raise Exception(message)
class MockApplyTask(task.Task):
    """Fixture whose apply_async() is a no-op, so nothing is ever queued."""

    def run(self, x, y):
        return x * y

    @classmethod
    def apply_async(self, *args, **kwargs):
        # Deliberately swallow the call; tests only check that retry()
        # routed through apply_async.
        pass


class IncrementCounterTask(task.Task):
    """Fixture that bumps a class-level counter each time it runs."""
    name = "c.unittest.increment_counter_task"
    count = 0  # shared across instances; tests reset it explicitly

    def run(self, increment_by=1, **kwargs):
        increment_by = increment_by or 1  # treat 0/None as the default step
        self.__class__.count += increment_by
        return self.__class__.count


class RaisingTask(task.Task):
    """Fixture that always fails with KeyError("foo")."""
    name = "c.unittest.raising_task"

    def run(self, **kwargs):
        raise KeyError("foo")


class RetryTask(task.Task):
    """Fixture that retries itself until the retry limit is reached."""
    max_retries = 3
    iterations = 0  # class-wide count of run() invocations

    def run(self, arg1, arg2, kwarg=1, max_retries=None, care=True):
        self.__class__.iterations += 1
        # A per-call max_retries overrides the class attribute.
        rmax = self.max_retries if max_retries is None else max_retries

        retries = self.request.retries
        if care and retries >= rmax:
            return arg1
        else:
            # With care=False this retries past rmax, eventually raising
            # MaxRetriesExceededError.
            return self.retry(countdown=0, max_retries=max_retries)


class RetryTaskNoArgs(task.Task):
    """Retry fixture driven purely by the task_retries kwarg."""
    max_retries = 3
    iterations = 0

    def run(self, **kwargs):
        self.__class__.iterations += 1

        retries = kwargs["task_retries"]
        if retries >= 3:
            return 42
        else:
            return self.retry(kwargs=kwargs, countdown=0)


class RetryTaskMockApply(task.Task):
    """Retry fixture recording that apply_async was reached (never queues)."""
    max_retries = 3
    iterations = 0
    applied = 0  # set to 1 by apply_async; tests reset it

    def run(self, arg1, arg2, kwarg=1, **kwargs):
        self.__class__.iterations += 1

        retries = kwargs["task_retries"]
        if retries >= 3:
            return arg1
        else:
            kwargs.update({"kwarg": kwarg})
            return self.retry(args=[arg1, arg2], kwargs=kwargs, countdown=0)

    @classmethod
    def apply_async(self, *args, **kwargs):
        self.applied = 1


class MyCustomException(Exception):
    """Random custom exception."""


class RetryTaskCustomExc(task.Task):
    """Retry fixture that retries with a custom exception as the cause."""
    max_retries = 3
    iterations = 0

    def run(self, arg1, arg2, kwarg=1, **kwargs):
        self.__class__.iterations += 1

        retries = kwargs["task_retries"]
        if retries >= 3:
            return arg1 + kwarg
        else:
            try:
                raise MyCustomException("Elaine Marie Benes")
            # NOTE(review): Python 2-only except syntax; would need
            # "except MyCustomException as exc" if this file were ever
            # ported to Python 3.
            except MyCustomException, exc:
                kwargs.update({"kwarg": kwarg})
                return self.retry(args=[arg1, arg2], kwargs=kwargs,
                                  countdown=0, exc=exc)
class TestTaskRetries(Case):
    """Behavioural tests for Task.retry() counting and limits."""

    def test_retry(self):
        RetryTask.max_retries = 3
        RetryTask.iterations = 0
        result = RetryTask.apply([0xFF, 0xFFFF])
        self.assertEqual(result.get(), 0xFF)
        # Initial run plus 3 retries.
        self.assertEqual(RetryTask.iterations, 4)

        RetryTask.max_retries = 3
        RetryTask.iterations = 0
        # Per-call max_retries overrides the class attribute.
        result = RetryTask.apply([0xFF, 0xFFFF], {"max_retries": 10})
        self.assertEqual(result.get(), 0xFF)
        self.assertEqual(RetryTask.iterations, 11)

    def test_retry_no_args(self):
        RetryTaskNoArgs.max_retries = 3
        RetryTaskNoArgs.iterations = 0
        result = RetryTaskNoArgs.apply()
        self.assertEqual(result.get(), 42)
        self.assertEqual(RetryTaskNoArgs.iterations, 4)

    def test_retry_kwargs_can_be_empty(self):
        with self.assertRaises(RetryTaskError):
            RetryTaskMockApply.retry(args=[4, 4], kwargs=None)

    def test_retry_not_eager(self):
        RetryTaskMockApply.request.called_directly = False
        exc = Exception("baz")
        # throw=False: retry must go through apply_async without raising.
        try:
            RetryTaskMockApply.retry(args=[4, 4], kwargs={"task_retries": 0},
                                     exc=exc, throw=False)
            self.assertTrue(RetryTaskMockApply.applied)
        finally:
            RetryTaskMockApply.applied = 0

        # throw=True: still applied, but RetryTaskError propagates.
        try:
            with self.assertRaises(RetryTaskError):
                RetryTaskMockApply.retry(
                    args=[4, 4], kwargs={"task_retries": 0},
                    exc=exc, throw=True)
            self.assertTrue(RetryTaskMockApply.applied)
        finally:
            RetryTaskMockApply.applied = 0

    def test_retry_with_kwargs(self):
        RetryTaskCustomExc.max_retries = 3
        RetryTaskCustomExc.iterations = 0
        result = RetryTaskCustomExc.apply([0xFF, 0xFFFF], {"kwarg": 0xF})
        self.assertEqual(result.get(), 0xFF + 0xF)
        self.assertEqual(RetryTaskCustomExc.iterations, 4)

    def test_retry_with_custom_exception(self):
        # With max_retries below the success threshold, the custom
        # exception passed via exc= is what the caller sees.
        RetryTaskCustomExc.max_retries = 2
        RetryTaskCustomExc.iterations = 0
        result = RetryTaskCustomExc.apply([0xFF, 0xFFFF], {"kwarg": 0xF})
        with self.assertRaises(MyCustomException):
            result.get()
        self.assertEqual(RetryTaskCustomExc.iterations, 3)

    def test_max_retries_exceeded(self):
        RetryTask.max_retries = 2
        RetryTask.iterations = 0
        result = RetryTask.apply([0xFF, 0xFFFF], {"care": False})
        with self.assertRaises(RetryTask.MaxRetriesExceededError):
            result.get()
        self.assertEqual(RetryTask.iterations, 3)

        RetryTask.max_retries = 1
        RetryTask.iterations = 0
        result = RetryTask.apply([0xFF, 0xFFFF], {"care": False})
        with self.assertRaises(RetryTask.MaxRetriesExceededError):
            result.get()
        self.assertEqual(RetryTask.iterations, 2)
class TestCeleryTasks(Case):
    """End-to-end behaviour of plain Task classes: publishing, consuming,
    naming, state updates and logging."""

    def test_unpickle_task(self):
        import pickle

        @task_dec
        def xxx():
            pass

        # Tasks unpickle back to the registered instance, not a copy.
        self.assertIs(pickle.loads(pickle.dumps(xxx)), xxx)

    def createTaskCls(self, cls_name, task_name=None):
        # Dynamically build a Task subclass whose run() returns True.
        attrs = {"__module__": self.__module__}
        if task_name:
            attrs["name"] = task_name
        cls = type(cls_name, (task.Task, ), attrs)
        cls.run = return_True
        return cls

    def test_AsyncResult(self):
        task_id = uuid()
        result = RetryTask.AsyncResult(task_id)
        self.assertEqual(result.backend, RetryTask.backend)
        self.assertEqual(result.task_id, task_id)

    def assertNextTaskDataEqual(self, consumer, presult, task_name,
                                test_eta=False, test_expires=False, **kwargs):
        # Pop the next message off the queue and verify its payload
        # matches the published task; extra kwargs are checked against
        # the message's task kwargs.
        next_task = consumer.fetch()
        task_data = next_task.decode()
        self.assertEqual(task_data["id"], presult.task_id)
        self.assertEqual(task_data["task"], task_name)
        task_kwargs = task_data.get("kwargs", {})
        if test_eta:
            # eta/expires are serialized as ISO-8601 strings.
            self.assertIsInstance(task_data.get("eta"), basestring)
            to_datetime = parse_iso8601(task_data.get("eta"))
            self.assertIsInstance(to_datetime, datetime)
        if test_expires:
            self.assertIsInstance(task_data.get("expires"), basestring)
            to_datetime = parse_iso8601(task_data.get("expires"))
            self.assertIsInstance(to_datetime, datetime)
        for arg_name, arg_value in kwargs.items():
            self.assertEqual(task_kwargs.get(arg_name), arg_value)

    def test_incomplete_task_cls(self):
        class IncompleteTask(task.Task):
            name = "c.unittest.t.itask"

        # A Task without run() must raise when executed.
        with self.assertRaises(NotImplementedError):
            IncompleteTask().run()

    def test_task_kwargs_must_be_dictionary(self):
        with self.assertRaises(ValueError):
            IncrementCounterTask.apply_async([], "str")

    def test_task_args_must_be_list(self):
        with self.assertRaises(ValueError):
            IncrementCounterTask.apply_async("str", {})

    def test_regular_task(self):
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        self.assertIsInstance(T1(), T1)
        self.assertTrue(T1().run())
        self.assertTrue(callable(T1()),
                "Task class is callable()")
        self.assertTrue(T1()(),
                "Task class runs run() when called")

        # task name generated out of class module + name.
        T2 = self.createTaskCls("T2")
        self.assertTrue(T2().name.endswith("test_task.T2"))

        t1 = T1()
        consumer = t1.get_consumer()
        with self.assertRaises(NotImplementedError):
            consumer.receive("foo", "foo")
        consumer.discard_all()
        self.assertIsNone(consumer.fetch())

        # Without arguments.
        presult = t1.delay()
        self.assertNextTaskDataEqual(consumer, presult, t1.name)

        # With arguments.
        presult2 = t1.apply_async(kwargs=dict(name="George Costanza"))
        self.assertNextTaskDataEqual(consumer, presult2, t1.name,
                name="George Costanza")

        # send_task
        sresult = send_task(t1.name, kwargs=dict(name="Elaine M. Benes"))
        self.assertNextTaskDataEqual(consumer, sresult, t1.name,
                name="Elaine M. Benes")

        # With eta.
        presult2 = t1.apply_async(kwargs=dict(name="George Costanza"),
                eta=datetime.utcnow() + timedelta(days=1),
                expires=datetime.utcnow() + timedelta(days=2))
        self.assertNextTaskDataEqual(consumer, presult2, t1.name,
                name="George Costanza", test_eta=True, test_expires=True)

        # With countdown.
        presult2 = t1.apply_async(kwargs=dict(name="George Costanza"),
                countdown=10, expires=12)
        self.assertNextTaskDataEqual(consumer, presult2, t1.name,
                name="George Costanza", test_eta=True, test_expires=True)

        # Discarding all tasks.
        consumer.discard_all()
        t1.apply_async()
        self.assertEqual(consumer.discard_all(), 1)
        self.assertIsNone(consumer.fetch())

        self.assertFalse(presult.successful())
        t1.backend.mark_as_done(presult.task_id, result=None)
        self.assertTrue(presult.successful())

        publisher = t1.get_publisher()
        self.assertTrue(publisher.exchange)

    def test_context_get(self):
        request = self.createTaskCls("T1", "c.unittest.t.c.g").request
        request.foo = 32
        self.assertEqual(request.get("foo"), 32)
        self.assertEqual(request.get("bar", 36), 36)
        request.clear()

    def test_task_class_repr(self):
        task = self.createTaskCls("T1", "c.unittest.t.repr")
        self.assertIn("class Task of", repr(task.app.Task))

    def test_after_return(self):
        task = self.createTaskCls("T1", "c.unittest.t.after_return")()
        # A chord in the request exercises the chord-callback path.
        task.request.chord = return_True_task.subtask()
        task.after_return("SUCCESS", 1.0, "foobar", (), {}, None)
        task.request.clear()

    def test_send_task_sent_event(self):
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        conn = T1.app.broker_connection()
        chan = conn.channel()
        T1.app.conf.CELERY_SEND_TASK_SENT_EVENT = True
        dispatcher = [None]

        # Stub publisher capturing the event_dispatcher it was given.
        class Pub(object):
            channel = chan

            def delay_task(self, *args, **kwargs):
                dispatcher[0] = kwargs.get("event_dispatcher")

        try:
            T1.apply_async(publisher=Pub())
        finally:
            T1.app.conf.CELERY_SEND_TASK_SENT_EVENT = False
            chan.close()
            conn.close()

        self.assertTrue(dispatcher[0])

    def test_get_publisher(self):
        connection = app_or_default().broker_connection()
        p = IncrementCounterTask.get_publisher(connection, auto_declare=False,
                                               exchange="foo")
        self.assertEqual(p.exchange.name, "foo")
        p = IncrementCounterTask.get_publisher(connection, auto_declare=False,
                                               exchange_type="fanout")
        self.assertEqual(p.exchange.type, "fanout")

    def test_update_state(self):

        @task_dec
        def yyy():
            pass

        tid = uuid()
        # Explicit task_id form.
        yyy.update_state(tid, "FROBULATING", {"fooz": "baaz"})
        self.assertEqual(yyy.AsyncResult(tid).status, "FROBULATING")
        self.assertDictEqual(yyy.AsyncResult(tid).result, {"fooz": "baaz"})

        # Implicit form: task_id taken from the current request.
        yyy.request.id = tid
        yyy.update_state(state="FROBUZATING", meta={"fooz": "baaz"})
        self.assertEqual(yyy.AsyncResult(tid).status, "FROBUZATING")
        self.assertDictEqual(yyy.AsyncResult(tid).result, {"fooz": "baaz"})

    def test_repr(self):

        @task_dec
        def task_test_repr():
            pass

        self.assertIn("task_test_repr", repr(task_test_repr))

    def test_has___name__(self):

        @task_dec
        def yyy2():
            pass

        self.assertTrue(yyy2.__name__)

    def test_get_logger(self):
        T1 = self.createTaskCls("T1", "c.unittest.t.t1")
        t1 = T1()
        logfh = WhateverIO()
        logger = t1.get_logger(logfile=logfh, loglevel=0)
        self.assertTrue(logger)

        # loglevel=None falls back to the request's loglevel.
        T1.request.loglevel = 3
        logger = t1.get_logger(logfile=logfh, loglevel=None)
        self.assertTrue(logger)
class TestTaskSet(Case):
    """TaskSet grouping, message payloads and result joining."""

    @with_eager_tasks
    def test_function_taskset(self):
        subtasks = [return_True_task.subtask([i]) for i in range(1, 6)]
        ts = task.TaskSet(subtasks)
        res = ts.apply_async()
        self.assertListEqual(res.join(), [True, True, True, True, True])

    def test_counter_taskset(self):
        IncrementCounterTask.count = 0
        ts = task.TaskSet(tasks=[
            IncrementCounterTask.subtask((), {}),
            IncrementCounterTask.subtask((), {"increment_by": 2}),
            IncrementCounterTask.subtask((), {"increment_by": 3}),
            IncrementCounterTask.subtask((), {"increment_by": 4}),
            IncrementCounterTask.subtask((), {"increment_by": 5}),
            IncrementCounterTask.subtask((), {"increment_by": 6}),
            IncrementCounterTask.subtask((), {"increment_by": 7}),
            IncrementCounterTask.subtask((), {"increment_by": 8}),
            IncrementCounterTask.subtask((), {"increment_by": 9}),
        ])
        self.assertEqual(ts.total, 9)

        # Drain anything left over from other tests before publishing.
        consumer = IncrementCounterTask().get_consumer()
        consumer.purge()
        consumer.close()
        taskset_res = ts.apply_async()
        subtasks = taskset_res.subtasks
        taskset_id = taskset_res.taskset_id
        consumer = IncrementCounterTask().get_consumer()
        for subtask in subtasks:
            m = consumer.fetch().payload
            # Each message carries the taskset id, task name and task id.
            self.assertDictContainsSubset({"taskset": taskset_id,
                                           "task": IncrementCounterTask.name,
                                           "id": subtask.task_id}, m)
            # Execute manually with the kwargs from the message.
            IncrementCounterTask().run(
                    increment_by=m.get("kwargs", {}).get("increment_by"))
        # 1 (default) + 2 + ... + 9
        self.assertEqual(IncrementCounterTask.count, sum(xrange(1, 10)))

    def test_named_taskset(self):
        prefix = "test_named_taskset-"
        ts = task.TaskSet([return_True_task.subtask([1])])
        res = ts.apply(taskset_id=prefix + uuid())
        self.assertTrue(res.taskset_id.startswith(prefix))
class TestTaskApply(Case):
    """Eager (local, synchronous) execution via Task.apply()."""

    def test_apply_throw(self):
        # throw=True propagates the task's exception immediately.
        with self.assertRaises(KeyError):
            RaisingTask.apply(throw=True)

    def test_apply_with_CELERY_EAGER_PROPAGATES_EXCEPTIONS(self):
        RaisingTask.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = True
        try:
            with self.assertRaises(KeyError):
                RaisingTask.apply()
        finally:
            RaisingTask.app.conf.CELERY_EAGER_PROPAGATES_EXCEPTIONS = False

    def test_apply(self):
        IncrementCounterTask.count = 0

        e = IncrementCounterTask.apply()
        self.assertIsInstance(e, EagerResult)
        self.assertEqual(e.get(), 1)

        e = IncrementCounterTask.apply(args=[1])
        self.assertEqual(e.get(), 2)

        e = IncrementCounterTask.apply(kwargs={"increment_by": 4})
        self.assertEqual(e.get(), 6)

        self.assertTrue(e.successful())
        self.assertTrue(e.ready())
        self.assertTrue(repr(e).startswith("<EagerResult:"))

        # A failing task is ready but unsuccessful; get() re-raises.
        f = RaisingTask.apply()
        self.assertTrue(f.ready())
        self.assertFalse(f.successful())
        self.assertTrue(f.traceback)
        with self.assertRaises(KeyError):
            f.get()
class MyPeriodic(task.PeriodicTask):
    """Periodic-task fixture scheduled once per hour."""
    run_every = timedelta(hours=1)
class TestPeriodicTask(Case):
    """Scheduling helpers on PeriodicTask with a timedelta run_every."""

    def test_must_have_run_every(self):
        # Subclassing PeriodicTask without run_every is an error.
        with self.assertRaises(NotImplementedError):
            type("Foo", (task.PeriodicTask, ), {"__module__": __name__})

    def test_remaining_estimate(self):
        self.assertIsInstance(
            MyPeriodic().remaining_estimate(datetime.utcnow()),
            timedelta)

    def test_is_due_not_due(self):
        due, remaining = MyPeriodic().is_due(datetime.utcnow())
        self.assertFalse(due)
        # This assertion may fail if executed in the
        # first minute of an hour, thus 59 instead of 60
        self.assertGreater(remaining, 59)

    def test_is_due(self):
        p = MyPeriodic()
        due, remaining = p.is_due(datetime.utcnow() - p.run_every.run_every)
        self.assertTrue(due)
        self.assertEqual(remaining,
                         p.timedelta_seconds(p.run_every.run_every))

    def test_schedule_repr(self):
        p = MyPeriodic()
        self.assertTrue(repr(p.run_every))
# Crontab-based periodic-task fixtures used by the crontab tests below.
class EveryMinutePeriodic(task.PeriodicTask):
    run_every = crontab()


class QuarterlyPeriodic(task.PeriodicTask):
    run_every = crontab(minute="*/15")


class HourlyPeriodic(task.PeriodicTask):
    run_every = crontab(minute=30)


class DailyPeriodic(task.PeriodicTask):
    run_every = crontab(hour=7, minute=30)


class WeeklyPeriodic(task.PeriodicTask):
    run_every = crontab(hour=7, minute=30, day_of_week="thursday")
def patch_crontab_nowfun(cls, retval):
    """Decorator factory: run the wrapped callable with
    ``cls.run_every.nowfun`` temporarily pinned so "now" is *retval*,
    restoring the original afterwards.
    """
    def create_patcher(fun):

        @wraps(fun)
        def __inner(*args, **kwargs):
            saved_nowfun = cls.run_every.nowfun
            cls.run_every.nowfun = lambda: retval
            try:
                return fun(*args, **kwargs)
            finally:
                cls.run_every.nowfun = saved_nowfun

        return __inner

    return create_patcher
class test_crontab_parser(Case):
    """Parsing of crontab field expressions into sets of integers."""

    def test_parse_star(self):
        # '*' expands to the full range [0, max).
        self.assertEqual(crontab_parser(24).parse('*'), set(range(24)))
        self.assertEqual(crontab_parser(60).parse('*'), set(range(60)))
        self.assertEqual(crontab_parser(7).parse('*'), set(range(7)))

    def test_parse_range(self):
        # 'a-b' ranges are inclusive of both endpoints.
        self.assertEqual(crontab_parser(60).parse('1-10'),
                         set(range(1, 10 + 1)))
        self.assertEqual(crontab_parser(24).parse('0-20'),
                         set(range(0, 20 + 1)))
        self.assertEqual(crontab_parser().parse('2-10'),
                         set(range(2, 10 + 1)))

    def test_parse_groups(self):
        self.assertEqual(crontab_parser().parse('1,2,3,4'),
                         set([1, 2, 3, 4]))
        self.assertEqual(crontab_parser().parse('0,15,30,45'),
                         set([0, 15, 30, 45]))

    def test_parse_steps(self):
        self.assertEqual(crontab_parser(8).parse('*/2'),
                         set([0, 2, 4, 6]))
        self.assertEqual(crontab_parser().parse('*/2'),
                         set(i * 2 for i in xrange(30)))
        self.assertEqual(crontab_parser().parse('*/3'),
                         set(i * 3 for i in xrange(20)))

    def test_parse_composite(self):
        # Ranges, steps and groups may be combined in one expression.
        self.assertEqual(crontab_parser(8).parse('*/2'), set([0, 2, 4, 6]))
        self.assertEqual(crontab_parser().parse('2-9/5'), set([2, 7]))
        self.assertEqual(crontab_parser().parse('2-10/5'), set([2, 7]))
        self.assertEqual(crontab_parser().parse('2-11/5,3'), set([2, 3, 7]))
        self.assertEqual(crontab_parser().parse('2-4/3,*/5,0-21/4'),
                         set([0, 2, 4, 5, 8, 10, 12, 15, 16,
                              20, 25, 30, 35, 40, 45, 50, 55]))
        self.assertEqual(crontab_parser().parse('1-9/2'),
                         set([1, 3, 5, 7, 9]))

    def test_parse_errors_on_empty_string(self):
        with self.assertRaises(ParseException):
            crontab_parser(60).parse('')

    def test_parse_errors_on_empty_group(self):
        with self.assertRaises(ParseException):
            crontab_parser(60).parse('1,,2')

    def test_parse_errors_on_empty_steps(self):
        with self.assertRaises(ParseException):
            crontab_parser(60).parse('*/')

    def test_parse_errors_on_negative_number(self):
        with self.assertRaises(ParseException):
            crontab_parser(60).parse('-20')

    def test_expand_cronspec_eats_iterables(self):
        self.assertEqual(crontab._expand_cronspec(iter([1, 2, 3]), 100),
                         set([1, 2, 3]))

    def test_expand_cronspec_invalid_type(self):
        with self.assertRaises(TypeError):
            crontab._expand_cronspec(object(), 100)

    def test_repr(self):
        self.assertIn("*", repr(crontab("*")))

    def test_eq(self):
        # Equality is by expanded value, not by the literal spec string.
        self.assertEqual(crontab(day_of_week="1, 2"),
                         crontab(day_of_week="1-2"))
        self.assertEqual(crontab(minute="1", hour="2", day_of_week="5"),
                         crontab(minute="1", hour="2", day_of_week="5"))
        self.assertNotEqual(crontab(minute="1"), crontab(minute="2"))
        self.assertFalse(object() == crontab(minute="1"))
        self.assertFalse(crontab(minute="1") == object())
class test_crontab_remaining_estimate(Case):
    """Next-occurrence computation for crontab schedules."""

    def next_ocurrance(self, crontab, now):
        # Pin the schedule's notion of "now" so the estimate is
        # deterministic, then convert the remaining delta to an
        # absolute datetime.
        crontab.nowfun = lambda: now
        return now + crontab.remaining_estimate(now)

    def test_next_minute(self):
        next = self.next_ocurrance(crontab(),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEqual(next, datetime(2010, 9, 11, 14, 31))

    def test_not_next_minute(self):
        # Minute 59 rolls over into the next hour.
        next = self.next_ocurrance(crontab(),
                                   datetime(2010, 9, 11, 14, 59, 15))
        self.assertEqual(next, datetime(2010, 9, 11, 15, 0))

    def test_this_hour(self):
        next = self.next_ocurrance(crontab(minute=[5, 42]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEqual(next, datetime(2010, 9, 11, 14, 42))

    def test_not_this_hour(self):
        next = self.next_ocurrance(crontab(minute=[5, 10, 15]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEqual(next, datetime(2010, 9, 11, 15, 5))

    def test_today(self):
        next = self.next_ocurrance(crontab(minute=[5, 42], hour=[12, 17]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEqual(next, datetime(2010, 9, 11, 17, 5))

    def test_not_today(self):
        next = self.next_ocurrance(crontab(minute=[5, 42], hour=[12]),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEqual(next, datetime(2010, 9, 12, 12, 5))

    def test_weekday(self):
        # 2010-09-11 is a Saturday; the next 'sat' slot is a week later.
        next = self.next_ocurrance(crontab(minute=30,
                                           hour=14,
                                           day_of_week="sat"),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEqual(next, datetime(2010, 9, 18, 14, 30))

    def test_not_weekday(self):
        next = self.next_ocurrance(crontab(minute=[5, 42],
                                           day_of_week="mon-fri"),
                                   datetime(2010, 9, 11, 14, 30, 15))
        self.assertEqual(next, datetime(2010, 9, 13, 0, 5))
class test_crontab_is_due(Case):
def setUp(self):
self.now = datetime.utcnow()
self.next_minute = 60 - self.now.second - 1e-6 * self.now.microsecond
def test_default_crontab_spec(self):
c = crontab()
self.assertEqual(c.minute, set(range(60)))
self.assertEqual(c.hour, set(range(24)))
self.assertEqual(c.day_of_week, set(range(7)))
def test_simple_crontab_spec(self):
c = crontab(minute=30)
self.assertEqual(c.minute, set([30]))
self.assertEqual(c.hour, set(range(24)))
self.assertEqual(c.day_of_week, set(range(7)))
def test_crontab_spec_minute_formats(self):
c = crontab(minute=30)
self.assertEqual(c.minute, set([30]))
c = crontab(minute='30')
self.assertEqual(c.minute, set([30]))
c = crontab(minute=(30, 40, 50))
self.assertEqual(c.minute, set([30, 40, 50]))
c = crontab(minute=set([30, 40, 50]))
self.assertEqual(c.minute, set([30, 40, 50]))
def test_crontab_spec_invalid_minute(self):
with self.assertRaises(ValueError):
crontab(minute=60)
with self.assertRaises(ValueError):
crontab(minute='0-100')
def test_crontab_spec_hour_formats(self):
c = crontab(hour=6)
self.assertEqual(c.hour, set([6]))
c = crontab(hour='5')
self.assertEqual(c.hour, set([5]))
c = crontab(hour=(4, 8, 12))
self.assertEqual(c.hour, set([4, 8, 12]))
def test_crontab_spec_invalid_hour(self):
with self.assertRaises(ValueError):
crontab(hour=24)
with self.assertRaises(ValueError):
crontab(hour='0-30')
def test_crontab_spec_dow_formats(self):
c = crontab(day_of_week=5)
self.assertEqual(c.day_of_week, set([5]))
c = crontab(day_of_week='5')
self.assertEqual(c.day_of_week, set([5]))
c = crontab(day_of_week='fri')
self.assertEqual(c.day_of_week, set([5]))
c = crontab(day_of_week='tuesday,sunday,fri')
self.assertEqual(c.day_of_week, set([0, 2, 5]))
c = crontab(day_of_week='mon-fri')
self.assertEqual(c.day_of_week, set([1, 2, 3, 4, 5]))
c = crontab(day_of_week='*/2')
self.assertEqual(c.day_of_week, set([0, 2, 4, 6]))
def seconds_almost_equal(self, a, b, precision):
for index, skew in enumerate((+0.1, 0, -0.1)):
try:
self.assertAlmostEqual(a, b + skew, precision)
except AssertionError:
if index + 1 >= 3:
raise
else:
break
def test_crontab_spec_invalid_dow(self):
with self.assertRaises(ValueError):
crontab(day_of_week='fooday-barday')
with self.assertRaises(ValueError):
crontab(day_of_week='1,4,foo')
with self.assertRaises(ValueError):
crontab(day_of_week='7')
with self.assertRaises(ValueError):
crontab(day_of_week='12')
def test_every_minute_execution_is_due(self):
last_ran = self.now - timedelta(seconds=61)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertTrue(due)
self.seconds_almost_equal(remaining, self.next_minute, 1)
def test_every_minute_execution_is_not_due(self):
last_ran = self.now - timedelta(seconds=self.now.second)
due, remaining = EveryMinutePeriodic().is_due(last_ran)
self.assertFalse(due)
self.seconds_almost_equal(remaining, self.next_minute, 1)
    # 29th of May 2010 is a saturday
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 29, 10, 30))
    def test_execution_is_due_on_saturday(self):
        """Every-minute schedules stay due on a Saturday.
        NOTE(review): the decorator patches HourlyPeriodic's clock while the
        body instantiates EveryMinutePeriodic -- confirm that is intended."""
        last_ran = self.now - timedelta(seconds=61)
        due, remaining = EveryMinutePeriodic().is_due(last_ran)
        self.assertTrue(due)
        self.seconds_almost_equal(remaining, self.next_minute, 1)
    # 30th of May 2010 is a sunday
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 30, 10, 30))
    def test_execution_is_due_on_sunday(self):
        """Every-minute schedules stay due on a Sunday.
        NOTE(review): the decorator patches HourlyPeriodic's clock while the
        body instantiates EveryMinutePeriodic -- confirm that is intended."""
        last_ran = self.now - timedelta(seconds=61)
        due, remaining = EveryMinutePeriodic().is_due(last_ran)
        self.assertTrue(due)
        self.seconds_almost_equal(remaining, self.next_minute, 1)
    # 31st of May 2010 is a monday
    @patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 31, 10, 30))
    def test_execution_is_due_on_monday(self):
        """Every-minute schedules stay due on a Monday.
        NOTE(review): the decorator patches HourlyPeriodic's clock while the
        body instantiates EveryMinutePeriodic -- confirm that is intended."""
        last_ran = self.now - timedelta(seconds=61)
        due, remaining = EveryMinutePeriodic().is_due(last_ran)
        self.assertTrue(due)
        self.seconds_almost_equal(remaining, self.next_minute, 1)
@patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 10, 10, 30))
def test_every_hour_execution_is_due(self):
due, remaining = HourlyPeriodic().is_due(datetime(2010, 5, 10, 6, 30))
self.assertTrue(due)
self.assertEqual(remaining, 60 * 60)
@patch_crontab_nowfun(HourlyPeriodic, datetime(2010, 5, 10, 10, 29))
def test_every_hour_execution_is_not_due(self):
due, remaining = HourlyPeriodic().is_due(datetime(2010, 5, 10, 9, 30))
self.assertFalse(due)
self.assertEqual(remaining, 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 15))
def test_first_quarter_execution_is_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 6, 30))
self.assertTrue(due)
self.assertEqual(remaining, 15 * 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 30))
def test_second_quarter_execution_is_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 6, 30))
self.assertTrue(due)
self.assertEqual(remaining, 15 * 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 14))
def test_first_quarter_execution_is_not_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 10, 0))
self.assertFalse(due)
self.assertEqual(remaining, 60)
@patch_crontab_nowfun(QuarterlyPeriodic, datetime(2010, 5, 10, 10, 29))
def test_second_quarter_execution_is_not_due(self):
due, remaining = QuarterlyPeriodic().is_due(
datetime(2010, 5, 10, 10, 15))
self.assertFalse(due)
self.assertEqual(remaining, 60)
@patch_crontab_nowfun(DailyPeriodic, datetime(2010, 5, 10, 7, 30))
def test_daily_execution_is_due(self):
due, remaining = DailyPeriodic().is_due(datetime(2010, 5, 9, 7, 30))
self.assertTrue(due)
self.assertEqual(remaining, 24 * 60 * 60)
@patch_crontab_nowfun(DailyPeriodic, datetime(2010, 5, 10, 10, 30))
def test_daily_execution_is_not_due(self):
due, remaining = DailyPeriodic().is_due(datetime(2010, 5, 10, 7, 30))
self.assertFalse(due)
self.assertEqual(remaining, 21 * 60 * 60)
@patch_crontab_nowfun(WeeklyPeriodic, datetime(2010, 5, 6, 7, 30))
def test_weekly_execution_is_due(self):
due, remaining = WeeklyPeriodic().is_due(datetime(2010, 4, 30, 7, 30))
self.assertTrue(due)
self.assertEqual(remaining, 7 * 24 * 60 * 60)
@patch_crontab_nowfun(WeeklyPeriodic, datetime(2010, 5, 7, 10, 30))
def test_weekly_execution_is_not_due(self):
due, remaining = WeeklyPeriodic().is_due(datetime(2010, 5, 6, 7, 30))
self.assertFalse(due)
self.assertEqual(remaining, 6 * 24 * 60 * 60 - 3 * 60 * 60)
|
bsd-3-clause
|
OpenNeuroLab/brainspell-neo
|
brainspell/article_helpers.py
|
2
|
15579
|
# functions related to adding and editing article data
import re
import urllib.request
import Bio
from Bio import Entrez, Medline
from Bio.Entrez import efetch, esearch, parse, read
from models import *
from search_helpers import get_article_object
Entrez.email = "neel@berkeley.edu"
# BEGIN: article helper functions
def update_authors(pmid, authors):
    """Overwrite the stored author string for the article with this PMID."""
    query = Articles.update(authors=authors).where(Articles.pmid == pmid)
    query.execute()
def update_vote_in_struct(struct, tag_name, username, direction, label_name):
    """Toggle a user's vote on a tag inside a voting structure.

    ``struct`` is a list of dicts of the form
    ``{label_name: tag, "vote": {"up": [...], "down": [...]}}`` and is
    modified in place.  A second vote in the same ``direction`` removes
    the vote (toggle); a vote always clears the user's vote in the
    opposite direction.
    """
    # Find the entry for this tag; some entries may be malformed, so only
    # consider dicts that actually carry the label key.
    entry = -1
    for i in range(len(struct)):
        if label_name in struct[i] and struct[i][label_name] == tag_name:
            entry = i
            break
    if entry == -1:  # the tag hasn't been added yet, so add it
        struct.append({
            label_name: tag_name,
        })
        entry = len(struct) - 1
    # If no one has voted yet, create the voting sub-structure.
    if "vote" not in struct[entry]:
        struct[entry]["vote"] = {}
        struct[entry]["vote"]["up"] = []
        struct[entry]["vote"]["down"] = []
    votes = struct[entry]["vote"]
    # Toggle: drop any existing vote by this user in this direction.
    # Rebuilding the list fixes the original delete-while-iterating bug,
    # which raised IndexError whenever the user's vote was not the last
    # element of the list.
    kept = [v for v in votes[direction] if v["username"] != username]
    if len(kept) == len(votes[direction]):
        # No prior vote -- record one.  Kept as a dict so extra metadata
        # can be attached later.
        kept.append({
            "username": username
        })
    votes[direction] = kept
    # A vote in one direction always clears the user's opposite vote.
    other_direction = "down" if direction == "up" else "up"
    votes[other_direction] = [
        v for v in votes[other_direction] if v["username"] != username]
def toggle_vote(pmid, topic, username, direction):
    """ Toggle a user's vote on an article tag.

    Loads the article's metadata, toggles ``username``'s ``direction``
    ("up"/"down") vote on the MeSH heading named ``topic`` and persists
    the result. """
    fullArticle = next(get_article_object(pmid))
    # NOTE(review): metadata is stored as the str() of a dict and re-parsed
    # with eval(); ast.literal_eval would be safer if the column can ever
    # contain user-influenced text -- confirm before changing.
    metadata = eval(fullArticle.metadata)
    # Record/withdraw the vote on the matching heading, in place.
    update_vote_in_struct(
        metadata['meshHeadings'],
        topic,
        username,
        direction,
        "name")
    # Persist the updated metadata structure back to the row.
    query = Articles.update(
        metadata=metadata).where(
        Articles.pmid == pmid)
    query.execute()
def vote_stereotaxic_space(pmid, space, username):
    """ Toggle a user's vote for the stereotaxic space of an article.

    Each user holds at most one radio vote; voting again replaces the
    previous choice with ``space``.
    """
    fullArticle = next(get_article_object(pmid))
    # Metadata is stored as str(dict); eval() restores the structure.
    target = eval(fullArticle.metadata)
    # Lazily create the nested voting structure.
    if "space_subjects" not in target:
        target["space_subjects"] = {}
    if "radio_votes" not in target["space_subjects"]:
        target["space_subjects"]["radio_votes"] = []
    # Drop any previous vote by this user.  Rebuilding the list fixes the
    # original delete-while-iterating bug, which raised IndexError whenever
    # the user's vote was not the last element.
    votes = [v for v in target["space_subjects"]["radio_votes"]
             if v["username"] != username]
    votes.append({
        "username": username,
        "type": space
    })
    target["space_subjects"]["radio_votes"] = votes
    query = Articles.update(
        metadata=target).where(
        Articles.pmid == pmid)
    query.execute()
def vote_number_of_subjects(pmid, subjects, username):
    """ Place a vote for the number of subjects for this article.

    Each user holds at most one vote; voting again replaces the previous
    value with ``subjects``.
    """
    fullArticle = next(get_article_object(pmid))
    # Metadata is stored as str(dict); eval() restores the structure.
    target = eval(fullArticle.metadata)
    if "space_subjects" not in target:
        target["space_subjects"] = {}
    if "number_of_subjects" not in target["space_subjects"]:
        target["space_subjects"]["number_of_subjects"] = []
    # Rebuild the list without this user's old vote -- the original deleted
    # inside an index loop, which raised IndexError unless the match was
    # the final element.
    votes = [v for v in target["space_subjects"]["number_of_subjects"]
             if v["username"] != username]
    votes.append({
        "username": username,
        "value": subjects
    })
    target["space_subjects"]["number_of_subjects"] = votes
    query = Articles.update(
        metadata=target).where(
        Articles.pmid == pmid)
    query.execute()
def toggle_user_tag(user_tag, pmid, username):
    """ Toggle a custom user tag to the database.

    Adds ``user_tag`` under ``username`` if absent; removes it if already
    present (see review note below). """
    # Fetch only the metadata column for this article.
    main_target = next(
        Articles.select(
            Articles.metadata).where(
            Articles.pmid == pmid).execute())
    # NOTE(review): metadata is str(dict) re-parsed with eval(); consider
    # ast.literal_eval if the column can contain untrusted text.
    target = eval(main_target.metadata)
    if "user_tags" in target:
        toggled = False
        # NOTE(review): this removes the tag no matter WHICH user added it,
        # not just ``username`` -- confirm that is intended.
        for user in target["user_tags"]:
            # if the tag is already present, then delete it
            if target["user_tags"][user]["tag_name"] == user_tag:
                # Deleting during dict iteration is only safe because we
                # break immediately afterwards.
                del target["user_tags"][user]
                toggled = True
                break
        if not toggled:
            # Tag absent -- add it under this user (one tag per user).
            target["user_tags"][username] = {
                "tag_name": user_tag
            }
    else:
        # First user tag for this article.
        target["user_tags"] = {
            username: {
                "tag_name": user_tag
            }
        }
    query = Articles.update(metadata=target).where(Articles.pmid == pmid)
    query.execute()
def get_number_of_articles():
    """Return the total count of rows in the Articles table."""
    article_count = Articles.select().wrapped_count()
    return article_count
# BEGIN: add article functions
def add_pmid_article_to_database(article_id):
    """
    Given a PMID, use external APIs to get the necessary article data
    in order to add the article to our database.

    Fetches the Medline record from PubMed and (best-effort) coordinate
    peaks from Neurosynth, then inserts a new Articles row.  Returns True
    on success, False if the PMID could not be fetched.
    """
    pmid = str(article_id)
    try:
        # Fetch the Medline record for this PMID from PubMed.
        handle = efetch("pubmed", id=[pmid], rettype="medline", retmode="text")
    except BaseException:
        return False  # Could not access correct pubmed ID
    records = list(Medline.parse(handle))
    records = records[0]
    # Map the Medline field codes onto our column names.
    article_info = {}
    article_info["title"] = records.get("TI")
    article_info["PMID"] = pmid
    article_info["authors"] = ', '.join(records.get("AU", []))
    article_info["abstract"] = records.get("AB")
    article_info["DOI"] = getDOI(records.get("AID", []))
    article_info["experiments"] = []
    article_info["metadata"] = str({"meshHeadings": []})
    article_info["reference"] = None
    # NOTE(review): ``identity`` is assigned but never used below.
    identity = ""
    try:
        # Neurosynth's peaks API returns {"data": [[id?, x, y, z], ...]}.
        # WARNING: eval() on a network response executes arbitrary code if
        # the endpoint is ever compromised -- json.loads would be safer.
        locations_list = eval(
            urllib.request.urlopen(
                "http://neurosynth.org/api/studies/peaks/" +
                str(pmid) +
                "/").read().decode())["data"]
        id_map = {}
        greatest_id = 89999
        current_exp = None
        for loc in locations_list:
            current_loc_id = None
            vals = loc
            if len(loc) == 4:
                # Four values: the first is the Neurosynth table id.
                current_loc_id = loc[0]
                vals = vals[1:]
            # vals is the x, y, z array; current_loc_id is the Neurosynth ID
            if current_loc_id not in id_map:
                greatest_id += 1
                id_map[current_loc_id] = greatest_id
            # NOTE(review): the block below runs for EVERY location, so each
            # coordinate row starts a fresh experiment and id_map is unused
            # afterwards; it looks like it should be nested inside the
            # "new id" branch above -- confirm intended behavior.
            if current_exp is not None:
                # Add the current experiment if its not None
                article_info["experiments"].append(current_exp)
            current_exp = {
                "caption": "",
                "locations": [],
                "descriptors": [],
                "contrast": "",
                "space": "",
                "effect": ""
            }
            current_exp["locations"].append(",".join([str(v) for v in vals]))
        if current_exp is not None:
            # Flush the final experiment.
            article_info["experiments"].append(current_exp)
    except BaseException:
        # Best-effort: articles without Neurosynth data get no experiments.
        pass
    # NOTE(review): the computed "metadata" and "reference" values are never
    # passed to Articles.create below -- confirm whether that is intentional.
    Articles.create(abstract=article_info["abstract"],
                    authors=article_info["authors"],
                    doi=article_info["DOI"],
                    experiments=str(article_info["experiments"]),
                    pmid=article_info["PMID"],
                    title=article_info["title"])
    return True
def getDOI(lst):
    """ Extract the DOI from a Bio.Medline result.

    ``lst`` is the Medline "AID" field: strings such as
    ``"10.1016/j.2010.123 [doi]"``.  Returns the DOI string of the first
    entry carrying the ``[doi]`` suffix, or None when there is none.
    """
    pattern = r"([0-9]{2}\.[0-9]*\/[a-z]*\.[0-9]*\.[0-9]*)[ ]\[doi\]"
    for item in lst:
        # Match once and reuse the result (the original ran the same
        # regex twice per item).
        found = re.match(pattern, item)
        if found:
            return found.group(1)
def clean_bulk_add(contents):
    """
    A helper function for adding many articles at a time (by uploading a
    JSON file of article information). Clean the data, ensure that only
    complete entries are included, and return the cleaned row dicts.

    Entries missing a required key (pmid, title, abstract, authors) are
    silently skipped.
    """
    clean_articles = []
    for article in contents:
        try:
            # Fill in optional fields with defaults.
            if "timestamp" not in article:
                article["timestamp"] = None
            article["authors"] = ",".join(article["authors"])
            if "doi" not in article:
                article["doi"] = None
            if "experiments" in article:
                article["experiments"] = str(article["experiments"])
            else:
                article["experiments"] = str([])
            if "meshHeadings" in article:
                article["metadata"] = str(
                    {"meshHeadings": article["meshHeadings"]})
                del article["meshHeadings"]
            else:
                article["metadata"] = str({"meshHeadings": []})
            # Build a human-readable reference when journal/year are known.
            if "journal" in article and "year" in article:
                article["reference"] = article["authors"] + \
                    "(" + str(article["year"]) + ") " + article["journal"]
                del article["journal"]
                del article["year"]
            else:
                article["reference"] = None
            # once the article data is clean, add it to a separate list that
            # we'll pass to PeeWee
            article = {
                "timestamp": article["timestamp"],
                "abstract": article["abstract"],
                "authors": article["authors"],
                "doi": article["doi"],
                "experiments": article["experiments"],
                "metadata": article["metadata"],
                "neurosynthid": None,
                "pmid": article["pmid"],
                "reference": article["reference"],
                "title": article["title"]
            }
            clean_articles.append(article)
        except Exception:
            # Incomplete entry (KeyError etc.) -- skip it.  Narrowed from
            # BaseException so KeyboardInterrupt/SystemExit still propagate.
            pass
    return clean_articles
def add_bulk(papers, limit=100):  # papers is the entire formatted data set
    """Insert article row dicts in batches of ``limit``, inside a single
    transaction."""
    with conn.atomic():
        for start in range(0, len(papers), limit):  # Inserts limit at a time
            batch = papers[start:start + limit]
            Articles.insert_many(batch).execute()
# BEGIN: table helper functions
def delete_row(pmid, exp, row):
    """ Delete a row of coordinates from an experiment.

    ``exp`` is the experiment index, ``row`` the coordinate-row index.
    """
    target = next(get_article_object(pmid))
    # Experiments are stored as str(list-of-dicts); eval() restores them.
    experiments = eval(target.experiments)
    # Cast indices like the sibling table helpers (flag_table etc.) do, so
    # string values coming from request handlers work here as well.
    elem = experiments[int(exp)]
    locations = elem["locations"]
    locations.pop(int(row))
    Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid).execute()
def flag_table(pmid, exp):
    """Toggle the 'flagged' (inaccurate) marker on one experiment table."""
    article = next(get_article_object(pmid))
    experiments = eval(article.experiments)
    table = experiments[int(exp)]
    # 1 - x toggles between 0 and 1; first flagging defaults to 1.
    table["flagged"] = 1 - table["flagged"] if "flagged" in table else 1
    Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid).execute()
def edit_table_title_caption(pmid, exp, title, caption):
    """Set a new title and caption on one experiment table."""
    article = next(get_article_object(pmid))
    experiments = eval(article.experiments)
    table = experiments[int(exp)]
    table["title"], table["caption"] = title, caption
    Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid).execute()
def split_table(pmid, exp, row):
    """ Split a coordinate table into two.

    Rows before ``row`` stay in table ``exp``; the remainder moves into a
    new table inserted directly after it, with a fresh id.
    """
    target = next(get_article_object(pmid))
    experiments = eval(target.experiments)
    # Cast like the sibling helpers so string indices from handlers work.
    exp = int(exp)
    row = int(row)
    elem = experiments[exp]
    locations = elem["locations"]
    elem["locations"] = locations[0:row]
    # New table id: one past the current maximum.  (The comprehension
    # variable is renamed -- the original shadowed the ``exp`` parameter.)
    highestID = int(max([e["id"] for e in experiments])) + 1
    secondTable = {
        "title": "",
        "caption": "",
        "locations": locations[row:],
        "id": highestID
    }
    experiments.insert(exp + 1, secondTable)
    Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid).execute()
def add_coordinate_row(pmid, exp, coords, row_number=-1):
    """ Add a coordinate row to a table.

    ``coords`` is a list of three or four coordinates; a ``row_number`` of
    -1 appends to the end, any other value inserts at that position. """
    article = next(get_article_object(pmid))
    experiments = eval(article.experiments)
    table = experiments[int(exp)]
    new_row = ",".join(str(c) for c in coords)
    if row_number == -1:
        table["locations"].append(new_row)
    else:
        table["locations"].insert(row_number, new_row)
    Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid).execute()
def update_coordinate_row(pmid, exp, coords, row_number):
    """ Replace the coordinate row at ``row_number`` in a table.

    Take a list of three or four coordinates. Take a row number.
    (The previous docstring wrongly described this as appending.) """
    target = next(get_article_object(pmid))
    experiments = eval(target.experiments)
    elem = experiments[int(exp)]
    # Rows are stored as comma-joined strings.
    row_list = ",".join([str(c) for c in coords])
    elem["locations"][row_number] = row_list
    Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid).execute()
def add_table_through_text_box(pmid, values):
    """Append a new experiment table parsed from a CSV-style string
    (one comma-separated coordinate row per line)."""
    article = next(get_article_object(pmid))
    experiments = eval(article.experiments)
    rows = values.replace(" ", "").split("\n")
    next_id = max([e["id"] for e in experiments]) + 1
    new_table = {"title": "", "caption": "", "locations": rows,
                 "id": next_id}
    experiments.append(new_table)
    Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid).execute()
def update_table_vote(tag_name, direction, table_num, pmid, column, username):
    """ Update the vote on an experiment tag for a given user. """
    # Load just the experiments column for this article.
    article_obj = Articles.select(
        Articles.experiments).where(
        Articles.pmid == pmid).execute()
    article_obj = next(article_obj)
    # Experiments are stored as str(list); eval() restores the structure.
    article_obj = eval(article_obj.experiments)
    # get the table object
    table_obj = article_obj[table_num]
    # NOTE(review): ``not table_obj.get(column)`` also replaces an existing
    # falsy value (e.g. empty string) with a list -- confirm intended.
    if not table_obj.get(column):
        table_obj[column] = []
    # Toggle the user's vote on this tag within the chosen column.
    update_vote_in_struct(
        table_obj[column],
        tag_name,
        username,
        direction,
        "tag")
    article_obj[table_num] = table_obj
    # Persist the modified experiments list.
    query = Articles.update(
        experiments=article_obj).where(
        Articles.pmid == pmid)
    query.execute()
def replace_experiments(pmid, experiments):
    """Overwrite the whole experiments structure for the given PMID."""
    query = Articles.update(
        experiments=experiments).where(
        Articles.pmid == pmid)
    query.execute()
def replace_metadata(pmid, metadata):
    """Overwrite the metadata structure for the given PMID."""
    query = Articles.update(metadata=metadata).where(
        Articles.pmid == str(pmid))
    query.execute()
def check_existence(pmid):
    """Return the query result of rows matching this PMID (truthy when the
    article exists)."""
    matching = Articles.select(
        Articles.pmid).where(
        Articles.pmid == str(pmid)).execute()
    return matching
|
mit
|
titasakgm/brc-stock
|
openerp/addons/report_geraldo/lib/geraldo/geraldo/utils.py
|
7
|
4767
|
import sys
try:
import reportlab
except ImportError:
cm = 28.346456692913385
A4 = (595.275590551181, 841.8897637795275)
black = None
TA_LEFT, TA_CENTER, TA_RIGHT = 0, 1, 2
landscape = lambda t:(t[1],t[0])
else:
from reportlab.lib.units import * # Check this - is the source of units
from reportlab.lib.pagesizes import * # Check this - is the source of page sizes
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT # Check this also
from reportlab.lib.colors import black
from exceptions import AttributeNotFound
try:
from functools import wraps
except ImportError:
wraps = lambda func: func
# FLAGS
BAND_WIDTH = 'band-width'
BAND_HEIGHT = 'band-height'
CROSS_COLS = 'cross-cols'
CROSS_ROWS = 'cross-rows'
FIELD_ACTION_VALUES = 'values' # \
FIELD_ACTION_FIRST = 'first' # > Used only by cross reference functions
FIELD_ACTION_LAST = 'last' # /
FIELD_ACTION_VALUE = 'value'
FIELD_ACTION_COUNT = 'count'
FIELD_ACTION_AVG = 'avg'
FIELD_ACTION_MIN = 'min'
FIELD_ACTION_MAX = 'max'
FIELD_ACTION_SUM = 'sum'
FIELD_ACTION_DISTINCT_COUNT = 'distinct_count'
FIELD_ACTION_PERCENT = 'percent'
SYSTEM_FIELD_CHOICES = {
'report_title': 'ReportTitle',
'page_number': 'PageNumber',
'page_count': 'PageCount',
'current_datetime': 'CurrentDateTime',
'report_author': 'Author',
}
def _get_memoized_value(func, args, kwargs):
"""Used internally by memoize decorator to get/store function results"""
key = (repr(args), repr(kwargs))
if not key in func._cache_dict:
ret = func(*args, **kwargs)
func._cache_dict[key] = ret
return func._cache_dict[key]
def memoize(func):
    """Decorator that stores function results in a dictionary to be used on the
    next time that the same arguments were informed."""
    # Per-function cache, keyed by _get_memoized_value on repr(args)/kwargs.
    func._cache_dict = {}
    def _inner(*args, **kwargs):
        return _get_memoized_value(func, args, kwargs)
    if sys.version.startswith('2.4'):
        # Python 2.4 lacks functools.wraps (see the fallback import at the
        # top of the module), so return the bare wrapper there.
        return _inner
    else:
        return wraps(func)(_inner)
def get_attr_value(obj, attr_path):
    """Resolve a dotted ``attr_path`` against ``obj``.

    Each path segment is looked up first as an attribute, then as a mapping
    key.  If the value at any step is callable it is invoked with no
    arguments and its result used instead.

    Examples:
        attribute_name = 'name'
        attribute_name = 'name.upper'
        attribute_name = 'customer.name.lower'
    """
    if not attr_path:
        raise Exception('Invalid attribute path \'%s\''%attr_path)
    segments = attr_path.split('.')
    # Attribute lookup first, mapping lookup second.
    try:
        value = getattr(obj, segments[0])
    except AttributeError:
        try:
            value = obj[segments[0]]
        except (KeyError, TypeError):
            raise AttributeNotFound('There is no attribute nor key "%s" in the object "%s"'%(segments[0], repr(obj)))
    # Recurse into the remaining path, if any.
    if len(segments) > 1:
        value = get_attr_value(value, '.'.join(segments[1:]))
    if callable(value):
        value = value()
    return value
@memoize
def calculate_size(size):
    """Calculates the informed size. If this is a string or unicode, it is
    converted to float using evaluation function"""
    # NOTE: ``basestring`` makes this Python-2-only; a str argument would
    # raise NameError under Python 3.
    if isinstance(size, basestring):
        # WARNING: eval() executes arbitrary expressions -- only trusted,
        # report-author-supplied size strings must reach this point.
        return eval(size) # If you are thinking this is a semanthic bug, you must
                          # be aware this 'eval' is necessary to calculate sizes
                          # like '10*cm' or '15.8*rows'
                          # I want to check if eval is better way to do it than
                          # do a regex matching and calculate. TODO
    return size
# Replaced by ReportLab landscape and portrait functions
#@memoize
#def landscape(page_size):
# return page_size[1], page_size[0]
@memoize
def format_date(date, expression):
    """Return ``date`` formatted with strftime ``expression``; results are
    memoized per (date, expression) pair."""
    return date.strftime(expression)
# Tries to import class Process from multiprocessing library and sets
# it as None if import fails
try:
from multiprocessing import Process
except ImportError:
Process = None
# Sets this to True if you don't want to use multiprocessing on
# functions with 'run_under_process' decorator
DISABLE_MULTIPROCESSING = False
def run_under_process(func):
    """This is a decorator that uses multiprocessing library to run a
    function under a new process. To use it on Python 2.4 you need to
    install python-multiprocessing package.

    Just remember that Process doesn't support returning value"""
    def _inner(*args, **kwargs):
        # If multiprocessing is unavailable or disabled, just run the
        # function in-process with its arguments.
        if not Process or DISABLE_MULTIPROCESSING:
            func(*args, **kwargs)
            # BUGFIX: the original fell through here and still tried to
            # start a Process -- running the function twice when disabled,
            # or raising TypeError when Process is None.
            return
        prc = Process(target=func, args=args, kwargs=kwargs)
        prc.start()
        prc.join()
    return _inner
|
agpl-3.0
|
strk/QGIS
|
tests/src/python/test_qgsrasterrerderer_createsld.py
|
15
|
29445
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
test_qgsrasterrenderer_createsld.py
---------------------
Date : December 2018
Copyright : (C) 2018 by Luigi Pirelli
Email : luipir at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *less
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Luigi Pirelli'
__date__ = 'December 2018'
__copyright__ = '(C) 2018, Luigi Pirelli'
import qgis # NOQA
import os
import random
from qgis.PyQt.QtCore import (
Qt,
QDir,
QFile,
QIODevice,
QPointF,
QSizeF,
QFileInfo,
)
from qgis.PyQt.QtXml import QDomDocument
from qgis.PyQt.QtGui import QColor, QFont
from qgis.core import (
QgsRasterLayer,
QgsRasterRenderer,
QgsMultiBandColorRenderer,
QgsSingleBandGrayRenderer,
QgsPalettedRasterRenderer,
QgsSingleBandPseudoColorRenderer,
QgsContrastEnhancement,
QgsRasterMinMaxOrigin,
Qgis,
QgsRasterBandStats,
QgsRasterShader,
QgsColorRampShader,
)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
# Convenience instances in case you may need them
# not used in this test
start_app()
TEST_DATA_DIR = unitTestDataPath()
class TestQgsRasterRendererCreateSld(unittest.TestCase):
"""
This class tests the creation of SLD from QGis raster layers
"""
@classmethod
def setUpClass(self):
pass
    def setUp(self):
        """Per-test setup: nothing to do, fixtures are built in __init__."""
        pass
    def tearDown(self):
        """Per-test teardown: nothing to allocate, nothing to release."""
        pass
def __init__(self, methodName):
"""Run once on class initialization."""
unittest.TestCase.__init__(self, methodName)
myPath = os.path.join(TEST_DATA_DIR, 'landsat.tif')
rasterFileInfo = QFileInfo(myPath)
self.raster_layer = QgsRasterLayer(rasterFileInfo.filePath(),
rasterFileInfo.completeBaseName())
    def testSingleBandPseudoColorRenderer_Interpolated(self):
        """Interpolated pseudocolor must export an SLD ColorMap of type
        'ramp' with one ColorMapEntry per class."""
        # get min and max of the band to renderer
        bandNo = 3
        stats = self.raster_layer.dataProvider().bandStatistics(bandNo, QgsRasterBandStats.Min | QgsRasterBandStats.Max)
        minValue = stats.minimumValue
        maxValue = stats.maximumValue
        # create shader for the renderer
        shader = QgsRasterShader(minValue, maxValue)
        colorRampShaderFcn = QgsColorRampShader(minValue, maxValue)
        colorRampShaderFcn.setColorRampType(QgsColorRampShader.Interpolated)
        colorRampShaderFcn.setClassificationMode(QgsColorRampShader.Continuous)
        colorRampShaderFcn.setClip(True)
        # ten grayscale classes 0..9 (colors '#000000'..'#090909')
        items = []
        for index in range(10):
            items.append(QgsColorRampShader.ColorRampItem(index, QColor('#{0:02d}{0:02d}{0:02d}'.format(index)), "{}".format(index)))
        colorRampShaderFcn.setColorRampItemList(items)
        shader.setRasterShaderFunction(colorRampShaderFcn)
        # create instance to test
        rasterRenderer = QgsSingleBandPseudoColorRenderer(self.raster_layer.dataProvider(), bandNo, shader)
        self.raster_layer.setRenderer(rasterRenderer)
        # do test
        dom, root = self.rendererToSld(self.raster_layer.renderer())
        self.assertNoOpacity(root)
        self.assertChannelBand(root, 'sld:GrayChannel', '{}'.format(bandNo))
        # check ColorMapEntry classes
        colorMap = root.elementsByTagName('sld:ColorMap')
        colorMap = colorMap.item(0).toElement()
        self.assertFalse(colorMap.isNull())
        self.assertEqual(colorMap.attribute('type'), 'ramp')
        colorMapEntries = colorMap.elementsByTagName('sld:ColorMapEntry')
        self.assertEqual(colorMapEntries.count(), 10)
        # each entry mirrors the ramp item: quantity/label/color, no opacity
        for index in range(colorMapEntries.count()):
            colorMapEntry = colorMapEntries.at(index).toElement()
            self.assertEqual(colorMapEntry.attribute('quantity'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('label'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('opacity'), '')
            self.assertEqual(colorMapEntry.attribute('color'), '#{0:02d}{0:02d}{0:02d}'.format(index))
    def testSingleBandPseudoColorRenderer_Discrete(self):
        """Discrete pseudocolor must export an SLD ColorMap of type
        'intervals' with one ColorMapEntry per class."""
        # get min and max of the band to renderer
        bandNo = 3
        stats = self.raster_layer.dataProvider().bandStatistics(bandNo, QgsRasterBandStats.Min | QgsRasterBandStats.Max)
        minValue = stats.minimumValue
        maxValue = stats.maximumValue
        # create shader for the renderer
        shader = QgsRasterShader(minValue, maxValue)
        colorRampShaderFcn = QgsColorRampShader(minValue, maxValue)
        colorRampShaderFcn.setColorRampType(QgsColorRampShader.Discrete)
        colorRampShaderFcn.setClassificationMode(QgsColorRampShader.Continuous)
        colorRampShaderFcn.setClip(True)
        # ten grayscale classes 0..9 (colors '#000000'..'#090909')
        items = []
        for index in range(10):
            items.append(QgsColorRampShader.ColorRampItem(index, QColor('#{0:02d}{0:02d}{0:02d}'.format(index)), "{}".format(index)))
        colorRampShaderFcn.setColorRampItemList(items)
        shader.setRasterShaderFunction(colorRampShaderFcn)
        # create instance to test
        rasterRenderer = QgsSingleBandPseudoColorRenderer(self.raster_layer.dataProvider(), bandNo, shader)
        self.raster_layer.setRenderer(rasterRenderer)
        # do test
        dom, root = self.rendererToSld(self.raster_layer.renderer())
        self.assertNoOpacity(root)
        self.assertChannelBand(root, 'sld:GrayChannel', '{}'.format(bandNo))
        # check ColorMapEntry classes
        colorMap = root.elementsByTagName('sld:ColorMap')
        colorMap = colorMap.item(0).toElement()
        self.assertFalse(colorMap.isNull())
        self.assertEqual(colorMap.attribute('type'), 'intervals')
        colorMapEntries = colorMap.elementsByTagName('sld:ColorMapEntry')
        self.assertEqual(colorMapEntries.count(), 10)
        # each entry mirrors the ramp item: quantity/label/color, no opacity
        for index in range(colorMapEntries.count()):
            colorMapEntry = colorMapEntries.at(index).toElement()
            self.assertEqual(colorMapEntry.attribute('quantity'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('label'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('opacity'), '')
            self.assertEqual(colorMapEntry.attribute('color'), '#{0:02d}{0:02d}{0:02d}'.format(index))
    def testSingleBandPseudoColorRenderer_Exact(self):
        """Exact-match pseudocolor must export an SLD ColorMap of type
        'values'; small maps (<=255 entries) carry no 'extended'
        attribute."""
        # get min and max of the band to renderer
        bandNo = 3
        stats = self.raster_layer.dataProvider().bandStatistics(bandNo, QgsRasterBandStats.Min | QgsRasterBandStats.Max)
        minValue = stats.minimumValue
        maxValue = stats.maximumValue
        # create shader for the renderer
        shader = QgsRasterShader(minValue, maxValue)
        colorRampShaderFcn = QgsColorRampShader(minValue, maxValue)
        colorRampShaderFcn.setColorRampType(QgsColorRampShader.Exact)
        colorRampShaderFcn.setClassificationMode(QgsColorRampShader.Continuous)
        colorRampShaderFcn.setClip(True)
        # ten grayscale classes 0..9
        items = []
        for index in range(10):
            items.append(QgsColorRampShader.ColorRampItem(index, QColor('#{0:02d}{0:02d}{0:02d}'.format(index)), "{}".format(index)))
        colorRampShaderFcn.setColorRampItemList(items)
        shader.setRasterShaderFunction(colorRampShaderFcn)
        # create instance to test
        rasterRenderer = QgsSingleBandPseudoColorRenderer(self.raster_layer.dataProvider(), bandNo, shader)
        self.raster_layer.setRenderer(rasterRenderer)
        # do test
        dom, root = self.rendererToSld(self.raster_layer.renderer())
        self.assertNoOpacity(root)
        self.assertChannelBand(root, 'sld:GrayChannel', '{}'.format(bandNo))
        # check ColorMapEntry classes
        colorMap = root.elementsByTagName('sld:ColorMap')
        colorMap = colorMap.item(0).toElement()
        self.assertFalse(colorMap.isNull())
        self.assertEqual(colorMap.attribute('type'), 'values')
        # NOTE(review): 'extendend' here (and below) looks like a typo of
        # 'extended', the spelling asserted in testPalettedRasterRenderer.
        self.assertFalse(colorMap.hasAttribute('extendend'))
        colorMapEntries = colorMap.elementsByTagName('sld:ColorMapEntry')
        self.assertEqual(colorMapEntries.count(), 10)
        for index in range(colorMapEntries.count()):
            colorMapEntry = colorMapEntries.at(index).toElement()
            self.assertEqual(colorMapEntry.attribute('quantity'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('label'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('opacity'), '')
            self.assertEqual(colorMapEntry.attribute('color'), '#{0:02d}{0:02d}{0:02d}'.format(index))
        # add check that is set ColoMap extended="true" if colormap is bigger that 255 entries
        # !NOTE! can't reuse previous shader => segmentation fault
        shader = QgsRasterShader(minValue, maxValue)
        colorRampShaderFcn = QgsColorRampShader(minValue, maxValue)
        colorRampShaderFcn.setColorRampType(QgsColorRampShader.Exact)
        colorRampShaderFcn.setClassificationMode(QgsColorRampShader.Continuous)
        colorRampShaderFcn.setClip(True)
        items = []
        for index in range(255):
            items.append(QgsColorRampShader.ColorRampItem(index, QColor.fromHsv(index, 255, 255, 255), "{}".format(index)))
        colorRampShaderFcn.setColorRampItemList(items)
        shader.setRasterShaderFunction(colorRampShaderFcn)
        # create instance to test
        rasterRenderer = QgsSingleBandPseudoColorRenderer(self.raster_layer.dataProvider(), bandNo, shader)
        # NOTE(review): the large-colormap 'extended' assertions are left
        # commented out -- this branch currently verifies nothing.
        # self.raster_layer.setRenderer(rasterRenderer)
        # dom, root = self.rendererToSld(self.raster_layer.renderer())
        # self.assertTrue( colorMap.hasAttribute( 'extendend' ) )
        # self.assertEqual( colorMap.attribute( 'extendend' ), 'true' )
    def testPalettedRasterRenderer(self):
        """Paletted rendering must export a 'values' ColorMap; maps larger
        than 255 entries must carry extended="true"."""
        # create 10 color classes
        #classesString = '122 0 0 0 255 122\n123 1 1 1 255 123\n124 2 2 2 255 124\n125 3 3 3 255 125\n126 4 4 4 255 126\n127 5 5 5 255 127\n128 6 6 6 255 128\n129 7 7 7 255 129\n130 8 8 8 255 130'
        classesString = ''
        # each line is: value red green blue alpha label
        for index in range(10):
            classesString += '{0} {0} {0} {0} 255 {0}\n'.format(index)
        classes = QgsPalettedRasterRenderer.classDataFromString(classesString)
        rasterRenderer = QgsPalettedRasterRenderer(
            self.raster_layer.dataProvider(), 3, classes)
        self.raster_layer.setRenderer(rasterRenderer)
        dom, root = self.rendererToSld(self.raster_layer.renderer())
        self.assertNoOpacity(root)
        self.assertChannelBand(root, 'sld:GrayChannel', '3')
        # check ColorMapEntry classes
        colorMap = root.elementsByTagName('sld:ColorMap')
        colorMap = colorMap.item(0).toElement()
        self.assertFalse(colorMap.isNull())
        self.assertEqual(colorMap.attribute('type'), 'values')
        self.assertFalse(colorMap.hasAttribute('extendend'))
        colorMapEntries = colorMap.elementsByTagName('sld:ColorMapEntry')
        self.assertEqual(colorMapEntries.count(), 10)
        for index in range(colorMapEntries.count()):
            colorMapEntry = colorMapEntries.at(index).toElement()
            self.assertEqual(colorMapEntry.attribute('quantity'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('label'), '{}'.format(index))
            self.assertEqual(colorMapEntry.attribute('opacity'), '')
            self.assertEqual(colorMapEntry.attribute('color'), '#{0:02d}{0:02d}{0:02d}'.format(index))
        # add check that is set ColoMap extended="true" if colormap is bigger that 255 entries
        classesString = ''
        # NOTE(review): random.choice makes the colors non-deterministic;
        # harmless here since only the 'extended' attribute is asserted.
        values = range(255)
        for index in range(255):
            classesString += '{0} {1} {1} {1} 255 {0}\n'.format(index, random.choice(values))
        classes = QgsPalettedRasterRenderer.classDataFromString(classesString)
        rasterRenderer = QgsPalettedRasterRenderer(
            self.raster_layer.dataProvider(), 3, classes)
        self.raster_layer.setRenderer(rasterRenderer)
        dom, root = self.rendererToSld(self.raster_layer.renderer())
        colorMap = root.elementsByTagName('sld:ColorMap')
        colorMap = colorMap.item(0).toElement()
        self.assertTrue(colorMap.hasAttribute('extended'))
        self.assertEqual(colorMap.attribute('extended'), 'true')
def testMultiBandColorRenderer(self):
rasterRenderer = QgsMultiBandColorRenderer(
self.raster_layer.dataProvider(), 3, 1, 2)
self.raster_layer.setRenderer(rasterRenderer)
self.raster_layer.setContrastEnhancement(algorithm=QgsContrastEnhancement.StretchToMinimumMaximum,
limits=QgsRasterMinMaxOrigin.MinMax)
dom, root = self.rendererToSld(self.raster_layer.renderer())
self.assertNoOpacity(root)
self.assertChannelBand(root, 'sld:RedChannel', '3')
self.assertChannelBand(root, 'sld:GreenChannel', '1')
self.assertChannelBand(root, 'sld:BlueChannel', '2')
def testSingleBandGrayRenderer(self):
    """SLD export of a single-band gray renderer under the three stretch algorithms.

    Checks the exported ContrastEnhancement VendorOptions and the generated
    black->white ColorMap ramp, including the transparent guard entries added
    by the clipping algorithms.
    """
    # check with StretchToMinimumMaximum
    rasterRenderer = QgsSingleBandGrayRenderer(self.raster_layer.dataProvider(), 3)
    self.raster_layer.setRenderer(rasterRenderer)
    self.raster_layer.setContrastEnhancement(algorithm=QgsContrastEnhancement.StretchToMinimumMaximum,
                                             limits=QgsRasterMinMaxOrigin.MinMax)
    maximum = self.raster_layer.renderer().contrastEnhancement().maximumValue()
    minmum = self.raster_layer.renderer().contrastEnhancement().minimumValue()  # (sic: "minmum" typo kept)
    # 51/172 are the computed statistics of the test fixture's band 3
    self.assertEqual(minmum, 51)
    self.assertEqual(maximum, 172)
    # check default values
    dom, root = self.rendererToSld(self.raster_layer.renderer())
    self.assertNoOpacity(root)
    self.assertChannelBand(root, 'sld:GrayChannel', '3')
    elements = root.elementsByTagName('sld:ContrastEnhancement')
    self.assertEqual(len(elements), 1)
    enhancement = elements.at(0).toElement()
    self.assertFalse(enhancement.isNull())
    normalize = enhancement.firstChildElement('sld:Normalize')
    self.assertFalse(normalize.isNull())
    # algorithm and min/max are exported as VendorOptions of sld:Normalize
    self.assertVendorOption(normalize, 'algorithm', 'StretchToMinimumMaximum')
    self.assertVendorOption(normalize, 'minValue', '51')
    self.assertVendorOption(normalize, 'maxValue', '172')
    # a plain two-entry black(0) -> white(255) ColorMap is exported
    elements = root.elementsByTagName('sld:ColorMap')
    self.assertEqual(len(elements), 1)
    colorMap = elements.at(0).toElement()
    self.assertFalse(colorMap.isNull())
    colorMapEntries = colorMap.elementsByTagName('sld:ColorMapEntry')
    self.assertEqual(len(colorMapEntries), 2)
    clorMap1 = colorMapEntries.at(0)
    self.assertEqual(clorMap1.attributes().namedItem('color').nodeValue(), '#000000')
    self.assertEqual(clorMap1.attributes().namedItem('quantity').nodeValue(), '0')
    clorMap2 = colorMapEntries.at(1)
    self.assertEqual(clorMap2.attributes().namedItem('color').nodeValue(), '#ffffff')
    self.assertEqual(clorMap2.attributes().namedItem('quantity').nodeValue(), '255')
    # check when StretchAndClipToMinimumMaximum
    # then min/max have always to be the real one and not that set in the contrastEnhancement
    self.raster_layer.setContrastEnhancement(algorithm=QgsContrastEnhancement.StretchAndClipToMinimumMaximum,
                                             limits=QgsRasterMinMaxOrigin.MinMax)
    # NOTE: setMinimumValue() returns None; minmum is reassigned two lines below
    minmum = self.raster_layer.renderer().contrastEnhancement().setMinimumValue(100)
    maximum = self.raster_layer.renderer().contrastEnhancement().maximumValue()
    minmum = self.raster_layer.renderer().contrastEnhancement().minimumValue()
    self.assertEqual(minmum, 100)
    self.assertEqual(maximum, 172)
    dom, root = self.rendererToSld(self.raster_layer.renderer())
    self.assertNoOpacity(root)
    self.assertChannelBand(root, 'sld:GrayChannel', '3')
    elements = root.elementsByTagName('sld:ContrastEnhancement')
    self.assertEqual(len(elements), 1)
    enhancement = elements.at(0).toElement()
    self.assertFalse(enhancement.isNull())
    normalize = enhancement.firstChildElement('sld:Normalize')
    self.assertFalse(normalize.isNull())
    # exported min/max keep the layer's real statistics (51/172), not the 100 set above
    self.assertVendorOption(normalize, 'minValue', '51')
    self.assertVendorOption(normalize, 'maxValue', '172')
    elements = root.elementsByTagName('sld:ColorMap')
    self.assertEqual(len(elements), 1)
    colorMap = elements.at(0).toElement()
    self.assertFalse(colorMap.isNull())
    # clipping adds transparent (opacity=0) guard entries around the ramp
    colorMapEntries = colorMap.elementsByTagName('sld:ColorMapEntry')
    self.assertEqual(len(colorMapEntries), 4)
    clorMap1 = colorMapEntries.at(0)
    self.assertEqual(clorMap1.attributes().namedItem('color').nodeValue(), '#000000')
    self.assertEqual(clorMap1.attributes().namedItem('quantity').nodeValue(), '100')
    self.assertEqual(clorMap1.attributes().namedItem('opacity').nodeValue(), '0')
    clorMap2 = colorMapEntries.at(1)
    self.assertEqual(clorMap2.attributes().namedItem('color').nodeValue(), '#000000')
    self.assertEqual(clorMap2.attributes().namedItem('quantity').nodeValue(), '100')
    clorMap3 = colorMapEntries.at(2)
    self.assertEqual(clorMap3.attributes().namedItem('color').nodeValue(), '#ffffff')
    self.assertEqual(clorMap3.attributes().namedItem('quantity').nodeValue(), '172')
    clorMap4 = colorMapEntries.at(3)
    self.assertEqual(clorMap4.attributes().namedItem('color').nodeValue(), '#ffffff')
    self.assertEqual(clorMap4.attributes().namedItem('quantity').nodeValue(), '172')
    self.assertEqual(clorMap4.attributes().namedItem('opacity').nodeValue(), '0')
    # check when ClipToMinimumMaximum
    # then min/max have always to be the real one and not that set in the contrastEnhancement
    self.raster_layer.setContrastEnhancement(algorithm=QgsContrastEnhancement.ClipToMinimumMaximum,
                                             limits=QgsRasterMinMaxOrigin.MinMax)
    minmum = self.raster_layer.renderer().contrastEnhancement().setMinimumValue(100)
    maximum = self.raster_layer.renderer().contrastEnhancement().maximumValue()
    minmum = self.raster_layer.renderer().contrastEnhancement().minimumValue()
    self.assertEqual(minmum, 100)
    self.assertEqual(maximum, 172)
    dom, root = self.rendererToSld(self.raster_layer.renderer())
    self.assertNoOpacity(root)
    self.assertChannelBand(root, 'sld:GrayChannel', '3')
    elements = root.elementsByTagName('sld:ContrastEnhancement')
    self.assertEqual(len(elements), 1)
    enhancement = elements.at(0).toElement()
    self.assertFalse(enhancement.isNull())
    normalize = enhancement.firstChildElement('sld:Normalize')
    self.assertFalse(normalize.isNull())
    self.assertVendorOption(normalize, 'minValue', '51')
    self.assertVendorOption(normalize, 'maxValue', '172')
    elements = root.elementsByTagName('sld:ColorMap')
    self.assertEqual(len(elements), 1)
    colorMap = elements.at(0).toElement()
    self.assertFalse(colorMap.isNull())
    colorMapEntries = colorMap.elementsByTagName('sld:ColorMapEntry')
    self.assertEqual(len(colorMapEntries), 4)
    clorMap1 = colorMapEntries.at(0)
    self.assertEqual(clorMap1.attributes().namedItem('color').nodeValue(), '#000000')
    self.assertEqual(clorMap1.attributes().namedItem('quantity').nodeValue(), '100')
    self.assertEqual(clorMap1.attributes().namedItem('opacity').nodeValue(), '0')
    clorMap2 = colorMapEntries.at(1)
    self.assertEqual(clorMap2.attributes().namedItem('color').nodeValue(), '#000000')
    self.assertEqual(clorMap2.attributes().namedItem('quantity').nodeValue(), '100')
    clorMap3 = colorMapEntries.at(2)
    self.assertEqual(clorMap3.attributes().namedItem('color').nodeValue(), '#ffffff')
    self.assertEqual(clorMap3.attributes().namedItem('quantity').nodeValue(), '172')
    clorMap4 = colorMapEntries.at(3)
    self.assertEqual(clorMap4.attributes().namedItem('color').nodeValue(), '#ffffff')
    self.assertEqual(clorMap4.attributes().namedItem('quantity').nodeValue(), '172')
    self.assertEqual(clorMap4.attributes().namedItem('opacity').nodeValue(), '0')
def testRasterRenderer(self):
    """SLD export of a bare QgsRasterRenderer: opacity handling only."""
    # minimal concrete renderer; base QgsRasterRenderer is abstract-ish so
    # wrap it with an empty type string
    class fakerenderer(QgsRasterRenderer):
        def __init__(self, interface):
            QgsRasterRenderer.__init__(self, interface, '')
    rasterRenderer = fakerenderer(self.raster_layer.dataProvider())
    self.raster_layer.setRenderer(rasterRenderer)
    # check opacity default value is not exported
    dom, root = self.rendererToSld(self.raster_layer.renderer())
    self.assertNoOpacity(root)
    # check if opacity is not the default value
    # (1.1 is intentionally out of the usual [0,1] range to be distinguishable)
    rasterRenderer.setOpacity(1.1)
    dom, root = self.rendererToSld(self.raster_layer.renderer())
    self.assertOpacity(root, '1.1')
    # check gamma properties from [-100:0] stretched to [0:1]
    # and (0:100] stretched to (1:100]
    # NOTE(review): the gamma/contrast checks below are disabled, presumably
    # pending gamma support in the SLD export — confirm before re-enabling.
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '-100'})
    # self.assertGamma(root, '0')
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '-50'})
    # self.assertGamma(root, '0.5')
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '0'})
    # self.assertGamma(root, '1')
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '1'})
    # self.assertGamma(root, '1')
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '100'})
    # self.assertGamma(root, '100')
    # # input contrast are always integer, btw the value is managed also if it's double
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '1.1'})
    # self.assertGamma(root, '1.1')
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '1.6'})
    # self.assertGamma(root, '1.6')
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '-50.5'})
    # self.assertGamma(root, '0.495')
    # dom, root = self.rendererToSld(rasterRenderer, {'contrast': '-0.1'})
    # self.assertGamma(root, '0.999')
def testStretchingAlgorithm(self):
    """Each contrast-enhancement algorithm maps to the expected SLD name and bounds."""
    renderer = QgsMultiBandColorRenderer(
        self.raster_layer.dataProvider(), 3, 1, 2)
    self.raster_layer.setRenderer(renderer)
    # per-channel (tag, min, max) statistics of the test fixture
    channels = (('sld:RedChannel', '51', '172'),
                ('sld:GreenChannel', '122', '130'),
                ('sld:BlueChannel', '133', '148'))
    # (QGIS algorithm, name expected in the exported SLD); note that
    # StretchAndClipToMinimumMaximum is exported as 'ClipToZero'
    algorithms = (
        (QgsContrastEnhancement.StretchToMinimumMaximum, 'StretchToMinimumMaximum'),
        (QgsContrastEnhancement.StretchAndClipToMinimumMaximum, 'ClipToZero'),
        (QgsContrastEnhancement.ClipToMinimumMaximum, 'ClipToMinimumMaximum'))
    for algorithm, sld_name in algorithms:
        self.raster_layer.setContrastEnhancement(algorithm=algorithm,
                                                 limits=QgsRasterMinMaxOrigin.MinMax)
        dom, root = self.rendererToSld(self.raster_layer.renderer())
        for tag, low, high in channels:
            self.assertContrastEnhancement(root, tag, sld_name, low, high)
    # NoEnhancement: no algorithm element is exported at all
    self.raster_layer.setContrastEnhancement(algorithm=QgsContrastEnhancement.NoEnhancement)
    dom, root = self.rendererToSld(self.raster_layer.renderer())
    for tag, low, high in channels:
        self.assertContrastEnhancement(root, tag)
def assertVendorOption(self, root, name, expectedValue):
    """Set expectedValue=None to check that the vendor option is not present."""
    options = root.elementsByTagName('sld:VendorOption')
    matched = False
    for idx in range(options.count()):
        option = options.at(idx)
        self.assertEqual('sld:VendorOption', option.nodeName())
        if option.attributes().namedItem('name').nodeValue() == name:
            matched = True
            self.assertEqual(option.firstChild().nodeValue(), expectedValue)
    if matched and expectedValue is None:
        self.fail("found VendorOption: {} where supposed not present".format(name))
    if not matched and expectedValue:
        self.fail("Not found VendorOption: {}".format(name))
def assertGamma(self, root, expectedValue, index=0):
    """Check the sld:GammaValue of the index-th ContrastEnhancement element."""
    node = root.elementsByTagName('sld:ContrastEnhancement').item(index)
    gamma_element = node.firstChildElement('sld:GammaValue')
    self.assertEqual(expectedValue, gamma_element.firstChild().nodeValue())
def assertOpacity(self, root, expectedValue, index=0):
    """Check the text value of the index-th sld:Opacity element."""
    node = root.elementsByTagName('sld:Opacity').item(index)
    self.assertEqual(expectedValue, node.firstChild().nodeValue())
def assertNoOpacity(self, root):
    """Assert that no sld:Opacity element was exported at all."""
    self.assertEqual(root.elementsByTagName('sld:Opacity').size(), 0)
def assertContrastEnhancement(self, root, bandTag, expectedAlg=None, expectedMin=None, expectedMax=None, index=0):
    """Check the ContrastEnhancement exported for the channel `bandTag`.

    With expectedAlg=None, asserts that NO enhancement algorithm is present;
    otherwise asserts the algorithm name and the min/max VendorOptions.
    """
    channelSelection = root.elementsByTagName('sld:ChannelSelection').item(index)
    self.assertIsNotNone(channelSelection)
    band = channelSelection.firstChildElement(bandTag)
    # check if no enhancement alg is set
    if (not expectedAlg):
        contrastEnhancementName = band.firstChildElement('sld:ContrastEnhancement')
        # an empty firstChild nodeName means no sld:Normalize child exists
        self.assertEqual('', contrastEnhancementName.firstChild().nodeName())
        return
    # check if enhancement alg is set
    contrastEnhancementName = band.firstChildElement('sld:ContrastEnhancement')
    self.assertEqual('sld:Normalize', contrastEnhancementName.firstChild().nodeName())
    normalize = contrastEnhancementName.firstChildElement('sld:Normalize')
    # NOTE: the tag here is 'VendorOption' (no 'sld:' prefix), unlike
    # assertVendorOption above — kept as the exporter produces it
    vendorOptions = normalize.elementsByTagName('VendorOption')
    for vendorOptionIndex in range(vendorOptions.count()):
        vendorOption = vendorOptions.at(vendorOptionIndex)
        self.assertEqual('VendorOption', vendorOption.nodeName())
        if (vendorOption.attributes().namedItem('name').nodeValue() == 'algorithm'):
            self.assertEqual(expectedAlg, vendorOption.firstChild().nodeValue())
        elif (vendorOption.attributes().namedItem('name').nodeValue() == 'minValue'):
            self.assertEqual(expectedMin, vendorOption.firstChild().nodeValue())
        elif (vendorOption.attributes().namedItem('name').nodeValue() == 'maxValue'):
            self.assertEqual(expectedMax, vendorOption.firstChild().nodeValue())
        else:
            self.fail('Unrecognised vendorOption name {}'.format(vendorOption.attributes().namedItem('name').nodeValue()))
def assertChannelBand(self, root, bandTag, expectedValue, index=0):
    """Assert that channel `bandTag` references the expected source band number."""
    selection = root.elementsByTagName('sld:ChannelSelection').item(index)
    self.assertIsNotNone(selection)
    channel = selection.firstChildElement(bandTag)
    source = channel.firstChildElement('sld:SourceChannelName')
    self.assertEqual(expectedValue, source.firstChild().nodeValue())
def rendererToSld(self, renderer, properties=None):
    """Export `renderer` to SLD under a fake root element.

    Fix: the original used a mutable default argument (`properties={}`);
    a dict default is shared across calls and would leak state if a callee
    ever mutated it. A None sentinel is the safe, equivalent interface.

    Returns:
        (QDomDocument, QDomElement): the document and the FakeRoot element.
    """
    if properties is None:
        properties = {}
    dom = QDomDocument()
    root = dom.createElement("FakeRoot")
    dom.appendChild(root)
    renderer.toSld(dom, root, properties)
    return dom, root
# Run the test suite when executed directly as a script.
if __name__ == '__main__':
    unittest.main()
|
gpl-2.0
|
Yukarumya/Yukarum-Redfoxes
|
third_party/rust/unicode-bidi/tools/generate.py
|
1
|
7122
|
#!/usr/bin/env python
#
# Based on src/etc/unicode.py from Rust 1.2.0.
#
# Copyright 2011-2013 The Rust Project Developers.
# Copyright 2015 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import fileinput, re, os, sys, operator
# Rust source emitted verbatim at the top of the generated tables.rs file.
preamble = '''// NOTE:
// The following code was generated by "tools/generate.py". do not edit directly
#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
'''
# these are the surrogate codepoints, which are not valid rust characters
# (inclusive UTF-16 surrogate range)
surrogate_codepoints = (0xd800, 0xdfff)
def fetch(f):
    """Download Unicode data file `f` into the current directory if missing.

    Terminates the process with status 1 when the file still cannot be found
    after the download attempt.
    """
    if not os.path.exists(os.path.basename(f)):
        os.system("curl -O http://www.unicode.org/Public/UNIDATA/%s"
                  % f)
    if not os.path.exists(os.path.basename(f)):
        sys.stderr.write("cannot load %s" % f)
        # Fix: use sys.exit() instead of the `exit` helper injected by
        # site.py, which is not guaranteed to exist in all interpreters
        # (e.g. when running with -S or embedded).
        sys.exit(1)
def is_surrogate(n):
    """Return True when code point `n` lies in the UTF-16 surrogate range."""
    lo, hi = surrogate_codepoints
    return lo <= n <= hi
def load_unicode_data(f):
    """Parse UnicodeData.txt into Bidi_Class range tables.

    Fix: replaces Python-2-only `xrange` with `range`, which behaves the
    same on Python 2 (for these modest range sizes) and makes the script
    runnable on Python 3. Stray statement-terminating semicolons dropped.

    Returns:
        tuple: (sorted category names, [(start, end, category), ...])
        as produced by group_cats().
    """
    fetch(f)
    udict = {}
    range_start = -1
    for line in fileinput.input(f):
        data = line.split(';')
        if len(data) != 15:
            # not a data record (UnicodeData rows have exactly 15 fields)
            continue
        cp = int(data[0], 16)
        if is_surrogate(cp):
            # surrogate codepoints are not valid Rust chars; skip them
            continue
        if range_start >= 0:
            # inside a "<..., First>/<..., Last>" range: apply the shared
            # record to every codepoint before the current one (the current
            # codepoint itself is assigned below)
            for i in range(range_start, cp):
                udict[i] = data
            range_start = -1
        if data[1].endswith(", First>"):
            range_start = cp
            continue
        udict[cp] = data
    # Mapping of code point to Bidi_Class property:
    bidi_class = {}
    for code in udict:
        [code_org, name, gencat, combine, bidi,
         decomp, deci, digit, num, mirror,
         old, iso, upcase, lowcase, titlecase] = udict[code]
        if bidi not in bidi_class:
            bidi_class[bidi] = []
        bidi_class[bidi].append(code)
    # Default Bidi_Class for unassigned codepoints.
    # http://www.unicode.org/Public/UNIDATA/extracted/DerivedBidiClass.txt
    default_ranges = [
        (0x0600, 0x07BF, "AL"), (0x08A0, 0x08FF, "AL"),
        (0xFB50, 0xFDCF, "AL"), (0xFDF0, 0xFDFF, "AL"),
        (0xFE70, 0xFEFF, "AL"), (0x1EE00, 0x0001EEFF, "AL"),
        (0x0590, 0x05FF, "R"), (0x07C0, 0x089F, "R"),
        (0xFB1D, 0xFB4F, "R"), (0x00010800, 0x00010FFF, "R"),
        (0x0001E800, 0x0001EDFF, "R"), (0x0001EF00, 0x0001EFFF, "R"),
        (0x20A0, 0x20CF, "ET")]
    for (start, end, default) in default_ranges:
        for code in range(start, end + 1):
            if code not in udict:
                bidi_class[default].append(code)
    bidi_class = group_cats(bidi_class)
    return bidi_class
def group_cats(cats):
    """Flatten {category: [codepoints]} into sorted (start, end, category) ranges.

    Returns a tuple of (sorted category names, ranges sorted by start point).
    """
    ranges = []
    for name, codepoints in cats.items():
        ranges.extend((lo, hi, name) for (lo, hi) in group_cat(codepoints))
    ranges.sort(key=lambda triple: triple[0])
    return (sorted(cats.keys()), ranges)
def group_cat(cat):
    """Collapse a collection of code points into sorted inclusive (start, end) ranges."""
    codepoints = sorted(set(cat))
    start = end = codepoints.pop(0)
    ranges = []
    for cp in codepoints:
        # sorted(set(...)) guarantees strict increase; assert documents it
        assert cp > end, \
            "cur_end: %s, letter: %s" % (hex(end), hex(cp))
        if cp == end + 1:
            # contiguous: extend the current range
            end = cp
        else:
            # gap: close the current range and open a new one
            ranges.append((start, end))
            start = end = cp
    ranges.append((start, end))
    return ranges
def format_table_content(f, content, indent):
    """Write comma-separated `content` to `f`, wrapping lines at ~98 columns."""
    pad = " " * indent
    current = pad
    is_first = True
    for piece in content.split(","):
        if len(current) + len(piece) < 98:
            # piece fits on the current line
            current = current + piece if is_first else current + ", " + piece
            is_first = False
        else:
            # flush the current line and start a fresh indented one
            f.write(current + ",\n")
            current = pad + piece
    f.write(current)
def escape_char(c):
    """Render code point `c` as a Rust character literal, e.g. 65 -> '\\u{41}'."""
    return "'\\u{%x}'" % (c,)
def emit_table(f, name, t_data, t_type = "&'static [(char, char)]", is_pub=True,
               pfun=lambda x: "(%s,%s)" % (escape_char(x[0]), escape_char(x[1]))):
    """Write a Rust static table named `name`, one formatted entry per t_data item."""
    pub_string = "pub " if is_pub else ""
    f.write("    %sconst %s: %s = &[\n" % (pub_string, name, t_type))
    # render every entry, joined with commas, then let the line-wrapper format it
    f.write
    format_table_content(f, ",".join(pfun(dat) for dat in t_data), 8)
    f.write("\n    ];\n\n")
def emit_bidi_module(f, bidi_class, cats):
    """Write the BidiClass enum, lookup function, and range table to `f` as Rust source."""
    f.write("""pub use self::BidiClass::*;
#[allow(non_camel_case_types)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
/// Represents the Unicode character property **Bidi_Class**, also known as
/// the *bidirectional character type*.
///
/// Use the `bidi_class` function to look up the BidiClass of a code point.
///
/// http://www.unicode.org/reports/tr9/#Bidirectional_Character_Types
pub enum BidiClass {
""")
    # one enum variant per Bidi_Class category name
    for cat in cats:
        f.write("    " + cat + ",\n")
    f.write("""}
fn bsearch_range_value_table(c: char, r: &'static [(char, char, BidiClass)]) -> BidiClass {
    use ::std::cmp::Ordering::{Equal, Less, Greater};
    match r.binary_search_by(|&(lo, hi, _)| {
        if lo <= c && c <= hi { Equal }
        else if hi < c { Less }
        else { Greater }
    }) {
        Ok(idx) => {
            let (_, _, cat) = r[idx];
            cat
        }
        // UCD/extracted/DerivedBidiClass.txt: "All code points not explicitly listed
        // for Bidi_Class have the value Left_To_Right (L)."
        Err(_) => L
    }
}
/// Find the BidiClass of a single char.
pub fn bidi_class(c: char) -> BidiClass {
    bsearch_range_value_table(c, bidi_class_table)
}
""")
    # the (char, char, BidiClass) range table the lookup function searches
    emit_table(f, "bidi_class_table", bidi_class, "&'static [(char, char, BidiClass)]",
               pfun=lambda x: "(%s,%s,%s)" % (escape_char(x[0]), escape_char(x[1]), x[2]),
               is_pub=False)
if __name__ == "__main__":
    os.chdir("../src/")  # changing download path to /unicode-bidi/src/
    r = "tables.rs"
    # downloading the test case files
    fetch("BidiTest.txt")
    fetch("BidiCharacterTest.txt")
    # regenerate tables.rs from scratch
    if os.path.exists(r):
        os.remove(r)
    with open(r, "w") as rf:
        # write the file's preamble
        rf.write(preamble)
        # download and parse all the data
        fetch("ReadMe.txt")
        with open("ReadMe.txt") as readme:
            # extract the Unicode version triple from the UCD ReadMe
            pattern = "for Version (\d+)\.(\d+)\.(\d+) of the Unicode"
            unicode_version = re.search(pattern, readme.read()).groups()
        rf.write("""
/// The version of [Unicode](http://www.unicode.org/)
/// that the `bidi_class` function is based on.
pub const UNICODE_VERSION: (u64, u64, u64) = (%s, %s, %s);
""" % unicode_version)
        (bidi_cats, bidi_class) = load_unicode_data("UnicodeData.txt")
        emit_bidi_module(rf, bidi_class, bidi_cats)
|
mpl-2.0
|
ip-tools/ip-navigator
|
patzilla/util/text/format.py
|
1
|
1253
|
# -*- coding: utf-8 -*-
# (c) 2014-2016 Andreas Motl, Elmyra UG
import re
# Characters removed when slugifying: everything except word chars, whitespace, '-'.
_slugify_strip_re = re.compile(r'[^\w\s-]')
# Same, but additionally keeps '=' (for key=value style slugs).
_slugify_strip_wo_equals_re = re.compile(r'[^\w\s=-]')
# Runs of hyphens/whitespace, later collapsed to a single '-'.
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def slugify(value, strip_equals=True, lowercase=True):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.
    From Django's "django/template/defaultfilters.py".
    Via http://code.activestate.com/recipes/577257-slugify-make-a-string-usable-in-a-url-or-filename/

    NOTE: uses the `unicode` builtin, so this function is Python 2 only.
    """
    import unicodedata
    if not isinstance(value, unicode):
        value = unicode(value)
    # drop accents/diacritics via NFKD, then any remaining non-ASCII bytes
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    # choose which strip pattern to use; optionally preserve '=' characters
    _strip_re = _slugify_strip_re
    if not strip_equals:
        _strip_re = _slugify_strip_wo_equals_re
    value = unicode(_strip_re.sub('', value).strip())
    if lowercase:
        value = value.lower()
    # collapse whitespace/hyphen runs into single hyphens
    value = _slugify_hyphenate_re.sub('-', value)
    return value
def text_indent(text, amount=4, ch=' '):
    """Indent every line of `text` by `amount` copies of `ch`.

    https://stackoverflow.com/questions/8234274/how-to-indent-the-content-of-a-string-in-python/8348914#8348914
    """
    prefix = amount * ch
    lines = text.split('\n')
    return prefix + ('\n' + prefix).join(lines)
|
agpl-3.0
|
Letractively/userinfuser
|
serverside/entities/badges.py
|
12
|
4345
|
# Copyright (C) 2011, CloudCaptive
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Author: Navraj Chohan
Description:
There are three badge types: The image, the template, and an instance
"""
import logging
import hashlib
import datetime
from accounts import Accounts
from users import Users
from google.appengine.ext import db
from google.appengine.ext.blobstore import blobstore
from django.utils import simplejson
# Alias so the rest of the code base can use the stdlib-style name.
json = simplejson
# Allowed values for the string-choice model properties defined below.
BOOLEAN = ["yes", "no"]
TYPES = ["free", "basic", "premium"]
STYPE = ["blob", "db"]
PERMISSION = ["private", "public"]
"""
Class: BadgeImage
Description: Stores the image of the badge
Attributes:
image: the image binary
permissions: pub or private
creator: Who created it
Notes:This instance type is only for testing purposes.
Actual images are stored using blobstore
"""
class BadgeImage(db.Model):
    """Datastore model holding a badge image binary.

    Per the module notes, this model is only used for testing; production
    images are stored via the blobstore (see Badges.blobKey).
    """
    image = db.BlobProperty(required=True)
    permissions = db.StringProperty(required=True, choices=set(PERMISSION))
    creator = db.ReferenceProperty(reference_class=Accounts, required=True)
    # Fix: the original choices listed 'gif' twice; the set de-duplicated it
    # anyway, so removing the duplicate is a behavior-identical cleanup.
    imgType = db.StringProperty(required=True, choices=set(['jpg', 'gif', 'png']))
    creationDate = db.DateTimeProperty(auto_now_add=True)
    modifiedDate = db.DateTimeProperty(auto_now=True)
"""
Class: Badges
Description: A badge type
Attributes:
name: What the badge is called
description: A brief explanation about the badge
altText: The alt text you see in the browser
setType: The pricing level
isEnabled
creationDate
creator: A reference to the account who created this type
tags: Tags by the owner (or everyone, if public)
permissions: Permission, if we allow sharing
blobKey: A reference to the image of this type
"""
class Badges(db.Model):
    """A badge type: name/description, pricing tier, permissions, image reference."""
    name = db.StringProperty(required=True)
    description = db.TextProperty(required=True)
    altText = db.StringProperty(required=True)
    # pricing level: one of TYPES ("free", "basic", "premium")
    setType = db.StringProperty(required=True, choices=set(TYPES))
    isEnabled = db.StringProperty(required=True, choices=set(BOOLEAN))
    creationDate = db.DateTimeProperty(auto_now_add=True)
    modifiedDate = db.DateTimeProperty(auto_now=True)
    creator = db.ReferenceProperty(reference_class=Accounts, required=True)
    tags = db.StringProperty()
    permissions = db.StringProperty(required=True, choices=set(PERMISSION))
    # where the image lives: "blob" (blobstore) or "db" (BadgeImage entity)
    storageType = db.StringProperty(required=True, choices=set(STYPE))
    # These make the badge clickable, routing to a resource or secret
    # link, etc.
    resourceLink = db.LinkProperty()
    downloadLink = db.LinkProperty()
    # a reference key to the object stored into the blobstore
    blobKey = blobstore.BlobReferenceProperty()
    imageKey = db.ReferenceProperty(reference_class=BadgeImage)
    # Uploaded files in static images of badges
    filePath = db.StringProperty()
    theme = db.StringProperty()
"""
Class: BadgeInstance
Description: An instance of a badge which has been given to a user
Attributes:
badgeRef: A reference to the type of badge
"""
class BadgeInstance(db.Model):
    """An instance of a badge type that has been (or may be) awarded to a user."""
    badgeRef = db.ReferenceProperty(reference_class=Badges, required=True)
    userRef = db.ReferenceProperty(reference_class=Users, required=True)
    awarded = db.StringProperty(required=True, choices=set(BOOLEAN))
    permissions = db.StringProperty(required=True, choices=set(PERMISSION))
    creationDate = db.DateTimeProperty(auto_now_add=True)
    awardDateTime = db.DateTimeProperty()
    awardDate = db.DateProperty()
    modifiedDate = db.DateTimeProperty(auto_now=True)
    instanceRegistrationDate = db.DateTimeProperty(auto_now=True)
    # sentinel default effectively means "not awardable by points"
    pointsRequired = db.IntegerProperty(default=9999999999)
    pointsEarned = db.IntegerProperty(default=0)
    expirationDate = db.DateTimeProperty()
    resource = db.LinkProperty()
    reason = db.StringProperty()
    downloadLink = db.LinkProperty()
|
gpl-3.0
|
LukeMurphey/splunk-network-tools
|
src/bin/network_tools_app/ipwhois/nir.py
|
1
|
23132
|
# Copyright (c) 2013-2019 Philip Hane
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from . import NetError
from .utils import unique_everseen
import logging
import sys
import re
import copy
from datetime import (datetime, timedelta)
if sys.version_info >= (3, 3): # pragma: no cover
from ipaddress import (ip_address,
ip_network,
summarize_address_range,
collapse_addresses)
else: # pragma: no cover
from ipaddr import (IPAddress as ip_address,
IPNetwork as ip_network,
summarize_address_range,
collapse_address_list as collapse_addresses)
# Module-level logger, named after the package module.
log = logging.getLogger(__name__)
# Base NIR whois output dictionary.
# Every parsed network starts as a copy of this; fields stay None when the
# NIR response does not provide them.
BASE_NET = {
    'cidr': None,
    'name': None,
    'handle': None,
    'range': None,
    'country': None,
    'address': None,
    'postal_code': None,
    'nameservers': None,
    'created': None,
    'updated': None,
    'contacts': None
}
# Base NIR whois contact output dictionary.
# Template for the admin/tech contact sub-dictionaries.
BASE_CONTACT = {
    'name': None,
    'email': None,
    'reply_email': None,
    'organization': None,
    'division': None,
    'title': None,
    'phone': None,
    'fax': None,
    'updated': None
}
# National Internet Registry
NIR_WHOIS = {
'jpnic': {
'country_code': 'JP',
'url': ('http://whois.nic.ad.jp/cgi-bin/whois_gw?lang=%2Fe&key={0}'
'&submit=query'),
'request_type': 'GET',
'request_headers': {'Accept': 'text/html'},
'form_data_ip_field': None,
'fields': {
'name': r'(\[Organization\])[^\S\n]+(?P<val>.*?)\n',
'handle': r'(\[Network Name\])[^\S\n]+(?P<val>.*?)\n',
'created': r'(\[Assigned Date\])[^\S\n]+(?P<val>.*?)\n',
'updated': r'(\[Last Update\])[^\S\n]+(?P<val>.*?)\n',
'nameservers': r'(\[Nameserver\])[^\S\n]+(?P<val>.*?)\n',
'contact_admin': r'(\[Administrative Contact\])[^\S\n]+.+?\>'
'(?P<val>.+?)\<\/A\>\n',
'contact_tech': r'(\[Technical Contact\])[^\S\n]+.+?\>'
'(?P<val>.+?)\<\/A\>\n'
},
'contact_fields': {
'name': r'(\[Last, First\])[^\S\n]+(?P<val>.*?)\n',
'email': r'(\[E-Mail\])[^\S\n]+(?P<val>.*?)\n',
'reply_email': r'(\[Reply Mail\])[^\S\n]+(?P<val>.*?)\n',
'organization': r'(\[Organization\])[^\S\n]+(?P<val>.*?)\n',
'division': r'(\[Division\])[^\S\n]+(?P<val>.*?)\n',
'title': r'(\[Title\])[^\S\n]+(?P<val>.*?)\n',
'phone': r'(\[TEL\])[^\S\n]+(?P<val>.*?)\n',
'fax': r'(\[FAX\])[^\S\n]+(?P<val>.*?)\n',
'updated': r'(\[Last Update\])[^\S\n]+(?P<val>.*?)\n'
},
'dt_format': '%Y/%m/%d %H:%M:%S(JST)',
'dt_hourdelta': 9,
'multi_net': False
},
'krnic': {
'country_code': 'KR',
'url': 'https://whois.kisa.or.kr/eng/whois.jsc',
'request_type': 'POST',
'request_headers': {'Accept': 'text/html'},
'form_data_ip_field': 'query',
'fields': {
'name': r'(Organization Name)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'handle': r'(Service Name|Network Type)[\s]+\:[^\S\n]+(?P<val>.+?)'
'\n',
'address': r'(Address)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'postal_code': r'(Zip Code)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'created': r'(Registration Date)[\s]+\:[^\S\n]+(?P<val>.+?)\n',
'contact_admin': r'(id="eng_isp_contact").+?\>(?P<val>.*?)\<'
'\/div\>\n',
'contact_tech': r'(id="eng_user_contact").+?\>(?P<val>.*?)\<'
'\/div\>\n'
},
'contact_fields': {
'name': r'(Name)[^\S\n]+?:[^\S\n]+?(?P<val>.*?)\n',
'email': r'(E-Mail)[^\S\n]+?:[^\S\n]+?(?P<val>.*?)\n',
'phone': r'(Phone)[^\S\n]+?:[^\S\n]+?(?P<val>.*?)\n'
},
'dt_format': '%Y%m%d',
'dt_hourdelta': 0,
'multi_net': True
}
}
class NIRWhois:
"""
The class for parsing whois data for NIRs (National Internet Registry).
JPNIC and KRNIC are currently the only NIRs supported. Output varies
based on NIR specific whois formatting.
Args:
net (:obj:`ipwhois.net.Net`): The network object.
Raises:
NetError: The parameter provided is not an instance of
ipwhois.net.Net
IPDefinedError: The address provided is defined (does not need to be
resolved).
"""
def __init__(self, net):
    """Validate and store the ipwhois.net.Net instance used for lookups."""
    from .net import Net
    # reject anything that is not an ipwhois.net.Net up front
    if not isinstance(net, Net):
        raise NetError('The provided net parameter is not an instance of '
                       'ipwhois.net.Net')
    self._net = net
def parse_fields(self, response, fields_dict, net_start=None,
                 net_end=None, dt_format=None, field_list=None,
                 hourdelta=0, is_contact=False):
    """
    The function for parsing whois fields from a data input.
    Args:
        response (:obj:`str`): The response from the whois/rwhois server.
        fields_dict (:obj:`dict`): The mapping of fields to regex search
            values (required).
        net_start (:obj:`int`): The starting point of the network (if
            parsing multiple networks). Defaults to None.
        net_end (:obj:`int`): The ending point of the network (if parsing
            multiple networks). Defaults to None.
        dt_format (:obj:`str`): The format of datetime fields if known.
            Defaults to None.
        field_list (:obj:`list` of :obj:`str`): If provided, fields to
            parse. Defaults to :obj:`ipwhois.nir.BASE_NET` if is_contact
            is False. Otherwise, defaults to
            :obj:`ipwhois.nir.BASE_CONTACT`.
        hourdelta (:obj:`int`): The timezone delta for created/updated
            fields. Defaults to 0.
        is_contact (:obj:`bool`): If True, uses contact information
            field parsing. Defaults to False.
    Returns:
        dict: A dictionary of fields provided in fields_dict, mapping to
            the results of the regex searches.
    """
    # trailing newline guarantees the '\n'-anchored field regexes can match
    # the last line of the response
    response = '{0}\n'.format(response)
    if is_contact:
        ret = {}
        if not field_list:
            field_list = list(BASE_CONTACT.keys())
    else:
        ret = {
            'contacts': {'admin': None, 'tech': None},
            'contact_admin': {},
            'contact_tech': {}
        }
        if not field_list:
            # contacts are parsed via the contact_admin/contact_tech
            # patterns instead of a direct 'contacts' field
            field_list = list(BASE_NET.keys())
            field_list.remove('contacts')
            field_list.append('contact_admin')
            field_list.append('contact_tech')
    # only the requested subset of patterns is evaluated
    generate = ((field, pattern) for (field, pattern) in
                fields_dict.items() if field in field_list)
    for field, pattern in generate:
        pattern = re.compile(
            str(pattern),
            re.DOTALL
        )
        # NOTE(review): when both bounds are given, pos=net_end and
        # endpos=net_start — the bounds come from the caller's network
        # ordering; looks inverted but matches upstream ipwhois behavior.
        if net_start is not None:
            match = pattern.finditer(response, net_end, net_start)
        elif net_end is not None:
            match = pattern.finditer(response, net_end)
        else:
            match = pattern.finditer(response)
        values = []
        for m in match:
            try:
                values.append(m.group('val').strip())
            except IndexError:
                # pattern has no 'val' group for this match; skip it
                pass
        if len(values) > 0:
            value = None
            try:
                if field in ['created', 'updated'] and dt_format:
                    # normalize NIR-local timestamps to UTC ISO-8601
                    value = (
                        datetime.strptime(
                            values[0],
                            str(dt_format)
                        ) - timedelta(hours=hourdelta)
                    ).isoformat('T')
                elif field in ['nameservers']:
                    # keep as a de-duplicated list
                    value = list(unique_everseen(values))
                else:
                    # de-duplicate and join multi-line values
                    values = unique_everseen(values)
                    value = '\n'.join(values)
            except ValueError as e:
                log.debug('NIR whois field parsing failed for {0}: {1}'
                          ''.format(field, e))
                pass
            ret[field] = value
    return ret
def _parse_fields(self, *args, **kwargs):
    """
    Deprecated. This will be removed in a future release.

    Forwards all arguments to :meth:`parse_fields` after emitting a
    deprecation notice.
    """

    from warnings import warn

    message = ('NIRWhois._parse_fields() has been deprecated and will be '
               'removed. You should now use NIRWhois.parse_fields().')
    warn(message)
    return self.parse_fields(*args, **kwargs)
def get_nets_jpnic(self, response):
    """
    The function for parsing network blocks from jpnic whois data.

    Args:
        response (:obj:`str`): The response from the jpnic server.

    Returns:
        list of dict: Mapping of networks with start and end positions.

        ::

            [{
                'cidr' (str) - The network routing block
                'start' (int) - The starting point of the network
                'end' (int) - The endpoint point of the network
            }]
    """

    nets = []

    # Each '[Network Number]' line carries the CIDR inside an HTML
    # anchor; capture the value and remember the match span so callers
    # can slice the per-network sections out of the response.
    pattern = re.compile(
        r'^.*?(\[Network Number\])[^\S\n]+.+?>(?P<val>.+?)</A>$',
        re.MULTILINE
    )

    for match in pattern.finditer(response):
        try:
            net = copy.deepcopy(BASE_NET)
            tmp = ip_network(match.group(2))

            # Support both the py3 ipaddress API and older backports.
            try:  # pragma: no cover
                network_address = tmp.network_address
            except AttributeError:  # pragma: no cover
                network_address = tmp.ip
                pass

            try:  # pragma: no cover
                broadcast_address = tmp.broadcast_address
            except AttributeError:  # pragma: no cover
                broadcast_address = tmp.broadcast
                pass

            net['range'] = '{0} - {1}'.format(
                network_address + 1, broadcast_address
            )

            cidr = ip_network(match.group(2).strip()).__str__()

            net['cidr'] = cidr
            net['start'] = match.start()
            net['end'] = match.end()
            nets.append(net)
        except (ValueError, TypeError):
            pass

    return nets
def _get_nets_jpnic(self, *args, **kwargs):
    """
    Deprecated. This will be removed in a future release.

    Forwards all arguments to :meth:`get_nets_jpnic` after emitting a
    deprecation notice.
    """

    from warnings import warn

    message = ('NIRWhois._get_nets_jpnic() has been deprecated and will be '
               'removed. You should now use NIRWhois.get_nets_jpnic().')
    warn(message)
    return self.get_nets_jpnic(*args, **kwargs)
def get_nets_krnic(self, response):
    """
    The function for parsing network blocks from krnic whois data.

    Args:
        response (:obj:`str`): The response from the krnic server.

    Returns:
        list of dict: Mapping of networks with start and end positions.

        ::

            [{
                'cidr' (str) - The network routing block
                'start' (int) - The starting point of the network
                'end' (int) - The endpoint point of the network
            }]
    """
    nets = []

    # Iterate through all of the networks found, storing the CIDR value
    # and the start and end positions.
    # FIX: the continuation literal below was not a raw string in the
    # original, making '\S' and '\(' invalid escape sequences (a
    # DeprecationWarning, and a SyntaxWarning on Python 3.12+). The
    # assembled pattern text is unchanged.
    for match in re.finditer(
        r'^(IPv4 Address)[\s]+:[^\S\n]+((.+?)[^\S\n]-[^\S\n](.+?)'
        r'[^\S\n]\((.+?)\)|.+)$',
        response,
        re.MULTILINE
    ):

        try:

            net = copy.deepcopy(BASE_NET)
            net['range'] = match.group(2)

            if match.group(3) and match.group(4):
                # A 'start - end (CIDR)' form: collapse the address range
                # into the minimal list of CIDR blocks.
                addrs = []
                addrs.extend(summarize_address_range(
                    ip_address(match.group(3).strip()),
                    ip_address(match.group(4).strip())))

                cidr = ', '.join(
                    [str(i) for i in collapse_addresses(addrs)]
                )

                net['range'] = '{0} - {1}'.format(
                    match.group(3), match.group(4)
                )
            else:
                # Already a single CIDR expression.
                cidr = ip_network(match.group(2).strip()).__str__()

            net['cidr'] = cidr
            net['start'] = match.start()
            net['end'] = match.end()
            nets.append(net)

        except (ValueError, TypeError):
            # Skip lines that do not parse as addresses/networks.
            pass

    return nets
def _get_nets_krnic(self, *args, **kwargs):
    """
    Deprecated. This will be removed in a future release.

    Forwards all arguments to :meth:`get_nets_krnic` after emitting a
    deprecation notice.
    """

    from warnings import warn

    message = ('NIRWhois._get_nets_krnic() has been deprecated and will be '
               'removed. You should now use NIRWhois.get_nets_krnic().')
    warn(message)
    return self.get_nets_krnic(*args, **kwargs)
def get_contact(self, response=None, nir=None, handle=None,
                retry_count=3, dt_format=None):
    """
    The function for retrieving and parsing NIR whois data based on
    NIR_WHOIS contact_fields.

    Args:
        response (:obj:`str`): Optional response object, this bypasses the
            lookup.
        nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
            if response is None.
        handle (:obj:`str`): For NIRs that have separate contact queries
            (JPNIC), this is the contact handle to use in the query.
            Defaults to None.
        retry_count (:obj:`int`): The number of times to retry in case
            socket errors, timeouts, connection resets, etc. are
            encountered. Defaults to 3.
        dt_format (:obj:`str`): The format of datetime fields if known.
            Defaults to None.

    Returns:
        dict: Mapping of the fields provided in contact_fields, to their
            parsed results.
    """

    nir_config = NIR_WHOIS[nir]

    # KRNIC embeds contact data in the network response, so no extra
    # query is needed; for JPNIC, fetch the contact page by handle.
    if response or nir == 'krnic':
        contact_response = response
    else:
        contact_response = self._net.get_http_raw(
            url=str(nir_config['url']).format(handle),
            retry_count=retry_count,
            headers=nir_config['request_headers'],
            request_type=nir_config['request_type']
        )

    return self.parse_fields(
        response=contact_response,
        fields_dict=nir_config['contact_fields'],
        dt_format=dt_format,
        hourdelta=int(nir_config['dt_hourdelta']),
        is_contact=True
    )
def _get_contact(self, *args, **kwargs):
    """
    Deprecated. This will be removed in a future release.

    Forwards all arguments to :meth:`get_contact` after emitting a
    deprecation notice.
    """

    from warnings import warn

    message = ('NIRWhois._get_contact() has been deprecated and will be '
               'removed. You should now use NIRWhois.get_contact().')
    warn(message)
    return self.get_contact(*args, **kwargs)
def lookup(self, nir=None, inc_raw=False, retry_count=3, response=None,
           field_list=None, is_offline=False):
    """
    The function for retrieving and parsing NIR whois information for an IP
    address via HTTP (HTML scraping).

    Args:
        nir (:obj:`str`): The NIR to query ('jpnic' or 'krnic'). Required
            if response is None.
        inc_raw (:obj:`bool`, optional): Whether to include the raw
            results in the returned dictionary. Defaults to False.
        retry_count (:obj:`int`): The number of times to retry in case
            socket errors, timeouts, connection resets, etc. are
            encountered. Defaults to 3.
        response (:obj:`str`): Optional response object, this bypasses the
            NIR lookup. Required when is_offline=True.
        field_list (:obj:`list` of :obj:`str`): If provided, fields to
            parse. Defaults to :obj:`ipwhois.nir.BASE_NET`.
        is_offline (:obj:`bool`): Whether to perform lookups offline. If
            True, response and asn_data must be provided. Primarily used
            for testing.

    Returns:
        dict: The NIR whois results:

        ::

            {
                'query' (str) - The IP address.
                'nets' (list of dict) - Network information which consists
                    of the fields listed in the ipwhois.nir.NIR_WHOIS
                    dictionary.
                'raw' (str) - Raw NIR whois results if the inc_raw
                    parameter is True.
            }

    Raises:
        KeyError: nir is not a supported NIR, or is_offline=True without
            a response.
    """

    if nir not in NIR_WHOIS.keys():
        # NOTE(review): the message is missing its closing parenthesis.
        raise KeyError('Invalid arg for nir (National Internet Registry')

    # Create the return dictionary.
    results = {
        'query': self._net.address_str,
        'raw': None
    }

    # Only fetch the response if we haven't already.
    if response is None:

        if is_offline:

            raise KeyError('response argument required when '
                           'is_offline=True')

        log.debug('Response not given, perform WHOIS lookup for {0}'
                  .format(self._net.address_str))

        # Some NIRs (KRNIC) take the IP via a POST form field instead of
        # the URL.
        form_data = None
        if NIR_WHOIS[nir]['form_data_ip_field']:
            form_data = {NIR_WHOIS[nir]['form_data_ip_field']:
                         self._net.address_str}

        # Retrieve the whois data.
        response = self._net.get_http_raw(
            url=str(NIR_WHOIS[nir]['url']).format(self._net.address_str),
            retry_count=retry_count,
            headers=NIR_WHOIS[nir]['request_headers'],
            request_type=NIR_WHOIS[nir]['request_type'],
            form_data=form_data
        )

    # If inc_raw parameter is True, add the response to return dictionary.
    if inc_raw:

        results['raw'] = response

    nets = []
    nets_response = None
    # nir is guaranteed to be one of these by the NIR_WHOIS check above.
    if nir == 'jpnic':

        nets_response = self.get_nets_jpnic(response)

    elif nir == 'krnic':

        nets_response = self.get_nets_krnic(response)

    nets.extend(nets_response)

    # Cache of contact handle/text -> parsed contact data, so each
    # contact is resolved at most once across all networks.
    global_contacts = {}

    # Iterate through all of the network sections and parse out the
    # appropriate fields for each.
    log.debug('Parsing NIR WHOIS data')
    for index, net in enumerate(nets):

        # The parse region for this net runs from this net's match end
        # up to the start of the next net (or end of response).
        section_end = None
        if index + 1 < len(nets):

            section_end = nets[index + 1]['start']

        try:

            dt_format = NIR_WHOIS[nir]['dt_format']

        except KeyError:  # pragma: no cover

            dt_format = None

        temp_net = self.parse_fields(
            response=response,
            fields_dict=NIR_WHOIS[nir]['fields'],
            net_start=section_end,
            net_end=net['end'],
            dt_format=dt_format,
            field_list=field_list,
            hourdelta=int(NIR_WHOIS[nir]['dt_hourdelta'])
        )
        temp_net['country'] = NIR_WHOIS[nir]['country_code']
        contacts = {
            'admin': temp_net['contact_admin'],
            'tech': temp_net['contact_tech']
        }

        # The raw handle fields are replaced by resolved contact dicts.
        del (
            temp_net['contact_admin'],
            temp_net['contact_tech']
        )

        if not is_offline:

            for key, val in contacts.items():

                if len(val) > 0:

                    if isinstance(val, str):
                        val = val.splitlines()

                    for contact in val:

                        if contact in global_contacts.keys():

                            temp_net['contacts'][key] = (
                                global_contacts[contact]
                            )

                        else:

                            # KRNIC embeds the contact data inline;
                            # JPNIC needs a follow-up query by handle.
                            if nir == 'krnic':

                                tmp_response = contact
                                tmp_handle = None

                            else:

                                tmp_response = None
                                tmp_handle = contact

                            temp_net['contacts'][key] = self.get_contact(
                                response=tmp_response,
                                handle=tmp_handle,
                                nir=nir,
                                retry_count=retry_count,
                                dt_format=dt_format
                            )
                            global_contacts[contact] = (
                                temp_net['contacts'][key]
                            )

        # Merge the net dictionaries.
        net.update(temp_net)

        # The start and end values are no longer needed.
        del net['start'], net['end']

    # Add the networks to the return dictionary.
    results['nets'] = nets

    return results
|
apache-2.0
|
fxfitz/ansible
|
lib/ansible/modules/network/f5/bigip_gtm_datacenter.py
|
14
|
13189
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_gtm_datacenter
short_description: Manage Datacenter configuration in BIG-IP
description:
- Manage BIG-IP data center configuration. A data center defines the location
where the physical network components reside, such as the server and link
objects that share the same subnet on the network. This module is able to
manipulate the data center definitions in a BIG-IP.
version_added: 2.2
options:
contact:
description:
- The name of the contact for the data center.
description:
description:
- The description of the data center.
location:
description:
- The location of the data center.
name:
description:
- The name of the data center.
required: True
state:
description:
- The virtual address state. If C(absent), an attempt to delete the
virtual address will be made. This will only succeed if this
virtual address is not in use by a virtual server. C(present) creates
the virtual address and enables it. If C(enabled), enable the virtual
address if it exists. If C(disabled), create the virtual address if
needed, and set state to C(disabled).
default: present
choices:
- present
- absent
- enabled
- disabled
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create data center "New York"
bigip_gtm_datacenter:
server: lb.mydomain.com
user: admin
password: secret
name: New York
location: 222 West 23rd
delegate_to: localhost
'''
RETURN = r'''
contact:
description: The contact that was set on the datacenter.
returned: changed
type: string
sample: admin@root.local
description:
description: The description that was set for the datacenter.
returned: changed
type: string
sample: Datacenter in NYC
enabled:
description: Whether the datacenter is enabled or not
returned: changed
type: bool
sample: true
disabled:
description: Whether the datacenter is disabled or not.
returned: changed
type: bool
sample: true
state:
description: State of the datacenter.
returned: changed
type: string
sample: disabled
location:
description: The location that is set for the datacenter.
returned: changed
type: string
sample: 222 West 23rd
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
    """Shared parameter handling for the GTM datacenter module."""

    api_map = {}

    updatables = [
        'location', 'description', 'contact', 'state'
    ]

    returnables = [
        'location', 'description', 'contact', 'state', 'enabled', 'disabled'
    ]

    api_attributes = [
        'enabled', 'location', 'description', 'contact', 'disabled'
    ]

    def to_return(self):
        """Collect the returnable values, dropping unset entries."""
        gathered = {key: getattr(self, key) for key in self.returnables}
        return self._filter_params(gathered)

    def api_params(self):
        """Collect API attribute values, translating names via api_map."""
        gathered = {}
        for name in self.api_attributes:
            # Fall back to the attribute's own name when unmapped.
            gathered[name] = getattr(self, self.api_map.get(name, name))
        return self._filter_params(gathered)
class ApiParameters(Parameters):
    """Parameters as read back from the BIG-IP REST API."""

    @property
    def disabled(self):
        # Normalize anything other than an explicit True to None so that
        # the difference engine ignores unset values.
        return True if self._values['disabled'] is True else None

    @property
    def enabled(self):
        return True if self._values['enabled'] is True else None
class ModuleParameters(Parameters):
    """Parameters as supplied by the user in the Ansible task."""

    @property
    def disabled(self):
        # Only an explicit 'disabled' state marks the datacenter disabled.
        return True if self._values['state'] == 'disabled' else None

    @property
    def enabled(self):
        if self._values['state'] in ('enabled', 'present'):
            return True
        return None

    @property
    def state(self):
        requested = self._values['state']
        # 'present' is reported as-is; otherwise derive enabled/disabled.
        if requested != 'present':
            if self.enabled:
                return 'enabled'
            if self.disabled:
                return 'disabled'
        return requested
class Changes(Parameters):
    """Parameter container whose to_return() never raises."""

    def to_return(self):
        gathered = {}
        try:
            for key in self.returnables:
                gathered[key] = getattr(self, key)
            gathered = self._filter_params(gathered)
        except Exception:
            # Best-effort reporting: return whatever was collected.
            pass
        return gathered
class UsableChanges(Changes):
    # Changes in the form consumed by the API; no translation needed here.
    pass
class ReportableChanges(Changes):
    """Changes re-shaped for reporting back to the user."""

    @property
    def disabled(self):
        state = self._values['state']
        if state == 'disabled':
            return True
        if state in ('enabled', 'present'):
            return False
        return None

    @property
    def enabled(self):
        state = self._values['state']
        if state in ('enabled', 'present'):
            return True
        if state == 'disabled':
            return False
        return None
class Difference(object):
    """Compute which desired ('want') settings differ from the current
    ('have') settings."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for ``param``, or None if unchanged.

        A property named after the parameter takes precedence over the
        plain attribute comparison.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Plain comparison: report the wanted value when it differs, or
        # when the current value is simply absent.
        want_value = getattr(self.want, param)
        try:
            have_value = getattr(self.have, param)
            if want_value != have_value:
                return want_value
        except AttributeError:
            return want_value

    @property
    def state(self):
        # enabled/disabled ride along with a state change so the API
        # receives a consistent pair of values.
        if self.want.enabled != self.have.enabled:
            return dict(state=self.want.state, enabled=self.want.enabled)
        if self.want.disabled != self.have.disabled:
            return dict(state=self.want.state, disabled=self.want.disabled)
class ModuleManager(object):
    """Orchestrates the create/update/delete lifecycle of a GTM
    datacenter against the BIG-IP API, in check-mode-aware fashion."""

    def __init__(self, *args, **kwargs):
        # module: the AnsibleModule; client: the F5 SDK client.
        self.module = kwargs.pop('module', None)
        self.client = kwargs.pop('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def exec_module(self):
        """Dispatch on the requested state and build the result dict.

        Returns the Ansible result including 'changed' and any changed
        parameter values; wraps iControl HTTP errors as F5ModuleError.
        """
        changed = False
        result = dict()
        state = self.want.state

        try:
            if state in ['present', 'enabled', 'disabled']:
                changed = self.present()
            elif state == "absent":
                changed = self.absent()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Forward any '__warnings' accumulated during parameter handling
        # to Ansible's deprecation mechanism.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def _update_changed_options(self):
        """Diff want vs have over updatables; stash changes.

        Returns True when at least one parameter differs.
        """
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # A dict change (e.g. 'state') contributes several keys.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        # Thin wrapper kept for API parity with other F5 modules.
        result = self._update_changed_options()
        if result:
            return True
        return False

    def present(self):
        """Ensure the datacenter exists (create or update)."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        """Ensure the datacenter does not exist."""
        changed = False
        if self.exists():
            changed = self.remove()
        return changed

    def read_current_from_device(self):
        """Load the current datacenter config from the device."""
        resource = self.client.api.tm.gtm.datacenters.datacenter.load(
            name=self.want.name,
            partition=self.want.partition
        )
        result = resource.attrs
        return ApiParameters(params=result)

    def exists(self):
        """Return True when the datacenter exists on the device."""
        result = self.client.api.tm.gtm.datacenters.datacenter.exists(
            name=self.want.name,
            partition=self.want.partition
        )
        return result

    def update(self):
        """Apply changes to an existing datacenter; honor check mode."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        params = self.want.api_params()
        resource = self.client.api.tm.gtm.datacenters.datacenter.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.modify(**params)

    def create(self):
        """Create the datacenter; honor check mode and verify creation."""
        self.have = ApiParameters()
        # Populates self.changes for reporting even though there is no
        # existing config to diff against.
        self.should_update()
        if self.module.check_mode:
            return True
        self.create_on_device()
        if self.exists():
            return True
        else:
            raise F5ModuleError("Failed to create the datacenter")

    def create_on_device(self):
        params = self.want.api_params()
        self.client.api.tm.gtm.datacenters.datacenter.create(
            name=self.want.name,
            partition=self.want.partition,
            **params
        )

    def remove(self):
        """Delete the datacenter; honor check mode and verify deletion."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the datacenter")
        return True

    def remove_from_device(self):
        resource = self.client.api.tm.gtm.datacenters.datacenter.load(
            name=self.want.name,
            partition=self.want.partition
        )
        resource.delete()
class ArgumentSpec(object):
    """Builds the AnsibleModule argument specification."""

    def __init__(self):
        self.supports_check_mode = True
        module_args = dict(
            contact=dict(),
            description=dict(),
            location=dict(),
            name=dict(required=True),
            state=dict(
                default='present',
                choices=['present', 'absent', 'disabled', 'enabled']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Shared F5 connection arguments first, then module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the module, run the manager, report."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    if not HAS_F5SDK:
        module.fail_json(msg="The python f5-sdk module is required")

    # FIX: pre-bind client so the except clause cannot hit a NameError
    # (previously, if F5Client(**module.params) raised F5ModuleError,
    # 'client' was unbound when cleanup_tokens(client) ran).
    client = None
    try:
        client = F5Client(**module.params)
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        module.exit_json(**results)
    except F5ModuleError as ex:
        if client is not None:
            cleanup_tokens(client)
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
gpl-3.0
|
abhiatgithub/shogun-toolbox
|
examples/undocumented/python_modular/classifier_featureblock_logistic_regression.py
|
17
|
1312
|
#!/usr/bin/env python
from numpy import array,hstack
from numpy.random import seed, rand
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_numbers('../data/fm_train_real.dat')
testdat = lm.load_numbers('../data/fm_test_real.dat')
label_traindat = lm.load_labels('../data/label_train_twoclass.dat')
parameter_list = [[traindat,testdat,label_traindat]]
def classifier_featureblock_logistic_regression(fm_train=traindat, fm_test=testdat, label_train=label_traindat):
    """Train a FeatureBlockLogisticRegression classifier on two identical
    feature blocks and return the predicted labels.

    Args:
        fm_train: training feature matrix (features x examples).
        fm_test: test feature matrix (unused; kept for the example suite's
            uniform parameter convention).
        label_train: two-class training labels.

    Returns:
        Array of predicted labels for the training features.
    """
    from modshogun import BinaryLabels, RealFeatures, IndexBlock, IndexBlockGroup, FeatureBlockLogisticRegression

    # FIX: use the fm_train parameter here. The original hard-coded the
    # module-level 'traindat', silently ignoring the argument.
    features = RealFeatures(hstack((fm_train, fm_train)))
    labels = BinaryLabels(hstack((label_train, label_train)))

    # Split the doubled feature space into two equal index blocks.
    n_features = features.get_num_features()
    block_one = IndexBlock(0, n_features // 2)
    block_two = IndexBlock(n_features // 2, n_features)
    block_group = IndexBlockGroup()
    block_group.add_block(block_one)
    block_group.add_block(block_two)

    mtlr = FeatureBlockLogisticRegression(0.1, features, labels, block_group)
    mtlr.set_regularization(1)  # use regularization ratio
    mtlr.set_tolerance(1e-2)    # use 1e-2 tolerance
    mtlr.train()
    out = mtlr.apply().get_labels()

    return out
if __name__=='__main__':
print('FeatureBlockLogisticRegression')
classifier_featureblock_logistic_regression(*parameter_list[0])
|
gpl-3.0
|
dstrockis/outlook-autocategories
|
lib/cryptography/hazmat/primitives/asymmetric/dh.py
|
13
|
4852
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import six
from cryptography import utils
def generate_parameters(generator, key_size, backend):
    """Generate new DH parameters via *backend*.

    Delegates to the backend's ``generate_dh_parameters`` hook and
    returns the DHParameters object it produces.
    """
    params = backend.generate_dh_parameters(generator, key_size)
    return params
class DHPrivateNumbers(object):
    """The integers composing a DH private key: the secret value x plus
    the associated public numbers."""

    def __init__(self, x, public_numbers):
        # Validate eagerly so malformed values fail at construction time.
        if not isinstance(x, six.integer_types):
            raise TypeError("x must be an integer.")

        if not isinstance(public_numbers, DHPublicNumbers):
            raise TypeError("public_numbers must be an instance of "
                            "DHPublicNumbers.")

        self._x = x
        self._public_numbers = public_numbers

    def __eq__(self, other):
        if not isinstance(other, DHPrivateNumbers):
            return NotImplemented
        return (self._x == other._x and
                self._public_numbers == other._public_numbers)

    def __ne__(self, other):
        return not self == other

    def private_key(self, backend):
        """Materialize a backend-specific DHPrivateKey from these numbers."""
        return backend.load_dh_private_numbers(self)

    # Read-only accessors for the stored numbers.
    public_numbers = utils.read_only_property("_public_numbers")
    x = utils.read_only_property("_x")
class DHPublicNumbers(object):
    """The integers composing a DH public key: the public value y plus
    the associated parameter numbers."""

    def __init__(self, y, parameter_numbers):
        # Validate eagerly so malformed values fail at construction time.
        if not isinstance(y, six.integer_types):
            raise TypeError("y must be an integer.")

        if not isinstance(parameter_numbers, DHParameterNumbers):
            raise TypeError(
                "parameters must be an instance of DHParameterNumbers.")

        self._y = y
        self._parameter_numbers = parameter_numbers

    def __eq__(self, other):
        if not isinstance(other, DHPublicNumbers):
            return NotImplemented
        return (self._y == other._y and
                self._parameter_numbers == other._parameter_numbers)

    def __ne__(self, other):
        return not self == other

    def public_key(self, backend):
        """Materialize a backend-specific DHPublicKey from these numbers."""
        return backend.load_dh_public_numbers(self)

    # Read-only accessors for the stored numbers.
    y = utils.read_only_property("_y")
    parameter_numbers = utils.read_only_property("_parameter_numbers")
class DHParameterNumbers(object):
    """The DH domain parameters: prime modulus p and generator g."""

    def __init__(self, p, g):
        if (
            not isinstance(p, six.integer_types) or
            not isinstance(g, six.integer_types)
        ):
            raise TypeError("p and g must be integers")
        # This implementation only accepts the conventional generators.
        if g not in (2, 5):
            raise ValueError("DH generator must be 2 or 5")

        self._p = p
        self._g = g

    def __eq__(self, other):
        if not isinstance(other, DHParameterNumbers):
            return NotImplemented
        return (self._p == other._p and
                self._g == other._g)

    def __ne__(self, other):
        return not self == other

    def parameters(self, backend):
        """Materialize backend-specific DHParameters from these numbers."""
        return backend.load_dh_parameter_numbers(self)

    # Read-only accessors for the stored numbers.
    p = utils.read_only_property("_p")
    g = utils.read_only_property("_g")
@six.add_metaclass(abc.ABCMeta)
class DHParameters(object):
    """Abstract interface for a set of Diffie-Hellman domain parameters."""

    @abc.abstractmethod
    def generate_private_key(self):
        """
        Generates and returns a DHPrivateKey.
        """
@six.add_metaclass(abc.ABCMeta)
class DHParametersWithSerialization(DHParameters):
    """DHParameters whose numbers can be extracted for serialization."""

    @abc.abstractmethod
    def parameter_numbers(self):
        """
        Returns a DHParameterNumbers.
        """
@six.add_metaclass(abc.ABCMeta)
class DHPrivateKey(object):
    """Abstract interface for a Diffie-Hellman private key."""

    @abc.abstractproperty
    def key_size(self):
        """
        The bit length of the prime modulus.
        """

    @abc.abstractmethod
    def public_key(self):
        """
        The DHPublicKey associated with this private key.
        """

    @abc.abstractmethod
    def parameters(self):
        """
        The DHParameters object associated with this private key.
        """
@six.add_metaclass(abc.ABCMeta)
class DHPrivateKeyWithSerialization(DHPrivateKey):
    """A DHPrivateKey whose numbers can be extracted, and which can
    perform the key exchange."""

    @abc.abstractmethod
    def private_numbers(self):
        """
        Returns a DHPrivateNumbers.
        """

    @abc.abstractmethod
    def exchange(self, peer_public_key):
        """
        Given peer's DHPublicKey, carry out the key exchange and
        return shared key as bytes.
        """
@six.add_metaclass(abc.ABCMeta)
class DHPublicKey(object):
    """Abstract interface for a Diffie-Hellman public key."""

    @abc.abstractproperty
    def key_size(self):
        """
        The bit length of the prime modulus.
        """

    @abc.abstractmethod
    def parameters(self):
        """
        The DHParameters object associated with this public key.
        """
@six.add_metaclass(abc.ABCMeta)
class DHPublicKeyWithSerialization(DHPublicKey):
    """A DHPublicKey whose numbers can be extracted for serialization."""

    @abc.abstractmethod
    def public_numbers(self):
        """
        Returns a DHPublicNumbers.
        """
|
apache-2.0
|
romain-dartigues/ansible
|
lib/ansible/modules/network/f5/bigip_device_syslog.py
|
9
|
18823
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_device_syslog
short_description: Manage system-level syslog settings on BIG-IP
description:
- Manage system-level syslog settings on BIG-IP.
version_added: 2.8
options:
auth_priv_from:
description:
- Specifies the lowest level of messages about user authentication
to include in the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
auth_priv_to:
description:
- Specifies the highest level of messages about user authentication
to include in the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
console_log:
description:
- Enables or disables logging emergency syslog messages to the
console.
type: bool
cron_from:
description:
- Specifies the lowest level of messages about time-based scheduling
to include in the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
cron_to:
description:
- Specifies the highest level of messages about time-based
scheduling to include in the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
daemon_from:
description:
- Specifies the lowest level of messages about daemon performance to
include in the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
daemon_to:
description:
- Specifies the highest level of messages about daemon performance
to include in the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
include:
description:
- Syslog-NG configuration to include in the device syslog config.
iso_date:
description:
- Enables or disables the ISO date format for messages in the log
files.
type: bool
kern_from:
description:
- Specifies the lowest level of kernel messages to include in the
system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
kern_to:
description:
- Specifies the highest level of kernel messages to include in the
system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
local6_from:
description:
- Specifies the lowest error level for messages from the local6
facility to include in the log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
local6_to:
description:
- Specifies the highest error level for messages from the local6
facility to include in the log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
mail_from:
description:
- Specifies the lowest level of mail log messages to include in the
system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
mail_to:
description:
- Specifies the highest level of mail log messages to include in the
system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
messages_from:
description:
- Specifies the lowest level of system messages to include in the
system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
messages_to:
description:
- Specifies the highest level of system messages to include in the
system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
user_log_from:
description:
- Specifies the lowest level of user account messages to include in
the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
user_log_to:
description:
- Specifies the highest level of user account messages to include in
the system log.
choices:
- alert
- crit
- debug
- emerg
- err
- info
- notice
- warning
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a ...
bigip_device_syslog:
name: foo
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
auth_priv_from:
description: The new lowest user authentication logging level
returned: changed
type: string
sample: alert
auth_priv_to:
description: The new highest user authentication logging level.
returned: changed
type: string
sample: emerg
console_log:
description: Whether logging to console is enabled or not.
returned: changed
type: bool
sample: yes
iso_date:
description: Whether ISO date format in logs is enabled or not
returned: changed
type: bool
sample: no
cron_from:
description: The new lowest time-based scheduling logging level.
returned: changed
type: string
sample: emerg
cron_to:
description: The new highest time-based scheduling logging level.
returned: changed
type: string
sample: alert
daemon_from:
description: The new lowest daemon performance logging level.
returned: changed
type: string
sample: alert
daemon_to:
description: The new highest daemon performance logging level.
returned: changed
type: string
sample: alert
include:
description: The new extra syslog-ng configuration to include in syslog config.
returned: changed
type: string
sample: "filter f_remote_syslog { not (facility(local6)) };"
kern_from:
description: The new lowest kernel messages logging level.
returned: changed
type: string
sample: alert
kern_to:
description: The new highest kernel messages logging level.
returned: changed
type: string
sample: alert
local6_from:
description: The new lowest local6 facility logging level.
returned: changed
type: string
sample: alert
local6_to:
description: The new highest local6 facility logging level.
returned: changed
type: string
sample: alert
mail_from:
description: The new lowest mail log logging level.
returned: changed
type: string
sample: alert
mail_to:
description: The new highest mail log logging level.
returned: changed
type: string
sample: alert
messages_from:
description: The new lowest system logging level.
returned: changed
type: string
sample: alert
messages_to:
description: The new highest system logging level.
returned: changed
type: string
sample: alert
user_log_from:
description: The new lowest user account logging level.
returned: changed
type: string
sample: alert
user_log_to:
description: The new highest user account logging level.
returned: changed
type: string
sample: alert
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.compare import cmp_str_with_none
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.compare import cmp_str_with_none
class Parameters(AnsibleF5Parameters):
    """Shared parameter handling for the BIG-IP remote-syslog settings.

    Declares the mapping between the REST API's camelCase attribute
    names and this module's snake_case option names, plus which
    attributes are sent to the API, returned to the user, and compared
    for change detection.
    """

    # REST API attribute name -> module option name.
    api_map = {
        'authPrivFrom': 'auth_priv_from',
        'authPrivTo': 'auth_priv_to',
        'consoleLog': 'console_log',
        'cronFrom': 'cron_from',
        'cronTo': 'cron_to',
        'daemonFrom': 'daemon_from',
        'daemonTo': 'daemon_to',
        'isoDate': 'iso_date',
        'kernFrom': 'kern_from',
        'kernTo': 'kern_to',
        'local6From': 'local6_from',
        'local6To': 'local6_to',
        'mailFrom': 'mail_from',
        'mailTo': 'mail_to',
        'messagesFrom': 'messages_from',
        'messagesTo': 'messages_to',
        'userLogFrom': 'user_log_from',
        'userLogTo': 'user_log_to',
    }

    # API-side attribute names included in PATCH payloads.
    api_attributes = [
        'include',
        'authPrivFrom',
        'authPrivTo',
        'consoleLog',
        'cronFrom',
        'cronTo',
        'daemonFrom',
        'daemonTo',
        'isoDate',
        'kernFrom',
        'kernTo',
        'local6From',
        'local6To',
        'mailFrom',
        'mailTo',
        'messagesFrom',
        'messagesTo',
        'userLogFrom',
        'userLogTo',
    ]

    # Module-side names reported back to the user in the result.
    returnables = [
        'include',
        'auth_priv_from',
        'auth_priv_to',
        'console_log',
        'cron_from',
        'cron_to',
        'daemon_from',
        'daemon_to',
        'iso_date',
        'kern_from',
        'kern_to',
        'local6_from',
        'local6_to',
        'mail_from',
        'mail_to',
        'messages_from',
        'messages_to',
        'user_log_from',
        'user_log_to',
    ]

    # Module-side names compared against the device for change detection.
    updatables = [
        'include',
        'auth_priv_from',
        'auth_priv_to',
        'console_log',
        'cron_from',
        'cron_to',
        'daemon_from',
        'daemon_to',
        'iso_date',
        'kern_from',
        'kern_to',
        'local6_from',
        'local6_to',
        'mail_from',
        'mail_to',
        'messages_from',
        'messages_to',
        'user_log_from',
        'user_log_to',
    ]

    @property
    def console_log(self):
        # Normalize truthy/falsey input to the strings 'yes'/'no'.
        return flatten_boolean(self._values['console_log'])

    @property
    def iso_date(self):
        # Normalize truthy/falsey input to the strings 'yes'/'no'.
        return flatten_boolean(self._values['iso_date'])
class ApiParameters(Parameters):
    """Parameters as reported by the BIG-IP REST API."""

    @property
    def include(self):
        # The API reports an absent include section either as None or as
        # the literal string 'none'; normalize both to None.
        raw = self._values['include']
        return None if raw in (None, 'none') else raw
class ModuleParameters(Parameters):
    """Parameters supplied by the user via the Ansible task."""

    @property
    def include(self):
        raw = self._values['include']
        if raw is None:
            return None
        if raw in ('', 'none'):
            # An explicit empty/'none' value means "clear the include".
            return ''
        # Double quotes break tmsh quoting on the device; use singles.
        return raw.replace('"', "'")
class Changes(Parameters):
    """Holder for the delta between desired and current configuration."""

    def to_return(self):
        """Return the changed values as a plain dict for Ansible output.

        Any error raised while collecting values is deliberately
        swallowed so that result reporting never fails the module run.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Changes converted into the values the BIG-IP API expects."""

    @property
    def console_log(self):
        return self._api_bool(self._values['console_log'])

    @property
    def iso_date(self):
        return self._api_bool(self._values['iso_date'])

    @staticmethod
    def _api_bool(value):
        # Map Ansible's 'yes'/'no' to the API's 'enabled'/'disabled',
        # passing None (no change requested) straight through.
        if value is None:
            return None
        return 'enabled' if value == 'yes' else 'disabled'
class ReportableChanges(Changes):
    """Changes converted back to user-facing 'yes'/'no' values."""

    @property
    def console_log(self):
        # Report the API's enabled/disabled back as yes/no.
        return flatten_boolean(self._values['console_log'])

    @property
    def iso_date(self):
        # Report the API's enabled/disabled back as yes/no.
        return flatten_boolean(self._values['iso_date'])
class Difference(object):
    """Compute which of the desired settings differ from the device's."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        # Use a dedicated comparison property when one exists; otherwise
        # fall back to the generic attribute comparison.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            # The device does not report this attribute at all; treat the
            # desired value as a change.
            return wanted
        if wanted != current:
            return wanted
        # Values match: fall through and return None (no change).

    @property
    def include(self):
        return cmp_str_with_none(self.want.include, self.have.include)
class ModuleManager(object):
    """Orchestrates reading device state and applying syslog updates."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # Record every option the user explicitly supplied as a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Diff want vs. have; stash differences, return True if any."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # Comparison properties may return a dict of several
                # related changes at once.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point used by main(); returns the Ansible result dict."""
        result = dict()
        changed = self.present()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface any queued deprecation warnings through Ansible.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        # The syslog resource always exists on the device, so "present"
        # simply means updating it in place.
        return self.update()

    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            # Check mode: report that a change would occur without
            # touching the device.
            return True
        self.update_on_device()
        return True

    def update_on_device(self):
        """PATCH the pending changes to /mgmt/tm/sys/syslog."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/sys/syslog".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def read_current_from_device(self):
        """GET the current syslog settings; return them as ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/sys/syslog".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the Ansible argument spec for the syslog module."""

    def __init__(self):
        self.supports_check_mode = True
        levels = [
            'alert', 'crit', 'debug', 'emerg', 'err', 'info', 'notice', 'warning'
        ]
        # All the from/to options share the same syslog-level choices, so
        # build them in bulk instead of listing each one by hand.
        level_options = (
            'auth_priv_from', 'auth_priv_to', 'cron_from', 'cron_to',
            'daemon_from', 'daemon_to', 'kern_from', 'kern_to',
            'local6_from', 'local6_to', 'mail_from', 'mail_to',
            'messages_from', 'messages_to', 'user_log_from', 'user_log_to',
        )
        argument_spec = {name: dict(choices=levels) for name in level_options}
        argument_spec.update(
            console_log=dict(type='bool'),
            include=dict(),
            iso_date=dict(type='bool'),
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    client = F5RestClient(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Always drop the auth token, then report the failure to Ansible.
        cleanup_tokens(client)
        fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
gpl-3.0
|
michalliu/OpenWrt-Firefly-Libraries
|
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/idlelib/idle_test/test_textview.py
|
79
|
2871
|
'''Test the functions and main class method of textView.py.
Since all methods and functions create (or destroy) a TextViewer, which
is a widget containing multiple widgets, all tests must be gui tests.
Using mock Text would not change this. Other mocks are used to retrieve
information about calls.
The coverage is essentially 100%.
'''
from test.support import requires
requires('gui')
import unittest
import os
from tkinter import Tk
from idlelib import textView as tv
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Mbox
def setUpModule():
    # Create one Tk root shared by every test in this module.
    global root
    root = Tk()
def tearDownModule():
    # Destroy the shared Tk root created in setUpModule.
    global root
    root.destroy()      # pyflakes falsely sees root as undefined
    del root
class TV(tv.TextViewer): # used by TextViewTest
    # Replace the modal-dialog plumbing with mock callables so the tests
    # can observe whether each call happened without blocking on a real
    # modal window.
    transient = Func()
    grab_set = Func()
    wait_window = Func()
class TextViewTest(unittest.TestCase):
    """GUI tests for TextViewer construction and its Ok button."""

    def setUp(self):
        # Reset the mock call records before each test.
        TV.transient.__init__()
        TV.grab_set.__init__()
        TV.wait_window.__init__()

    def test_init_modal(self):
        # Default construction is modal: all three dialog calls fire.
        view = TV(root, 'Title', 'test text')
        self.assertTrue(TV.transient.called)
        self.assertTrue(TV.grab_set.called)
        self.assertTrue(TV.wait_window.called)
        view.Ok()

    def test_init_nonmodal(self):
        # modal=False must skip the modal-dialog plumbing entirely.
        view = TV(root, 'Title', 'test text', modal=False)
        self.assertFalse(TV.transient.called)
        self.assertFalse(TV.grab_set.called)
        self.assertFalse(TV.wait_window.called)
        view.Ok()

    def test_ok(self):
        # Ok() must destroy the viewer.
        view = TV(root, 'Title', 'test text', modal=False)
        view.destroy = Func()
        view.Ok()
        self.assertTrue(view.destroy.called)
        del view.destroy  # unmask real function
        view.destroy
class textviewTest(unittest.TestCase):
    """Tests for the module-level view_text and view_file helpers."""

    @classmethod
    def setUpClass(cls):
        # Swap in the mock message box so error dialogs do not block.
        cls.orig_mbox = tv.tkMessageBox
        tv.tkMessageBox = Mbox

    @classmethod
    def tearDownClass(cls):
        tv.tkMessageBox = cls.orig_mbox
        del cls.orig_mbox

    def test_view_text(self):
        # If modal True, tkinter will error with 'can't invoke "event" command'
        view = tv.view_text(root, 'Title', 'test text', modal=False)
        self.assertIsInstance(view, tv.TextViewer)

    def test_view_file(self):
        # Viewing this very test file should succeed and show its text.
        test_dir = os.path.dirname(__file__)
        testfile = os.path.join(test_dir, 'test_textview.py')
        view = tv.view_file(root, 'Title', testfile, modal=False)
        self.assertIsInstance(view, tv.TextViewer)
        self.assertIn('Test', view.textView.get('1.0', '1.end'))
        view.Ok()
        # Mock messagebox will be used and view_file will not return anything
        testfile = os.path.join(test_dir, '../notthere.py')
        view = tv.view_file(root, 'Title', testfile, modal=False)
        self.assertIsNone(view)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
gpl-2.0
|
sublee/almost
|
almosttests.py
|
1
|
4697
|
# -*- coding: utf-8 -*-
import math
import sys
from pytest import deprecated_call
from almost import almost
def test_repeating_decimal():
    """Repeating decimals compare equal at default and custom precision."""
    assert almost(1 / 3.) == 0.333
    assert almost(1 / 6.) == 0.167
    assert almost(3227 / 555., prec=6) == 5.814414


def test_irrational_number():
    """Irrational constants compare equal to their 3-digit roundings."""
    assert almost(math.pi) == 3.142
    assert almost(math.sqrt(2)) == 1.414


def test_special_number():
    """inf/nan handling: inf == inf, nan == nan, neither matches a real."""
    assert almost(float('inf')) == float('inf')
    assert almost(float('nan')) == float('nan')
    assert almost(float('inf')) != float('nan')
    assert almost(float('inf')) != 12345
    assert almost(float('nan')) != 12345
    assert almost(float('-inf')) == -float('inf')
    try:
        assert almost(float('nan')) == float('-nan')
    except ValueError:
        # CPython accepts the '-nan' literal; Jython raises ValueError.
        # The original guard read sys.subversion, a CPython-only attribute
        # removed in Python 3.0, which itself raised AttributeError on
        # modern interpreters. Detect the implementation portably instead.
        import platform
        if platform.python_implementation() != 'Jython':
            raise
    assert almost(float('nan')) == -float('nan')
def test_le_ge():
    # <= and >= both hold when the rounded values are equal.
    assert almost(1 / 3.) <= 0.333
    assert almost(1 / 3.) >= 0.333
    assert almost(1 / 6.) <= 0.167
    assert almost(1 / 6.) >= 0.167
    assert almost(3227 / 555., prec=6) <= 5.814414
    assert almost(3227 / 555., prec=6) >= 5.814414
def test_lt_gt():
    # Strict < and > are both false when the rounded values are equal.
    assert not almost(1 / 3.) < 0.333
    assert not almost(1 / 3.) > 0.333
    assert not almost(1 / 6.) < 0.167
    assert not almost(1 / 6.) > 0.167
    assert not almost(3227 / 555., prec=6) < 5.814414
    assert not almost(3227 / 555., prec=6) > 5.814414
def test_ne():
    # != is false for values that round to the same number.
    assert not (almost(1 / 3.) != 0.333)
    assert not (almost(1 / 6.) != 0.167)
    assert not (almost(3227 / 555., prec=6) != 5.814414)
def test_pm_1():
    # A value matches its neighbors one unit in the last place away.
    assert almost(1.234) == 1.233
    assert almost(1.234) == 1.235
def test_str():
    # Plain strings fall back to exact comparison.
    assert almost('Hello') == 'Hello'
    assert almost('Hello') != 'World'
def test_list():
    # Lists are compared element-wise; non-numeric items compare exactly.
    import math
    assert almost([math.pi, math.sqrt(2)]) == [3.142, 1.414]
    assert almost([math.pi, 'abc', math.sqrt(2)]) == [3.142, 'abc', 1.414]
    assert almost([math.pi, 'abc', math.sqrt(2)]) != [3.142, 'def', 1.414]
def test_dict():
    # Dicts are compared value-wise under the same rounding rules.
    import math
    assert almost({'pi': math.pi, 'sqrt(2)': math.sqrt(2)}) == \
           {'pi': 3.142, 'sqrt(2)': 1.414}
    assert almost({'pi': math.pi, 'text': 'abc', 'sqrt(2)': math.sqrt(2)}) == \
           {'pi': 3.142, 'text': 'abc', 'sqrt(2)': 1.414}
    assert almost({'pi': math.pi, 'text': 'abc', 'sqrt(2)': math.sqrt(2)}) != \
           {'pi': 3.142, 'text': 'def', 'sqrt(2)': 1.414}
def test_gen():
    # Generators are consumed and compared like lists.
    import math
    assert almost(math.sqrt(x) for x in range(2, 5)) == [1.414, 1.732, 2]
def test_lt_gt_list():
    # List ordering uses lexicographic comparison of the rounded values.
    import math
    assert almost([math.pi, math.sqrt(2)]) < [3.142, 1.414, 1]
    assert not (almost([math.pi, math.sqrt(2)]) > [3.142, 1.414, 1])
    assert almost([math.pi, math.sqrt(2)]) > [3.142, 1.314, 1]
    assert not (almost([math.pi, math.sqrt(2)]) < [3.142, 1.314, 1])
def test_recursive_list():
    # Nested lists are compared recursively with the same rules.
    import math
    assert almost([[math.pi], [math.sqrt(2)]]) == [[3.142], [1.414]]
    assert almost([[math.pi], ['abc', math.sqrt(2)]]) == \
           [[3.142], ['abc', 1.414]]
    assert almost([[math.pi, 'abc'], [math.sqrt(2)]]) != \
           [[3.142, 'def'], [1.414]]
    assert almost([[1], [2]]) <= [[1], [2]]
    assert not (almost([[1], [2]]) < [[1], [2]])
    assert almost([[1], [2]]) >= [[1], [2]]
    assert not (almost([[1], [2]]) > [[1], [2]])
    assert not (almost([[1], [2]]) != [[1], [2]])
def test_ellipsis():
    # '...' in either operand acts as a wildcard for string comparison
    # and for substring containment tests.
    assert almost('Hello, world') == 'Hello, ...'
    assert almost('Hello, ...') == 'Hello, world'
    assert almost('..., ...') == 'Hello, world'
    assert almost('..., ...') == '..., world'
    assert almost('..., ...') == '..., ...'
    assert almost('...') == 'Hello, world'
    assert 'world' in almost('Hello, world')
    assert 'earth' not in almost('Hello, world')
    assert 'He...' in almost('Hello, world')
    assert '...ld' in almost('Hello, world')
    assert 'o, wo' in almost('Hello, world')
    assert 'world' in almost('Hello, ...')
    assert 'angel' in almost('Hello, ...')
    assert 'world' not in almost('Hello, ..')
    assert almost([['Hello, ...'], ['..., world']]) == \
           [['Hello, world'], ['Hello, world']]
    assert almost([['Hello, ...'], ['..., world']]) != \
           [['Bye, world'], ['Hello, world']]
def test_random_text():
    # The '...' wildcard also matches randomly generated suffixes.
    import random
    def gen_text_with_prefix(prefix):
        return prefix + str(random.random())[:-5]
    assert almost(gen_text_with_prefix('@')) == '@...'
def test_deprecated():
    # The old 'precision' keyword must emit a deprecation warning.
    deprecated_call(almost, 1, precision=2)
def test_negative_prec():
    # Negative precision rounds to the left of the decimal point.
    assert almost(10000, prec=-2) == 10099
    assert almost(10000, prec=-2) == 9900
    assert almost(10000, prec=-2) == 10101
    assert almost(10000, prec=-2) != 9800
|
bsd-3-clause
|
kingvuplus/ME-TEST2
|
lib/python/Plugins/SystemPlugins/SkinSelector/plugin.py
|
10
|
4102
|
# -*- coding: iso-8859-1 -*-
# (c) 2006 Stephan Reichholf
# This Software is Free, use it where you want, when you want for whatever you want and modify it if you want but don't remove my copyright!
from Screens.Screen import Screen
from Screens.Standby import TryQuitMainloop
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Pixmap import Pixmap
from Components.Sources.StaticText import StaticText
from Components.MenuList import MenuList
from Plugins.Plugin import PluginDescriptor
from Components.config import config
from Tools.Directories import resolveFilename, SCOPE_PLUGINS
from enigma import eEnv
import os
SKINXML = "skin.xml"
DEFAULTSKIN = "<Default Skin>"
class SkinSelector(Screen):
    """Screen that lists installed skins, previews them, and applies one.

    A skin is any subdirectory of the enigma2 data dir that contains a
    skin.xml; applying a skin requires a GUI restart.
    """
    skinlist = []
    root = os.path.join(eEnv.resolve("${datadir}"),"enigma2")
    def __init__(self, session, args = None):
        Screen.__init__(self, session)
        self.setTitle(_("Select your Skin"))
        self.skinlist = []
        self.previewPath = ""
        # The top-level skin.xml (if present) is offered as the default.
        if os.path.exists(os.path.join(self.root, SKINXML)):
            self.skinlist.append(DEFAULTSKIN)
        # Every subdirectory containing a skin.xml is a selectable skin.
        for root, dirs, files in os.walk(self.root, followlinks=True):
            for subdir in dirs:
                dir = os.path.join(root,subdir)
                if os.path.exists(os.path.join(dir,SKINXML)):
                    self.skinlist.append(subdir)
            dirs = []
        self["key_red"] = StaticText(_("Close"))
        self["introduction"] = StaticText(_("Press OK to activate the selected skin."))
        self.skinlist.sort()
        self["SkinList"] = MenuList(self.skinlist)
        self["Preview"] = Pixmap()
        self["actions"] = NumberActionMap(["WizardActions", "InputActions", "EPGSelectActions"],
        {
            "ok": self.ok,
            "back": self.close,
            "red": self.close,
            "up": self.up,
            "down": self.down,
            "left": self.left,
            "right": self.right,
            "info": self.info,
        }, -1)
        self.onLayoutFinish.append(self.layoutFinished)
    def layoutFinished(self):
        # Pre-select the currently active skin in the list, then show
        # its preview image.
        tmp = config.skin.primary_skin.value.find("/"+SKINXML)
        if tmp != -1:
            tmp = config.skin.primary_skin.value[:tmp]
            idx = 0
            for skin in self.skinlist:
                if skin == tmp:
                    break
                idx += 1
            if idx < len(self.skinlist):
                self["SkinList"].moveToIndex(idx)
        self.loadPreview()
    def up(self):
        self["SkinList"].up()
        self.loadPreview()
    def down(self):
        self["SkinList"].down()
        self.loadPreview()
    def left(self):
        self["SkinList"].pageUp()
        self.loadPreview()
    def right(self):
        self["SkinList"].pageDown()
        self.loadPreview()
    def info(self):
        aboutbox = self.session.open(MessageBox,_("Enigma2 skin selector"), MessageBox.TYPE_INFO)
        aboutbox.setTitle(_("About..."))
    def ok(self):
        # Remember the chosen skin file and ask for a GUI restart.
        if self["SkinList"].getCurrent() == DEFAULTSKIN:
            self.skinfile = "."
        else:
            self.skinfile = self["SkinList"].getCurrent()
        self.skinfile = os.path.join(self.skinfile, SKINXML)
        print "Skinselector: Selected Skin: "+self.root+self.skinfile
        restartbox = self.session.openWithCallback(self.restartGUI,MessageBox,_("GUI needs a restart to apply a new skin\nDo you want to restart the GUI now?"), MessageBox.TYPE_YESNO)
        restartbox.setTitle(_("Restart GUI now?"))
    def loadPreview(self):
        # Show the selected skin's prev.png, falling back to a stock
        # "no preview" image when the skin ships none.
        if self["SkinList"].getCurrent() == DEFAULTSKIN:
            pngpath = "."
        else:
            pngpath = self["SkinList"].getCurrent()
        pngpath = os.path.join(os.path.join(self.root, pngpath), "prev.png")
        if not os.path.exists(pngpath):
            pngpath = resolveFilename(SCOPE_PLUGINS, "SystemPlugins/SkinSelector/noprev.png")
        if self.previewPath != pngpath:
            self.previewPath = pngpath
        self["Preview"].instance.setPixmapFromFile(self.previewPath)
    def restartGUI(self, answer):
        # Callback from the restart prompt; persist the skin and restart
        # the GUI only when the user confirmed.
        if answer is True:
            config.skin.primary_skin.value = self.skinfile
            config.skin.primary_skin.save()
            self.session.open(TryQuitMainloop, 3)
def SkinSelMain(session, **kwargs):
    # Plugin entry point: open the skin selection screen.
    session.open(SkinSelector)
def SkinSelSetup(menuid, **kwargs):
    # Offer the skin selector only inside the "system" setup menu.
    if menuid != "system":
        return []
    return [(_("Skin"), SkinSelMain, "skin_selector", None)]
def Plugins(**kwargs):
    # Register the plugin in the setup/system menu; no GUI restart needed
    # merely to install it.
    return PluginDescriptor(name="Skin", description= _("Select your Skin"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=SkinSelSetup)
|
gpl-2.0
|
BoGoEngine/ibus-bogo
|
ibus_engine/base_config.py
|
1
|
3649
|
#
# This file is part of ibus-bogo project.
#
# Copyright (C) 2012 Long T. Dam <longdt90@gmail.com>
# Copyright (C) 2012-2013 Trung Ngo <ndtrung4419@gmail.com>
# Copyright (C) 2013 Duong H. Nguyen <cmpitg@gmail.com>
#
# ibus-bogo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ibus-bogo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ibus-bogo. If not, see <http://www.gnu.org/licenses/>.
#
from collections import defaultdict
import logging
import json
import os
import bogo
# TODO: This module needs some tests
ENGINE_DIR = os.path.dirname(__file__)

# Default settings merged into (and, if needed, written to) every
# config file.
IBUS_BOGO_DEFAULT_CONFIG = {
    "input-method": "telex",
    "output-charset": "utf-8",
    "telex-w-shorthand": True,
    "telex-brackets-shorthand": True,
    "skip-non-vietnamese": True,
    "enable-text-expansion": False,
    "auto-capitalize-expansion": False,
    "surrounding-text-blacklist": [
        "chrome",
        "chromium",
        "compiz",
        "gnome-terminal",
        "lxterminal",
        "konsole",
        "geany",
        "skype"
    ],
    "typo-correction-level": 2,
    "typo-correction-threshold": 1
}


# TODO: It's best if we can preserve comments and line order
class BaseConfig(object):
    """Base config object, designed to behave like a dictionary.

    Settings are persisted as JSON at *path*. Defaults are loaded first,
    then merged with whatever the file contains; every mutation through
    ``__setitem__`` is written straight back to disk.
    """

    def __init__(self, path):
        super(BaseConfig, self).__init__()
        self._keys = {}
        self.path = path
        self.read_default_config()
        self.read_config(path)
        # Write immediately because the default config
        # may introduce a new key
        self.write_config()

    def read_config(self, path):
        """Merge settings from *path*; reset to defaults if unreadable.

        Catches only I/O and JSON decoding errors (the old bare
        ``except:`` also swallowed SystemExit/KeyboardInterrupt).
        """
        try:
            with open(path, "r") as f:
                data = json.load(f)
            self._keys.update(data)
        except (IOError, OSError, ValueError):
            logging.warning("Config file corrupted or doesn't exist.")
            self.reset()

    def write_config(self):
        """Persist the current settings to ``self.path`` as pretty JSON."""
        with open(self.path, "w") as f:
            f.write(json.dumps(self._keys,
                               indent=4,
                               ensure_ascii=False,
                               sort_keys=True))

    def __setitem__(self, key, value):
        self._keys[key] = value
        self.write_config()

    def __getitem__(self, key):
        if key == "input-method-definition":
            # Derived value: build the key-definition table for the
            # currently configured input method on demand.
            return defaultdict(dict, {
                "vni": bogo.get_vni_definition(),
                "telex": bogo.get_telex_definition(
                    self._keys["telex-w-shorthand"],
                    self._keys["telex-brackets-shorthand"])
            })[self._keys["input-method"]]
        else:
            return self._keys[key]

    def __contains__(self, key):
        return key in self._keys

    def items(self):
        return self._keys.items()

    def iteritems(self):
        # Kept for backward compatibility with Python 2 callers; now also
        # works on Python 3 by deferring to items().
        return iter(self._keys.items())

    def keys(self):
        return self._keys.keys()

    def read_default_config(self):
        self._keys.update(IBUS_BOGO_DEFAULT_CONFIG)

    def reset(self):
        """Discard all settings and rewrite the file with the defaults."""
        self._keys = {}
        self.read_default_config()
        self.write_config()
|
gpl-3.0
|
PyBossa/pybossa
|
test/test_sched_2.py
|
1
|
3260
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from helper import sched
from default import with_context
import json
from mock import patch
class TestSched(sched.Helper):
    """End-to-end test of the 'incremental' task scheduler."""

    def setUp(self):
        super(TestSched, self).setUp()
        self.endpoints = ['project', 'task', 'taskrun']
    # Tests
    @with_context
    @patch('pybossa.api.task_run.request')
    def test_incremental_tasks(self, mock_request):
        """ Test incremental SCHED strategy - second TaskRun receives first given answer"""
        self.create_2(sched='incremental')
        mock_request.remote_addr = '127.0.0.0'
        # Del previous TaskRuns
        self.del_task_runs()
        # Register
        self.register(fullname="John Doe", name="johndoe", password="p4ssw0rd")
        self.signout()
        self.register(fullname="Marie Doe", name="mariedoe", password="dr0wss4p")
        self.signout()
        self.signin()
        # Get the only task with no runs!
        res = self.app.get('api/project/1/newtask')
        data = json.loads(res.data)
        print "Task:%s" % data['id']
        # Check that we received a clean Task
        assert data.get('info'), data
        assert not data.get('info').get('last_answer')
        # Submit an Answer for the assigned task
        tr = dict(project_id=data['project_id'], task_id=data['id'], info={'answer': 'No'})
        tr = json.dumps(tr)
        res = self.app.post('/api/taskrun', data=tr)
        # No more tasks available for this user!
        res = self.app.get('api/project/1/newtask')
        data = json.loads(res.data)
        assert not data, data
        #### Get the only task now with an answer as Anonimous!
        self.signout()
        res = self.app.get('api/project/1/newtask')
        data = json.loads(res.data)
        # Check that we received a Task with answer
        assert data.get('info'), data
        assert data.get('info').get('last_answer').get('answer') == 'No'
        # Submit a second Answer as Anonimous
        tr = dict(project_id=data['project_id'], task_id=data['id'],
                  info={'answer': 'No No'})
        tr = json.dumps(tr)
        self.app.post('/api/taskrun', data=tr)
        #### Get the only task now with an answer as User2!
        self.signin(email="mariedoe@example.com", password="dr0wss4p")
        res = self.app.get('api/project/1/newtask')
        data = json.loads(res.data)
        # Check that we received a Task with answer
        assert data.get('info'), data
        assert data.get('info').get('last_answer').get('answer') == 'No No'
|
agpl-3.0
|
genenetwork/genenetwork2_diet
|
wqflask/wqflask/heatmap/heatmap.py
|
1
|
12293
|
from __future__ import absolute_import, print_function, division
import sys
sys.path.append(".")
import gc
import string
import cPickle
import os
import datetime
import time
import pp
import math
import collections
import resource
import scipy
import numpy as np
from scipy import linalg
from pprint import pformat as pf
from htmlgen import HTMLgen2 as HT
import reaper
from base.trait import GeneralTrait
from base import data_set
from base import species
# from wqflask.my_pylmm.pyLMM import lmm
# from wqflask.my_pylmm.pyLMM import input
from utility import helper_functions
from utility import Plot, Bunch
from utility import temp_data
from utility.tools import PYLMM_COMMAND
from MySQLdb import escape_string as escape
import cPickle as pickle
import simplejson as json
from pprint import pformat as pf
from redis import Redis
Redis = Redis()
from flask import Flask, g
class Heatmap(object):
def __init__(self, start_vars, temp_uuid):
trait_db_list = [trait.strip() for trait in start_vars['trait_list'].split(',')]
helper_functions.get_trait_db_obs(self, trait_db_list)
self.temp_uuid = temp_uuid
self.num_permutations = 5000
self.dataset = self.trait_list[0][1]
self.json_data = {} #The dictionary that will be used to create the json object that contains all the data needed to create the figure
self.all_sample_list = []
self.traits = []
chrnames = []
self.species = species.TheSpecies(dataset=self.trait_list[0][1])
for key in self.species.chromosomes.chromosomes.keys():
chrnames.append([self.species.chromosomes.chromosomes[key].name, self.species.chromosomes.chromosomes[key].mb_length])
for trait_db in self.trait_list:
this_trait = trait_db[0]
self.traits.append(this_trait.name)
this_sample_data = this_trait.data
for sample in this_sample_data:
if sample not in self.all_sample_list:
self.all_sample_list.append(sample)
self.sample_data = []
for trait_db in self.trait_list:
this_trait = trait_db[0]
this_sample_data = this_trait.data
#self.sample_data[this_trait.name] = []
this_trait_vals = []
for sample in self.all_sample_list:
if sample in this_sample_data:
this_trait_vals.append(this_sample_data[sample].value)
#self.sample_data[this_trait.name].append(this_sample_data[sample].value)
else:
this_trait_vals.append('')
#self.sample_data[this_trait.name].append('')
self.sample_data.append(this_trait_vals)
self.gen_reaper_results()
#self.gen_pylmm_results()
#chrnames = []
lodnames = []
chr_pos = []
pos = []
markernames = []
for trait in self.trait_results.keys():
lodnames.append(trait)
for marker in self.dataset.group.markers.markers:
#if marker['chr'] not in chrnames:
# chr_ob = [marker['chr'], "filler"]
# chrnames.append(chr_ob)
chr_pos.append(marker['chr'])
pos.append(marker['Mb'])
markernames.append(marker['name'])
self.json_data['chrnames'] = chrnames
self.json_data['lodnames'] = lodnames
self.json_data['chr'] = chr_pos
self.json_data['pos'] = pos
self.json_data['markernames'] = markernames
for trait in self.trait_results:
self.json_data[trait] = self.trait_results[trait]
self.js_data = dict(
json_data = self.json_data
)
print("self.js_data:", self.js_data)
def gen_reaper_results(self):
self.trait_results = {}
for trait_db in self.trait_list:
self.dataset.group.get_markers()
this_trait = trait_db[0]
#this_db = trait_db[1]
genotype = self.dataset.group.read_genotype_file()
samples, values, variances = this_trait.export_informative()
trimmed_samples = []
trimmed_values = []
for i in range(0, len(samples)):
if samples[i] in self.dataset.group.samplelist:
trimmed_samples.append(samples[i])
trimmed_values.append(values[i])
self.lrs_array = genotype.permutation(strains = trimmed_samples,
trait = trimmed_values,
nperm= self.num_permutations)
#self.suggestive = self.lrs_array[int(self.num_permutations*0.37-1)]
#self.significant = self.lrs_array[int(self.num_permutations*0.95-1)]
reaper_results = genotype.regression(strains = trimmed_samples,
trait = trimmed_values)
lrs_values = [float(qtl.lrs) for qtl in reaper_results]
print("lrs_values:", lrs_values)
#self.dataset.group.markers.add_pvalues(p_values)
self.trait_results[this_trait.name] = []
for qtl in reaper_results:
if qtl.additive > 0:
self.trait_results[this_trait.name].append(-float(qtl.lrs))
else:
self.trait_results[this_trait.name].append(float(qtl.lrs))
#for lrs in lrs_values:
# if
# self.trait_results[this_trait.name].append(lrs)
#this_db_samples = self.dataset.group.samplelist
#this_sample_data = this_trait.data
##print("this_sample_data", this_sample_data)
#this_trait_vals = []
#for index, sample in enumerate(this_db_samples):
# if sample in this_sample_data:
# sample_value = this_sample_data[sample].value
# this_trait_vals.append(sample_value)
# else:
# this_trait_vals.append("x")
#pheno_vector = np.array([val == "x" and np.nan or float(val) for val in this_trait_vals])
#key = "pylmm:input:" + str(self.temp_uuid)
#print("key is:", pf(key))
#genotype_data = [marker['genotypes'] for marker in self.dataset.group.markers.markers]
#no_val_samples = self.identify_empty_samples(this_trait_vals)
#trimmed_genotype_data = self.trim_genotypes(genotype_data, no_val_samples)
#genotype_matrix = np.array(trimmed_genotype_data).T
#print("genotype_matrix:", str(genotype_matrix.tolist()))
#print("pheno_vector:", str(pheno_vector.tolist()))
#params = dict(pheno_vector = pheno_vector.tolist(),
# genotype_matrix = genotype_matrix.tolist(),
# restricted_max_likelihood = True,
# refit = False,
# temp_uuid = str(self.temp_uuid),
#
# # meta data
# timestamp = datetime.datetime.now().isoformat(),
# )
#
#json_params = json.dumps(params)
##print("json_params:", json_params)
#Redis.set(key, json_params)
#Redis.expire(key, 60*60)
#print("before printing command")
#
#command = 'python lmm.py --key {} --species {}'.format(key,
# "other")
#print("command is:", command)
#print("after printing command")
#
#os.system(command)
#
#json_results = Redis.blpop("pylmm:results:" + str(self.temp_uuid), 45*60)
def gen_pylmm_results(self):
    # This function is NOT used. If it is, we should use a shared function with marker_regression.py
    """Run pylmm for every trait in self.trait_list via a Redis-mediated worker.

    For each trait: builds a phenotype vector aligned to the group's sample
    list, trims the genotype matrix to samples that have a value, serialises
    both into Redis under a uuid-scoped key, shells out to the pylmm script,
    then blocks on a Redis list for the resulting p-values.  LOD scores are
    collected into self.trait_results keyed by trait name.
    """
    self.trait_results = {}
    for trait_db in self.trait_list:
        this_trait = trait_db[0]
        #this_db = trait_db[1]
        self.dataset.group.get_markers()
        this_db_samples = self.dataset.group.samplelist
        this_sample_data = this_trait.data
        #print("this_sample_data", this_sample_data)
        this_trait_vals = []
        # Align trait values to the group's sample list; "x" marks a missing value.
        for index, sample in enumerate(this_db_samples):
            if sample in this_sample_data:
                sample_value = this_sample_data[sample].value
                this_trait_vals.append(sample_value)
            else:
                this_trait_vals.append("x")
        # "x" placeholders become NaN so pylmm can mask missing phenotypes.
        pheno_vector = np.array([val == "x" and np.nan or float(val) for val in this_trait_vals])
        key = "pylmm:input:" + str(self.temp_uuid)
        #print("key is:", pf(key))
        genotype_data = [marker['genotypes'] for marker in self.dataset.group.markers.markers]
        # Drop the genotype columns for samples that have no trait value.
        no_val_samples = self.identify_empty_samples(this_trait_vals)
        trimmed_genotype_data = self.trim_genotypes(genotype_data, no_val_samples)
        genotype_matrix = np.array(trimmed_genotype_data).T
        #print("genotype_matrix:", str(genotype_matrix.tolist()))
        #print("pheno_vector:", str(pheno_vector.tolist()))
        params = dict(pheno_vector = pheno_vector.tolist(),
                      genotype_matrix = genotype_matrix.tolist(),
                      restricted_max_likelihood = True,
                      refit = False,
                      temp_uuid = str(self.temp_uuid),
                      # meta data
                      timestamp = datetime.datetime.now().isoformat(),
                      )
        # Hand the job to the pylmm worker through Redis; input expires after an hour.
        json_params = json.dumps(params)
        #print("json_params:", json_params)
        Redis.set(key, json_params)
        Redis.expire(key, 60*60)
        print("before printing command")
        # NOTE(review): PYLMM_COMMAND is presumably a module-level path to the
        # pylmm driver script -- confirm against module imports.
        command = PYLMM_COMMAND+' --key {} --species {}'.format(key,
                                                                "other")
        print("command is:", command)
        print("after printing command")
        os.system(command)
        # Block (up to 45 minutes) until the worker pushes its results list.
        json_results = Redis.blpop("pylmm:results:" + str(self.temp_uuid), 45*60)
        results = json.loads(json_results[1])
        p_values = [float(result) for result in results['p_values']]
        #print("p_values:", p_values)
        self.dataset.group.markers.add_pvalues(p_values)
        self.trait_results[this_trait.name] = []
        for marker in self.dataset.group.markers.markers:
            self.trait_results[this_trait.name].append(marker['lod_score'])
def identify_empty_samples(self, values):
    """Return the indices of samples whose value is the "x" placeholder."""
    return [position for position, value in enumerate(values) if value == "x"]
def trim_genotypes(self, genotype_data, no_value_samples):
    """Drop the genotype entries of value-less samples from every marker.

    Args:
        genotype_data: per-marker sequences of genotype calls (strings or numbers).
        no_value_samples: iterable of sample indices to exclude.

    Returns:
        A list of lists of floats; unparseable genotype calls become np.nan
        so numpy can mask them downstream.
    """
    excluded = set(no_value_samples)  # O(1) membership test inside the loop
    trimmed_genotype_data = []
    for marker in genotype_data:
        new_genotypes = []
        for item_count, genotype in enumerate(marker):
            if item_count in excluded:
                continue
            try:
                genotype = float(genotype)
            except ValueError:
                # Non-numeric call (e.g. "x"): mark as missing.
                genotype = np.nan
            new_genotypes.append(genotype)
        trimmed_genotype_data.append(new_genotypes)
    return trimmed_genotype_data
|
agpl-3.0
|
vadimtk/chrome4sdp
|
third_party/WebKit/Tools/Scripts/webkitpy/style/checkers/jsonchecker_unittest.py
|
51
|
4991
|
# Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for jsonchecker.py."""
import unittest
import jsonchecker
class MockErrorHandler(object):
    """Test double for a style-error handler.

    Tracks whether line filtering was disabled and forwards every reported
    error to the callback supplied at construction time (passing itself as
    the first argument so the callback can record state on the mock).
    """

    def __init__(self, handle_style_error):
        self.turned_off_filtering = False
        self._handle_style_error = handle_style_error

    def turn_off_line_filtering(self):
        """Record that the checker asked for line filtering to be disabled."""
        self.turned_off_filtering = True

    def __call__(self, line_number, category, confidence, message):
        """Relay one reported error to the callback; always accepts it."""
        self._handle_style_error(self, line_number, category, confidence, message)
        return True
class JSONCheckerTest(unittest.TestCase):
    """Tests JSONChecker class."""

    def test_line_number_from_json_exception(self):
        # Maps known ValueError message formats to the reported line number.
        tests = (
            (0, 'No JSON object could be decoded'),
            (2, 'Expecting property name: line 2 column 1 (char 2)'),
            (3, 'Expecting object: line 3 column 1 (char 15)'),
            (9, 'Expecting property name: line 9 column 21 (char 478)'),
        )
        for expected_line, message in tests:
            self.assertEqual(expected_line, jsonchecker.JSONChecker.line_number_from_json_exception(ValueError(message)))

    def assert_no_error(self, json_data):
        # Helper: fail if the checker reports any error for json_data.
        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
            self.fail('Unexpected error: %d %s %d %s' % (line_number, category, confidence, message))
        error_handler = MockErrorHandler(handle_style_error)
        checker = jsonchecker.JSONChecker('foo.json', error_handler)
        checker.check(json_data.split('\n'))
        self.assertTrue(error_handler.turned_off_filtering)

    def assert_error(self, expected_line_number, expected_category, json_data):
        # Helper: require that exactly the given (line, category) error is reported.
        def handle_style_error(mock_error_handler, line_number, category, confidence, message):
            mock_error_handler.had_error = True
            self.assertEqual(expected_line_number, line_number)
            self.assertEqual(expected_category, category)
            self.assertIn(category, jsonchecker.JSONChecker.categories)
        error_handler = MockErrorHandler(handle_style_error)
        error_handler.had_error = False
        checker = jsonchecker.JSONChecker('foo.json', error_handler)
        checker.check(json_data.split('\n'))
        self.assertTrue(error_handler.had_error)
        self.assertTrue(error_handler.turned_off_filtering)

    def mock_handle_style_error(self):
        # No-op callback used by test_init, which only checks wiring.
        pass

    def test_conflict_marker(self):
        self.assert_error(0, 'json/syntax', '<<<<<<< HEAD\n{\n}\n')

    def test_single_quote(self):
        self.assert_error(2, 'json/syntax', "{\n'slaves': []\n}\n")

    def test_init(self):
        error_handler = MockErrorHandler(self.mock_handle_style_error)
        checker = jsonchecker.JSONChecker('foo.json', error_handler)
        self.assertEqual(checker._handle_style_error, error_handler)

    def test_no_error(self):
        self.assert_no_error("""{
"slaves": [ { "name": "test-slave", "platform": "*" },
{ "name": "apple-xserve-4", "platform": "mac-snowleopard" }
],
"builders": [ { "name": "SnowLeopard Intel Release (Build)", "type": "Build", "builddir": "snowleopard-intel-release",
"platform": "mac-snowleopard", "configuration": "release", "architectures": ["x86_64"],
"slavenames": ["apple-xserve-4"]
}
],
"schedulers": [ { "type": "PlatformSpecificScheduler", "platform": "mac-snowleopard", "branch": "trunk", "treeStableTimer": 45.0,
"builderNames": ["SnowLeopard Intel Release (Build)", "SnowLeopard Intel Debug (Build)"]
}
]
}
""")
|
bsd-3-clause
|
tjakway/digital-ant-farm
|
lib/gtest-1.7.0/test/gtest_env_var_test.py
|
2408
|
3487
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly parses environment variables."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# Platform flags: some gtest flags (e.g. death_test_use_fork) only exist on Linux.
IS_WINDOWS = os.name == 'nt'
IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux'
# Path of the helper binary that echoes the value of a requested gtest flag.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_')
# Mutable copy of the environment, manipulated by SetEnvVar() and passed to GetFlag().
environ = os.environ.copy()
def AssertEq(expected, actual):
  """Raises AssertionError, after printing both values, if expected != actual.

  The prints use the parenthesised single-argument form, which behaves
  identically under Python 2 (print statement) and Python 3 (print function).
  """
  if expected != actual:
    print('Expected: %s' % (expected,))
    print(' Actual: %s' % (actual,))
    raise AssertionError
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing a variable that is not set is a no-op.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def GetFlag(flag):
  """Runs gtest_env_var_test_ and returns its output."""
  # Only append the flag when one was requested; None queries the default run.
  args = [COMMAND] if flag is None else [COMMAND, flag]
  return gtest_test_utils.Subprocess(args, env=environ).output
def TestFlag(flag, test_val, default_val):
  """Verifies that the given flag is affected by the corresponding env var."""
  env_var = 'GTEST_' + flag.upper()
  # First with the variable set (expect test_val), then unset (expect default).
  for value, expected in ((test_val, test_val), (None, default_val)):
    SetEnvVar(env_var, value)
    AssertEq(expected, GetFlag(flag))
class GTestEnvVarTest(gtest_test_utils.TestCase):
  # Each TestFlag call below checks both the env-var override and the
  # flag's built-in default value.
  def testEnvVarAffectsFlag(self):
    """Tests that environment variable should affect the corresponding flag."""
    TestFlag('break_on_failure', '1', '0')
    TestFlag('color', 'yes', 'auto')
    TestFlag('filter', 'FooTest.Bar', '*')
    TestFlag('output', 'xml:tmp/foo.xml', '')
    TestFlag('print_time', '0', '1')
    TestFlag('repeat', '999', '1')
    TestFlag('throw_on_failure', '1', '0')
    TestFlag('death_test_style', 'threadsafe', 'fast')
    TestFlag('catch_exceptions', '0', '1')
    if IS_LINUX:
      # These two flags only exist in the Linux build of gtest.
      TestFlag('death_test_use_fork', '1', '0')
      TestFlag('stack_trace_depth', '0', '100')
# Standard gtest test-script entry point.
if __name__ == '__main__':
  gtest_test_utils.Main()
|
mit
|
dmchoull/cdeploy
|
cdeploy/migrator.py
|
1
|
3928
|
import os
import sys
import yaml
from cassandra.cluster import Cluster
from cqlexecutor import CQLExecutor
class Migrator:
    """Applies and rolls back CQL schema migrations stored in a directory.

    Migration files are named ``<version>_<description>`` where ``<version>``
    is an integer; applied versions are tracked by CQLExecutor in the session.
    """

    def __init__(self, migrations_path, session):
        print('Reading migrations from {0}'.format(migrations_path))
        self.migrations_path = migrations_path
        self.session = session

    def run_migrations(self):
        """Apply every migration newer than the recorded top version, in version order."""
        CQLExecutor.init_table(self.session)
        top_version = self.get_top_version()
        new_migration_filter = \
            lambda f: os.path.isfile(os.path.join(self.migrations_path, f)) and self.migration_version(f) > top_version
        new_migrations = self.filter_migrations(new_migration_filter)
        # Sort by version: os.listdir() ordering is arbitrary, and migrations
        # must be applied oldest-first.
        for file_name in sorted(new_migrations, key=self.migration_version):
            self.apply_migration(file_name)

    def undo(self):
        """Revert the most recently applied migration (no-op when none applied)."""
        top_version = self.get_top_version()
        if top_version == 0:
            return
        top_version_filter = \
            lambda f: os.path.isfile(os.path.join(self.migrations_path, f)) and self.migration_version(f) == top_version
        top_migration = self.filter_migrations(top_version_filter)[0]
        CQLExecutor.execute_undo(self.session, self.read_migration(top_migration))
        CQLExecutor.rollback_schema_migration(self.session)
        print(' -> Migration {0} undone ({1})\n'.format(top_version, top_migration))

    def get_top_version(self):
        """Return the highest applied migration version (0 when none recorded)."""
        result = CQLExecutor.get_top_version(self.session)
        top_version = result[0].version if len(result) > 0 else 0
        print('Current version is {0}'.format(top_version))
        return top_version

    def filter_migrations(self, filter_func):
        """Return directory entries matching filter_func as a list.

        Returning a concrete list (rather than a lazy ``filter`` object)
        keeps ``[0]`` indexing in undo() working on Python 3 as well.
        """
        return [f for f in os.listdir(self.migrations_path) if filter_func(f)]

    def migration_version(self, file_name):
        """Extract the integer version prefix from '<version>_<description>'."""
        return int(file_name.split('_')[0])

    def apply_migration(self, file_name):
        """Run one migration script and record its version."""
        migration_script = self.read_migration(file_name)
        version = self.migration_version(file_name)
        CQLExecutor.execute(self.session, migration_script)
        CQLExecutor.add_schema_migration(self.session, version)
        print(' -> Migration {0} applied ({1})\n'.format(version, file_name))

    def read_migration(self, file_name):
        """Return a migration file's contents, closing the handle promptly."""
        with open(os.path.join(self.migrations_path, file_name)) as migration_file:
            return migration_file.read()
# Fallback migrations directory when none is given on the command line.
DEFAULT_MIGRATIONS_PATH = './migrations'
# Cassandra connection settings file, relative to the migrations directory.
CONFIG_FILE_PATH = 'config/cassandra.yml'
def main():
    """CLI entry point: parse argv, connect to Cassandra, run or undo migrations.

    Usage: cdeploy [path/to/migrations] [--undo]
    """
    if '--help' in sys.argv or '-h' in sys.argv:
        # Parenthesised print keeps this runnable on Python 3 as well.
        print('Usage: cdeploy [path/to/migrations] [--undo]')
        return

    undo = False
    if '--undo' in sys.argv:
        undo = True
        sys.argv.remove('--undo')

    migrations_path = DEFAULT_MIGRATIONS_PATH if len(sys.argv) == 1 else sys.argv[1]
    if invalid_migrations_dir(migrations_path) or missing_config(migrations_path):
        return

    config = load_config(migrations_path, os.getenv('ENV'))
    cluster = Cluster(config['hosts'])
    session = cluster.connect(config['keyspace'])
    migrator = Migrator(migrations_path, session)
    if undo:
        migrator.undo()
    else:
        migrator.run_migrations()
def invalid_migrations_dir(migrations_path):
    """Return True (and print a warning) when migrations_path is not a directory."""
    if not os.path.isdir(migrations_path):
        # Parenthesised print keeps this runnable on Python 3 as well.
        print('"{0}" is not a directory'.format(migrations_path))
        return True
    else:
        return False
def missing_config(migrations_path):
    """Return True (and print a warning) when the Cassandra config file is absent."""
    config_path = config_file_path(migrations_path)
    if not os.path.exists(os.path.join(config_path)):
        # Parenthesised print keeps this runnable on Python 3 as well.
        print('Missing configuration file "{0}"'.format(config_path))
        return True
    else:
        return False
def config_file_path(migrations_path):
    # The config file lives inside the migrations directory itself.
    return os.path.join(migrations_path, CONFIG_FILE_PATH)
def load_config(migrations_path, env):
    """Load the Cassandra settings for *env* (defaults to 'development').

    Uses yaml.safe_load instead of yaml.load so a malformed/malicious config
    cannot construct arbitrary Python objects, and closes the file handle
    deterministically via a context manager.
    """
    with open(config_file_path(migrations_path)) as config_file:
        config = yaml.safe_load(config_file)
    return config[env or 'development']
# Script entry point.
if __name__ == '__main__':
    main()
|
apache-2.0
|
walke469/spartahack-17
|
ballotbuilder/lib/python3.5/site-packages/werkzeug/debug/console.py
|
256
|
5599
|
# -*- coding: utf-8 -*-
"""
werkzeug.debug.console
~~~~~~~~~~~~~~~~~~~~~~
Interactive console support.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
import sys
import code
from types import CodeType
from werkzeug.utils import escape
from werkzeug.local import Local
from werkzeug.debug.repr import debug_repr, dump, helper
_local = Local()
class HTMLStringO(object):
    """A StringO version that HTML escapes on write."""

    def __init__(self):
        self._buffer = []

    def isatty(self):
        # Never a terminal: output is captured for the HTML debugger page.
        return False

    def close(self):
        pass

    def flush(self):
        pass

    def seek(self, n, mode=0):
        pass

    def readline(self):
        """Pop and return the oldest buffered chunk ('' when empty)."""
        if not self._buffer:
            return ''
        return self._buffer.pop(0)

    def reset(self):
        """Return everything buffered so far and clear the buffer."""
        val = ''.join(self._buffer)
        del self._buffer[:]
        return val

    def _write(self, x):
        # Normalise bytes to text so the page never receives raw bytes.
        if isinstance(x, bytes):
            x = x.decode('utf-8', 'replace')
        self._buffer.append(x)

    def write(self, x):
        self._write(escape(x))

    def writelines(self, x):
        self._write(escape(''.join(x)))
class ThreadedStream(object):
    """Thread-local wrapper for sys.stdout for the interactive console."""

    def push():
        # Install the proxy on sys.stdout once per process; each calling
        # thread then gets its own HTMLStringO capture buffer.
        if not isinstance(sys.stdout, ThreadedStream):
            sys.stdout = ThreadedStream()
        _local.stream = HTMLStringO()
    push = staticmethod(push)

    def fetch():
        # Return and clear this thread's captured output; '' when the thread
        # never called push().
        try:
            stream = _local.stream
        except AttributeError:
            return ''
        return stream.reset()
    fetch = staticmethod(fetch)

    def displayhook(obj):
        try:
            stream = _local.stream
        except AttributeError:
            # Not a console thread: fall back to the original displayhook.
            return _displayhook(obj)
        # stream._write bypasses escaping as debug_repr is
        # already generating HTML for us.
        if obj is not None:
            _local._current_ipy.locals['_'] = obj
            stream._write(debug_repr(obj))
    displayhook = staticmethod(displayhook)

    def __setattr__(self, name, value):
        # The proxy is immutable; all state lives in the thread-local stream.
        raise AttributeError('read only attribute %s' % name)

    def __dir__(self):
        return dir(sys.__stdout__)

    def __getattribute__(self, name):
        # Forward every attribute access to the thread-local stream, falling
        # back to the real stdout when this thread has no capture buffer.
        if name == '__members__':
            return dir(sys.__stdout__)
        try:
            stream = _local.stream
        except AttributeError:
            stream = sys.__stdout__
        return getattr(stream, name)

    def __repr__(self):
        return repr(sys.__stdout__)
# Install ThreadedStream.displayhook globally, keeping a reference to the
# original hook so non-console threads keep the default behaviour.
_displayhook = sys.displayhook
sys.displayhook = ThreadedStream.displayhook
class _ConsoleLoader(object):
def __init__(self):
self._storage = {}
def register(self, code, source):
self._storage[id(code)] = source
# register code objects of wrapped functions too.
for var in code.co_consts:
if isinstance(var, CodeType):
self._storage[id(var)] = source
def get_source_by_code(self, code):
try:
return self._storage[id(code)]
except KeyError:
pass
def _wrap_compiler(console):
compile = console.compile
def func(source, filename, symbol):
code = compile(source, filename, symbol)
console.loader.register(code, source)
return code
console.compile = func
class _InteractiveConsole(code.InteractiveInterpreter):
    """Interpreter that returns each interaction as one transcript string.

    Keeps a buffer of pending lines so multi-line statements work, and routes
    all output through ThreadedStream so it can be captured per thread.
    """

    def __init__(self, globals, locals):
        code.InteractiveInterpreter.__init__(self, locals)
        self.globals = dict(globals)
        self.globals['dump'] = dump
        self.globals['help'] = helper
        # Expose the loader so tracebacks can find the typed source.
        self.globals['__loader__'] = self.loader = _ConsoleLoader()
        self.more = False
        self.buffer = []
        _wrap_compiler(self)

    def runsource(self, source):
        """Execute *source*; return prompt + echoed source + captured output."""
        source = source.rstrip() + '\n'
        ThreadedStream.push()
        # '... ' continuation prompt while a multi-line statement is pending.
        prompt = self.more and '... ' or '>>> '
        try:
            source_to_eval = ''.join(self.buffer + [source])
            if code.InteractiveInterpreter.runsource(self,
                   source_to_eval, '<debugger>', 'single'):
                # Incomplete statement: keep buffering lines.
                self.more = True
                self.buffer.append(source)
            else:
                self.more = False
                del self.buffer[:]
        finally:
            output = ThreadedStream.fetch()
        return prompt + source + output

    def runcode(self, code):
        # eval() with both namespaces so console globals (dump/help/_) resolve.
        try:
            eval(code, self.globals, self.locals)
        except Exception:
            self.showtraceback()

    def showtraceback(self):
        from werkzeug.debug.tbtools import get_current_traceback
        tb = get_current_traceback(skip=1)
        sys.stdout._write(tb.render_summary())

    def showsyntaxerror(self, filename=None):
        from werkzeug.debug.tbtools import get_current_traceback
        # skip=4 hides the interpreter's own compile machinery frames.
        tb = get_current_traceback(skip=4)
        sys.stdout._write(tb.render_summary())

    def write(self, data):
        sys.stdout.write(data)
class Console(object):
    """An interactive console."""

    def __init__(self, globals=None, locals=None):
        self._ipy = _InteractiveConsole(
            {} if globals is None else globals,
            {} if locals is None else locals,
        )

    def eval(self, code):
        """Run *code* in this console and return the HTML-ready transcript."""
        # Make this console the active one for ThreadedStream.displayhook.
        _local._current_ipy = self._ipy
        saved_stdout = sys.stdout
        try:
            return self._ipy.runsource(code)
        finally:
            # runsource() may have swapped sys.stdout for a ThreadedStream.
            sys.stdout = saved_stdout
|
bsd-2-clause
|
cloudera/hue
|
desktop/core/ext-py/zope.interface-4.5.0/src/zope/interface/tests/test_document.py
|
5
|
16637
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Documentation tests.
"""
import unittest
class Test_asStructuredText(unittest.TestCase):
    """Tests for zope.interface.document.asStructuredText."""

    def _callFUT(self, iface):
        # Function under test, imported lazily per zope test convention.
        from zope.interface.document import asStructuredText
        return asStructuredText(iface)

    def test_asStructuredText_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "INoDocstring",
            " Attributes:",
            " Methods:",
            ""
        ])
        class INoDocstring(Interface):
            pass
        self.assertEqual(self._callFUT(INoDocstring), EXPECTED)

    def test_asStructuredText_empty_with_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IEmpty",
            " This is an empty interface.",
            " Attributes:",
            " Methods:",
            ""
        ])
        class IEmpty(Interface):
            """ This is an empty interface.
            """
        self.assertEqual(self._callFUT(IEmpty), EXPECTED)

    def test_asStructuredText_empty_with_multiline_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n'.join([
            "IEmpty",
            "",
            " This is an empty interface.",
            " ",
            (" It can be used to annotate any class or object, "
             "because it promises"),
            " nothing.",
            "",
            " Attributes:",
            "",
            " Methods:",
            "",
            ""
        ])
        class IEmpty(Interface):
            """ This is an empty interface.

            It can be used to annotate any class or object, because it promises
            nothing.
            """
        self.assertEqual(self._callFUT(IEmpty), EXPECTED)

    def test_asStructuredText_with_attribute_no_docstring(self):
        from zope.interface import Attribute
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IHasAttribute",
            " This interface has an attribute.",
            " Attributes:",
            " an_attribute -- no documentation",
            " Methods:",
            ""
        ])
        class IHasAttribute(Interface):
            """ This interface has an attribute.
            """
            an_attribute = Attribute('an_attribute')
        self.assertEqual(self._callFUT(IHasAttribute), EXPECTED)

    def test_asStructuredText_with_attribute_with_docstring(self):
        from zope.interface import Attribute
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IHasAttribute",
            " This interface has an attribute.",
            " Attributes:",
            " an_attribute -- This attribute is documented.",
            " Methods:",
            ""
        ])
        class IHasAttribute(Interface):
            """ This interface has an attribute.
            """
            an_attribute = Attribute('an_attribute',
                                     'This attribute is documented.')
        self.assertEqual(self._callFUT(IHasAttribute), EXPECTED)

    def test_asStructuredText_with_method_no_args_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IHasMethod",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " aMethod() -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod():
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asStructuredText_with_method_positional_args_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IHasMethod",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " aMethod(first, second) -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod(first, second):
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asStructuredText_with_method_starargs_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IHasMethod",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " aMethod(first, second, *rest) -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod(first, second, *rest):
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asStructuredText_with_method_kwargs_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IHasMethod",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " aMethod(first, second, **kw) -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod(first, second, **kw):
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asStructuredText_with_method_with_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IHasMethod",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " aMethod() -- This method is documented.",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod():
                """This method is documented.
                """
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asStructuredText_derived_ignores_base(self):
        # Only the derived interface's own attributes/methods are rendered;
        # the base contributes just the "extends" line.
        from zope.interface import Attribute
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "IDerived",
            " IDerived doc",
            " This interface extends:",
            " o IBase",
            " Attributes:",
            " attr1 -- no documentation",
            " attr2 -- attr2 doc",
            " Methods:",
            " method3() -- method3 doc",
            " method4() -- no documentation",
            " method5() -- method5 doc",
            "",
        ])
        class IBase(Interface):
            def method1():
                pass
            def method2():
                pass
        class IDerived(IBase):
            "IDerived doc"
            attr1 = Attribute('attr1')
            attr2 = Attribute('attr2', 'attr2 doc')
            def method3():
                "method3 doc"
            def method4():
                pass
            def method5():
                "method5 doc"
        self.assertEqual(self._callFUT(IDerived), EXPECTED)
class Test_asReStructuredText(unittest.TestCase):
    """Tests for zope.interface.document.asReStructuredText.

    Mirrors Test_asStructuredText; names are wrapped in ReST double
    backquotes in the expected output.
    """

    def _callFUT(self, iface):
        from zope.interface.document import asReStructuredText
        return asReStructuredText(iface)

    def test_asReStructuredText_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``INoDocstring``",
            " Attributes:",
            " Methods:",
            ""
        ])
        class INoDocstring(Interface):
            pass
        self.assertEqual(self._callFUT(INoDocstring), EXPECTED)

    def test_asReStructuredText_empty_with_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IEmpty``",
            " This is an empty interface.",
            " Attributes:",
            " Methods:",
            ""
        ])
        class IEmpty(Interface):
            """ This is an empty interface.
            """
        self.assertEqual(self._callFUT(IEmpty), EXPECTED)

    def test_asReStructuredText_empty_with_multiline_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n'.join([
            "``IEmpty``",
            "",
            " This is an empty interface.",
            " ",
            (" It can be used to annotate any class or object, "
             "because it promises"),
            " nothing.",
            "",
            " Attributes:",
            "",
            " Methods:",
            "",
            ""
        ])
        class IEmpty(Interface):
            """ This is an empty interface.

            It can be used to annotate any class or object, because it promises
            nothing.
            """
        self.assertEqual(self._callFUT(IEmpty), EXPECTED)

    def test_asReStructuredText_with_attribute_no_docstring(self):
        from zope.interface import Attribute
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IHasAttribute``",
            " This interface has an attribute.",
            " Attributes:",
            " ``an_attribute`` -- no documentation",
            " Methods:",
            ""
        ])
        class IHasAttribute(Interface):
            """ This interface has an attribute.
            """
            an_attribute = Attribute('an_attribute')
        self.assertEqual(self._callFUT(IHasAttribute), EXPECTED)

    def test_asReStructuredText_with_attribute_with_docstring(self):
        from zope.interface import Attribute
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IHasAttribute``",
            " This interface has an attribute.",
            " Attributes:",
            " ``an_attribute`` -- This attribute is documented.",
            " Methods:",
            ""
        ])
        class IHasAttribute(Interface):
            """ This interface has an attribute.
            """
            an_attribute = Attribute('an_attribute',
                                     'This attribute is documented.')
        self.assertEqual(self._callFUT(IHasAttribute), EXPECTED)

    def test_asReStructuredText_with_method_no_args_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IHasMethod``",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " ``aMethod()`` -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod():
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asReStructuredText_with_method_positional_args_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IHasMethod``",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " ``aMethod(first, second)`` -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod(first, second):
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asReStructuredText_with_method_starargs_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IHasMethod``",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " ``aMethod(first, second, *rest)`` -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod(first, second, *rest):
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asReStructuredText_with_method_kwargs_no_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IHasMethod``",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " ``aMethod(first, second, **kw)`` -- no documentation",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod(first, second, **kw):
                pass
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asReStructuredText_with_method_with_docstring(self):
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IHasMethod``",
            " This interface has a method.",
            " Attributes:",
            " Methods:",
            " ``aMethod()`` -- This method is documented.",
            ""
        ])
        class IHasMethod(Interface):
            """ This interface has a method.
            """
            def aMethod():
                """This method is documented.
                """
        self.assertEqual(self._callFUT(IHasMethod), EXPECTED)

    def test_asReStructuredText_derived_ignores_base(self):
        # Only the derived interface's own attributes/methods are rendered;
        # the base contributes just the "extends" line.
        from zope.interface import Attribute
        from zope.interface import Interface
        EXPECTED = '\n\n'.join([
            "``IDerived``",
            " IDerived doc",
            " This interface extends:",
            " o ``IBase``",
            " Attributes:",
            " ``attr1`` -- no documentation",
            " ``attr2`` -- attr2 doc",
            " Methods:",
            " ``method3()`` -- method3 doc",
            " ``method4()`` -- no documentation",
            " ``method5()`` -- method5 doc",
            "",
        ])
        class IBase(Interface):
            def method1():
                pass
            def method2():
                pass
        class IDerived(IBase):
            "IDerived doc"
            attr1 = Attribute('attr1')
            attr2 = Attribute('attr2', 'attr2 doc')
            def method3():
                "method3 doc"
            def method4():
                pass
            def method5():
                "method5 doc"
        self.assertEqual(self._callFUT(IDerived), EXPECTED)
class Test__justify_and_indent(unittest.TestCase):
    """Tests for the zope.interface.document._justify_and_indent helper."""

    def _callFUT(self, text, level, **kw):
        from zope.interface.document import _justify_and_indent
        return _justify_and_indent(text, level, **kw)

    def test_simple_level_0(self):
        # Level 0 leaves the text unchanged.
        LINES = ['Three blind mice', 'See how they run']
        text = '\n'.join(LINES)
        self.assertEqual(self._callFUT(text, 0), text)

    def test_simple_level_1(self):
        LINES = ['Three blind mice', 'See how they run']
        text = '\n'.join(LINES)
        self.assertEqual(self._callFUT(text, 1),
                         '\n'.join([' ' + line for line in LINES]))

    def test_simple_level_2(self):
        LINES = ['Three blind mice', 'See how they run']
        text = '\n'.join(LINES)
        self.assertEqual(self._callFUT(text, 1),
                         '\n'.join([' ' + line for line in LINES]))

    def test_simple_w_CRLF(self):
        # CRLF input is normalised to LF in the output.
        LINES = ['Three blind mice', 'See how they run']
        text = '\r\n'.join(LINES)
        self.assertEqual(self._callFUT(text, 1),
                         '\n'.join([' ' + line for line in LINES]))

    def test_with_munge(self):
        # munge re-wraps the text to the requested width before indenting.
        TEXT = ("This is a piece of text longer than 15 characters, \n"
                "and split across multiple lines.")
        EXPECTED = (" This is a piece\n"
                    " of text longer\n"
                    " than 15 characters,\n"
                    " and split across\n"
                    " multiple lines.\n"
                    " ")
        self.assertEqual(self._callFUT(TEXT, 1, munge=1, width=15), EXPECTED)
|
apache-2.0
|
h4ck3rm1k3/FEC-Field-Documentation
|
fec/version/v7_0/F3.py
|
3
|
6647
|
import fechbase
class Records(fechbase.RecordsBase):
    """Field layout for the FEC electronic-filing form F3, file format v7.0.

    Each entry of ``self.fields`` maps a human-readable column name to its
    position identifier in an F3 record; ``hash_names`` (inherited from
    ``fechbase.RecordsBase``) builds a name-based lookup from that list.
    """
    def __init__(self):
        fechbase.RecordsBase.__init__(self)
        # Ordered column definitions; 'number' strings like '24-(6a)' combine
        # the field position with the printed form's line label.
        self.fields = [
            {'name': 'FORM TYPE', 'number': '1'},
            {'name': 'FILER COMMITTEE ID NUMBER', 'number': '2'},
            {'name': 'COMMITTEE NAME', 'number': '3'},
            {'name': 'CHANGE OF ADDRESS', 'number': '4'},
            {'name': 'STREET 1', 'number': '5'},
            {'name': 'STREET 2', 'number': '6'},
            {'name': 'CITY', 'number': '7'},
            {'name': 'STATE', 'number': '8'},
            {'name': 'ZIP', 'number': '9'},
            {'name': 'ELECTION STATE', 'number': '10'},
            {'name': 'ELECTION DISTRICT', 'number': '11'},
            {'name': 'REPORT CODE', 'number': '12'},
            {'name': 'ELECTION CODE', 'number': '13'},
            {'name': 'DATE OF ELECTION', 'number': '14'},
            {'name': 'STATE OF ELECTION', 'number': '15'},
            {'name': 'COVERAGE FROM DATE', 'number': '16'},
            {'name': 'COVERAGE THROUGH DATE', 'number': '17'},
            {'name': 'TREASURER LAST NAME', 'number': '18'},
            {'name': 'TREASURER FIRST NAME', 'number': '19'},
            {'name': 'TREASURER MIDDLE NAME', 'number': '20'},
            {'name': 'TREASURER PREFIX', 'number': '21'},
            {'name': 'TREASURER SUFFIX', 'number': '22'},
            {'name': 'DATE SIGNED', 'number': '23'},
            {'name': 'Total Contributions (NO Loans)', 'number': '24-(6a)'},
            {'name': 'Total Contribution Refunds', 'number': '25-(6b)'},
            {'name': 'Net Contributions', 'number': '26-(6c)'},
            {'name': 'Total Operating Expenditures', 'number': '27-(7a)'},
            {'name': 'Total Offset to Operating Expenditures', 'number': '28-(7b)'},
            {'name': 'NET Operating Expenditures.', 'number': '29-(7c)'},
            {'name': 'CASH ON HAND AT CLOSE ...', 'number': '30-8.'},
            {'name': 'DEBTS TO ( Totals from SCH C and/or D)', 'number': '31-9.'},
            {'name': 'DEBTS BY (Totals from SCH C and/or D)', 'number': '32-10.'},
            {'name': 'Individuals Itemized', 'number': '33-11(a i.)'},
            {'name': 'Individuals Unitemized', 'number': '34-11(a.ii)'},
            {'name': 'Individual Contribution Total', 'number': '35-11(a.iii)'},
            {'name': 'Political Party Committees', 'number': '36-11(b)'},
            {'name': 'Other Political Committees', 'number': '37-11(c)'},
            {'name': 'The Candidate', 'number': '38-11(d)'},
            {'name': 'Total Contributions', 'number': '39-11(e)'},
            {'name': 'Transfers From Other Authorized Committees', 'number': '40-12.'},
            {'name': 'Loans made or guarn. by the Candidate', 'number': '41-13(a)'},
            {'name': 'All Other Loans', 'number': '42-13(b)'},
            {'name': 'Total Loans', 'number': '43-13(c)'},
            {'name': 'Offsets to Operating Expenditures', 'number': '44-14.'},
            {'name': 'Other Receipts', 'number': '45-15.'},
            {'name': 'Total Receipts', 'number': '46-16.'},
            {'name': 'Operating Expenditures', 'number': '47-17.'},
            {'name': 'Transfers to Other Authorized Committees', 'number': '48-18.'},
            {'name': 'Of Loans made or guar. by the Cand.', 'number': '49-19(a)'},
            {'name': 'Loan Repayments, All Other Loans', 'number': '50-19(b)'},
            {'name': 'Total Loan Repayments', 'number': '51-19(c)'},
            {'name': 'Refund/Individuals Other than Political Committees', 'number': '52-20(a)'},
            {'name': 'Refund/Political Party Committees', 'number': '53-20(b)'},
            {'name': 'Refund/Other Political Committees', 'number': '54-20(c)'},
            {'name': 'Total Contribution Refunds', 'number': '55-20(d)'},
            {'name': 'Other Disbursements', 'number': '56-21.'},
            {'name': 'Total Disbursements', 'number': '57-22.'},
            {'name': 'Cash Beginning Reporting Period', 'number': '58-23.'},
            {'name': 'Total Receipts this Period', 'number': '59-24.'},
            {'name': 'Subtotals', 'number': '60-25.'},
            {'name': 'Total Disbursements this Period', 'number': '61-26.'},
            {'name': 'Cash on hand at Close Period', 'number': '62-27.'},
            # From here on: the same summary columns repeated -- presumably
            # the election-cycle-to-date totals vs. the per-period totals
            # above; confirm against the FEC F3 spec.
            {'name': 'Total Contributions (No Loans)', 'number': '63-(6a)'},
            {'name': 'Total Contribution Refunds', 'number': '64-(6b)'},
            {'name': 'Net Contributions', 'number': '65-(6c)'},
            {'name': 'Total Operating Expenditures', 'number': '66-(7a)'},
            {'name': 'Total Offsets to Operating Expenditures', 'number': '67-(7b)'},
            {'name': 'NET Operating Expenditures.', 'number': '68-(7c)'},
            {'name': 'Individuals Itemized', 'number': '69-11(a i.)'},
            {'name': 'Individuals Unitemized', 'number': '70-11(a.ii)'},
            {'name': 'Individuals Total', 'number': '71-11(a.iii)'},
            {'name': 'Political Party Committees', 'number': '72-11(b)'},
            {'name': 'All Other Political Committees (PACS)', 'number': '73-11(c)'},
            {'name': 'The Candidate', 'number': '74-11(d)'},
            {'name': 'Total Contributions', 'number': '75-11(e)'},
            {'name': 'Transfers From Other AUTH Committees', 'number': '76-12.'},
            {'name': 'Loans made or guarn. by the Candidate', 'number': '77-13(a)'},
            {'name': 'All Other Loans', 'number': '78-13(b)'},
            {'name': 'Total Loans', 'number': '79-13(c)'},
            {'name': 'Offsets to Operating Expenditures', 'number': '80-14.'},
            {'name': 'Other Receipts', 'number': '81-15.'},
            {'name': 'Total Receipts', 'number': '82-16.'},
            {'name': 'Operating Expenditures', 'number': '83-17'},
            {'name': 'Transfers To Other AUTH Committees', 'number': '84-18.'},
            {'name': 'Loan Repayment By Candidate', 'number': '85-19(a)'},
            {'name': 'Loan Repayments, ALL Other Loans', 'number': '86-19(b)'},
            {'name': 'Total Loan Repayments', 'number': '87-19(c)'},
            {'name': 'Refund/Individuals Other than Political Committees', 'number': '88-20(a)'},
            {'name': 'Refund, Political Party Committees', 'number': '89-20(b)'},
            {'name': 'Refund, Other Political Committees', 'number': '90-20(c)'},
            {'name': 'Total Contributions Refunds', 'number': '91-20(d)'},
            {'name': 'Other Disbursements', 'number': '92-21.'},
            {'name': 'Total Disbursements', 'number': '93-22.'},
        ]
        # Name -> field lookup built by the base class.
        self.fields_names = self.hash_names(self.fields)
|
unlicense
|
buguelos/odoo
|
addons/account_check_writing/account.py
|
379
|
2032
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class account_journal(osv.osv):
    """Extend account.journal with check-writing configuration flags."""
    _inherit = "account.journal"
    _columns = {
        # Whether checks may be written against this journal at all.
        'allow_check_writing': fields.boolean('Allow Check writing', help='Check this if the journal is to be used for writing checks.'),
        # Whether printing targets pre-printed check stationery.
        'use_preprint_check': fields.boolean('Use Preprinted Check', help='Check if you use a preformated sheet for check'),
    }
class res_company(osv.osv):
    """Extend res.company with the company-wide check print layout choice."""
    _inherit = "res.company"
    _columns = {
        # Position of the detachable check on the printed page; which
        # accounting packages each layout matches is spelled out in `help`.
        'check_layout': fields.selection([
            ('top', 'Check on Top'),
            ('middle', 'Check in middle'),
            ('bottom', 'Check on bottom'),
            ],"Check Layout",
            help="Check on top is compatible with Quicken, QuickBooks and Microsoft Money. Check in middle is compatible with Peachtree, ACCPAC and DacEasy. Check on bottom is compatible with Peachtree, ACCPAC and DacEasy only" ),
    }
    # Default to the most widely compatible layout.
    _defaults = {
        'check_layout' : lambda *a: 'top',
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
theetcher/fxpt
|
fxpt/fx_prefsaver/ctrl_maya.py
|
1
|
3651
|
import pymel.core.uitypes as pmui
from ctrl_pymel import getController as pmGetController
from pstypes import UIType
from com import message
# Indexes into the 2-element value lists of `constructors` below.
IDX_PM_TYPE = 0
IDX_PM_CLASS = 1
# Maps a Maya control type id (UIType.M*) to its PyMEL counterpart:
# [PyMEL UIType id, PyMEL wrapper class].  Consumed by getController(),
# which wraps a raw control name in the PyMEL class before delegating to
# the PyMEL controller factory.
constructors = {
    UIType.MCheckBox: [UIType.PMCheckBox, pmui.CheckBox],
    UIType.MCheckBoxGrp1: [UIType.PMCheckBoxGrp1, pmui.CheckBoxGrp],
    UIType.MCheckBoxGrp2: [UIType.PMCheckBoxGrp2, pmui.CheckBoxGrp],
    UIType.MCheckBoxGrp3: [UIType.PMCheckBoxGrp3, pmui.CheckBoxGrp],
    UIType.MCheckBoxGrp4: [UIType.PMCheckBoxGrp4, pmui.CheckBoxGrp],
    UIType.MColorSliderGrp: [UIType.PMColorSliderGrp, pmui.ColorSliderGrp],
    UIType.MFloatField: [UIType.PMFloatField, pmui.FloatField],
    UIType.MFloatFieldGrp1: [UIType.PMFloatFieldGrp1, pmui.FloatFieldGrp],
    UIType.MFloatFieldGrp2: [UIType.PMFloatFieldGrp2, pmui.FloatFieldGrp],
    UIType.MFloatFieldGrp3: [UIType.PMFloatFieldGrp3, pmui.FloatFieldGrp],
    UIType.MFloatFieldGrp4: [UIType.PMFloatFieldGrp4, pmui.FloatFieldGrp],
    UIType.MFloatScrollBar: [UIType.PMFloatScrollBar, pmui.FloatScrollBar],
    UIType.MFloatSlider: [UIType.PMFloatSlider, pmui.FloatSlider],
    UIType.MFloatSliderGrp: [UIType.PMFloatSliderGrp, pmui.FloatSliderGrp],
    UIType.MFrameLayout: [UIType.PMFrameLayout, pmui.FrameLayout],
    UIType.MIconTextCheckBox: [UIType.PMIconTextCheckBox, pmui.IconTextCheckBox],
    UIType.MIconTextRadioButton: [UIType.PMIconTextRadioButton, pmui.IconTextRadioButton],
    UIType.MIconTextScrollList: [UIType.PMIconTextScrollList, pmui.IconTextScrollList],
    UIType.MIntField: [UIType.PMIntField, pmui.IntField],
    UIType.MIntFieldGrp1: [UIType.PMIntFieldGrp1, pmui.IntFieldGrp],
    UIType.MIntFieldGrp2: [UIType.PMIntFieldGrp2, pmui.IntFieldGrp],
    UIType.MIntFieldGrp3: [UIType.PMIntFieldGrp3, pmui.IntFieldGrp],
    UIType.MIntFieldGrp4: [UIType.PMIntFieldGrp4, pmui.IntFieldGrp],
    UIType.MIntScrollBar: [UIType.PMIntScrollBar, pmui.IntScrollBar],
    UIType.MIntSlider: [UIType.PMIntSlider, pmui.IntSlider],
    UIType.MIntSliderGrp: [UIType.PMIntSliderGrp, pmui.IntSliderGrp],
    UIType.MOptionMenu: [UIType.PMOptionMenu, pmui.OptionMenu],
    UIType.MOptionMenuGrp: [UIType.PMOptionMenuGrp, pmui.OptionMenuGrp],
    UIType.MRadioButton: [UIType.PMRadioButton, pmui.RadioButton],
    UIType.MRadioButtonGrp1: [UIType.PMRadioButtonGrp1, pmui.RadioButtonGrp],
    UIType.MRadioButtonGrp2: [UIType.PMRadioButtonGrp2, pmui.RadioButtonGrp],
    UIType.MRadioButtonGrp3: [UIType.PMRadioButtonGrp3, pmui.RadioButtonGrp],
    UIType.MRadioButtonGrp4: [UIType.PMRadioButtonGrp4, pmui.RadioButtonGrp],
    UIType.MSymbolCheckBox: [UIType.PMSymbolCheckBox, pmui.SymbolCheckBox],
    UIType.MScriptTable: [UIType.PMScriptTable, pmui.ScriptTable],
    UIType.MScrollField: [UIType.PMScrollField, pmui.ScrollField],
    UIType.MScrollLayout: [UIType.PMScrollLayout, pmui.ScrollLayout],
    UIType.MShelfTabLayout: [UIType.PMShelfTabLayout, pmui.ShelfTabLayout],
    UIType.MTabLayout: [UIType.PMTabLayout, pmui.TabLayout],
    UIType.MTextField: [UIType.PMTextField, pmui.TextField],
    UIType.MTextFieldButtonGrp: [UIType.PMTextFieldButtonGrp, pmui.TextFieldButtonGrp],
    UIType.MTextFieldGrp: [UIType.PMTextFieldGrp, pmui.TextFieldGrp],
    UIType.MTextScrollList: [UIType.PMTextScrollList, pmui.TextScrollList]
}
def getController(uiType, control, defaultValue):
    """Build a PyMEL-backed controller for a Maya control.

    Looks up *uiType* in the `constructors` table, wraps *control* in the
    matching PyMEL class, and delegates to the PyMEL controller factory.
    Returns the controller, or None (after reporting via message()) when
    the type is unknown.
    """
    entry = constructors.get(uiType)
    if entry is None:
        # Unknown control type: report and fall through with no controller.
        message('Cannot create controller: Unknown controller type: {0}.'.format(str(uiType)))
        return None
    pmUiType = entry[IDX_PM_TYPE]
    pmClass = entry[IDX_PM_CLASS]
    return pmGetController(pmUiType, pmClass(control), defaultValue)
|
mit
|
lahcoin/lahcoin
|
contrib/bitrpc/bitrpc.py
|
22
|
7838
|
from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local daemon's JSON-RPC endpoint; embed credentials in the
# URL only when a password was configured above.
# NOTE(review): 8638 is this coin's RPC port, presumably -- confirm against
# the daemon's default configuration.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:8638")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:8638")
# The first CLI argument selects the RPC command (case-insensitive).
cmd = sys.argv[1].lower()
# Command dispatch (Python 2 script).  Every branch follows the same shape:
# prompt for arguments with raw_input, invoke the matching RPC method, and
# print a generic error banner on any failure.  Branches with optional
# arguments retry the call without them when the parameterized call fails.
if cmd == "backupwallet":
    try:
        path = raw_input("Enter destination path/filename: ")
        print access.backupwallet(path)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccount":
    try:
        addr = raw_input("Enter a Freicoin address: ")
        print access.getaccount(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaccountaddress(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
    try:
        acct = raw_input("Enter an account name: ")
        print access.getaddressesbyaccount(acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getbalance":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getbalance(acct, mc)
        except:
            print access.getbalance()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
    try:
        height = raw_input("Height: ")
        print access.getblockbycount(height)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblockcount":
    try:
        print access.getblockcount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
    try:
        print access.getblocknumber()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
    try:
        print access.getconnectioncount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
    try:
        print access.getdifficulty()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getgenerate":
    try:
        print access.getgenerate()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
    try:
        print access.gethashespersec()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getinfo":
    try:
        print access.getinfo()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
    try:
        acct = raw_input("Enter an account name: ")
        try:
            print access.getnewaddress(acct)
        except:
            print access.getnewaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
    try:
        acct = raw_input("Enter an account (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaccount(acct, mc)
        except:
            print access.getreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
    try:
        addr = raw_input("Enter a Freicoin address (optional): ")
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.getreceivedbyaddress(addr, mc)
        except:
            print access.getreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "gettransaction":
    try:
        txid = raw_input("Enter a transaction ID: ")
        print access.gettransaction(txid)
    except:
        print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
# Dispatch continued: help/listing commands and the money-moving RPCs
# (move, sendfrom, sendmany).  Same prompt/call/fallback pattern as above.
elif cmd == "help":
    try:
        cmd = raw_input("Command (optional): ")
        try:
            print access.help(cmd)
        except:
            print access.help()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listaccounts":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        try:
            print access.listaccounts(mc)
        except:
            print access.listaccounts()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaccount(mc, incemp)
        except:
            print access.listreceivedbyaccount()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
    try:
        mc = raw_input("Minimum confirmations (optional): ")
        incemp = raw_input("Include empty? (true/false, optional): ")
        try:
            print access.listreceivedbyaddress(mc, incemp)
        except:
            print access.listreceivedbyaddress()
    except:
        print "\n---An error occurred---\n"
elif cmd == "listtransactions":
    try:
        acct = raw_input("Account (optional): ")
        count = raw_input("Number of transactions (optional): ")
        frm = raw_input("Skip (optional):")
        try:
            print access.listtransactions(acct, count, frm)
        except:
            print access.listtransactions()
    except:
        print "\n---An error occurred---\n"
elif cmd == "move":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.move(frm, to, amt, mc, comment)
        except:
            print access.move(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendfrom":
    try:
        frm = raw_input("From: ")
        to = raw_input("To: ")
        amt = raw_input("Amount:")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        commentto = raw_input("Comment-to (optional): ")
        try:
            print access.sendfrom(frm, to, amt, mc, comment, commentto)
        except:
            print access.sendfrom(frm, to, amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendmany":
    try:
        frm = raw_input("From: ")
        to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
        mc = raw_input("Minimum confirmations (optional): ")
        comment = raw_input("Comment (optional): ")
        try:
            print access.sendmany(frm,to,mc,comment)
        except:
            print access.sendmany(frm,to)
    except:
        print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
# Dispatch continued: configuration, daemon control, and wallet passphrase
# commands, plus the fallback for unknown commands.
elif cmd == "setaccount":
    try:
        addr = raw_input("Address: ")
        acct = raw_input("Account:")
        print access.setaccount(addr,acct)
    except:
        print "\n---An error occurred---\n"
elif cmd == "setgenerate":
    try:
        gen= raw_input("Generate? (true/false): ")
        cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
        try:
            print access.setgenerate(gen, cpus)
        except:
            print access.setgenerate(gen)
    except:
        print "\n---An error occurred---\n"
elif cmd == "settxfee":
    try:
        amt = raw_input("Amount:")
        print access.settxfee(amt)
    except:
        print "\n---An error occurred---\n"
elif cmd == "stop":
    try:
        print access.stop()
    except:
        print "\n---An error occurred---\n"
elif cmd == "validateaddress":
    try:
        addr = raw_input("Address: ")
        print access.validateaddress(addr)
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
    try:
        pwd = raw_input("Enter wallet passphrase: ")
        # Unlock the wallet for 60 seconds.
        access.walletpassphrase(pwd, 60)
        print "\n---Wallet unlocked---\n"
    except:
        print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
    try:
        pwd = raw_input("Enter old wallet passphrase: ")
        pwd2 = raw_input("Enter new wallet passphrase: ")
        access.walletpassphrasechange(pwd, pwd2)
        print
        print "\n---Passphrase changed---\n"
    except:
        print
        print "\n---An error occurred---\n"
        print
else:
    print "Command not found or not supported"
|
mit
|
Averroes/raft
|
core/data/DomFuzzerResultsDataModel.py
|
11
|
1487
|
#
# This module supports the data model for the responses data for TreeViews
#
# Author: Gregory Fleischer (gfleischer@gmail.com)
#
# Copyright (c) 2011 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
from core.data.DataTableDataModel import DataTableDataModel
from core.database.constants import DomFuzzerResultsTable
class DomFuzzerResultsDataModel(DataTableDataModel):
    # Data model exposing DOM fuzzer results for TreeViews.
    # Each tuple pairs a display column header with the database column
    # constant it is read from.
    ITEM_DEFINITION = (
        ('#', DomFuzzerResultsTable.ID),
        ('Confidence', DomFuzzerResultsTable.CONFIDENCE),
        ('Target', DomFuzzerResultsTable.TARGET),
        ('Param', DomFuzzerResultsTable.PARAM),
        ('Test', DomFuzzerResultsTable.TEST),
        ('URL', DomFuzzerResultsTable.URL),
    )
    def __init__(self, framework, parent = None):
        """Initialise the base table model with this model's column layout."""
        DataTableDataModel.__init__(self, framework, DomFuzzerResultsDataModel.ITEM_DEFINITION, parent)
|
gpl-3.0
|
thonkify/thonkify
|
src/lib/unidecode/x08a.py
|
253
|
4647
|
# Unidecode transliteration table for one 256-code-point Unicode block
# (file x08a.py => U+8A00..U+8AFF, CJK ideographs): data[i] is the ASCII
# romanization of code point (block_base + i).  Entries are positional --
# do not reorder.
data = (
'Yan ', # 0x00
'Yan ', # 0x01
'Ding ', # 0x02
'Fu ', # 0x03
'Qiu ', # 0x04
'Qiu ', # 0x05
'Jiao ', # 0x06
'Hong ', # 0x07
'Ji ', # 0x08
'Fan ', # 0x09
'Xun ', # 0x0a
'Diao ', # 0x0b
'Hong ', # 0x0c
'Cha ', # 0x0d
'Tao ', # 0x0e
'Xu ', # 0x0f
'Jie ', # 0x10
'Yi ', # 0x11
'Ren ', # 0x12
'Xun ', # 0x13
'Yin ', # 0x14
'Shan ', # 0x15
'Qi ', # 0x16
'Tuo ', # 0x17
'Ji ', # 0x18
'Xun ', # 0x19
'Yin ', # 0x1a
'E ', # 0x1b
'Fen ', # 0x1c
'Ya ', # 0x1d
'Yao ', # 0x1e
'Song ', # 0x1f
'Shen ', # 0x20
'Yin ', # 0x21
'Xin ', # 0x22
'Jue ', # 0x23
'Xiao ', # 0x24
'Ne ', # 0x25
'Chen ', # 0x26
'You ', # 0x27
'Zhi ', # 0x28
'Xiong ', # 0x29
'Fang ', # 0x2a
'Xin ', # 0x2b
'Chao ', # 0x2c
'She ', # 0x2d
'Xian ', # 0x2e
'Sha ', # 0x2f
'Tun ', # 0x30
'Xu ', # 0x31
'Yi ', # 0x32
'Yi ', # 0x33
'Su ', # 0x34
'Chi ', # 0x35
'He ', # 0x36
'Shen ', # 0x37
'He ', # 0x38
'Xu ', # 0x39
'Zhen ', # 0x3a
'Zhu ', # 0x3b
'Zheng ', # 0x3c
'Gou ', # 0x3d
'Zi ', # 0x3e
'Zi ', # 0x3f
'Zhan ', # 0x40
'Gu ', # 0x41
'Fu ', # 0x42
'Quan ', # 0x43
'Die ', # 0x44
'Ling ', # 0x45
'Di ', # 0x46
'Yang ', # 0x47
'Li ', # 0x48
'Nao ', # 0x49
'Pan ', # 0x4a
'Zhou ', # 0x4b
'Gan ', # 0x4c
'Yi ', # 0x4d
'Ju ', # 0x4e
'Ao ', # 0x4f
'Zha ', # 0x50
'Tuo ', # 0x51
'Yi ', # 0x52
'Qu ', # 0x53
'Zhao ', # 0x54
'Ping ', # 0x55
'Bi ', # 0x56
'Xiong ', # 0x57
'Qu ', # 0x58
'Ba ', # 0x59
'Da ', # 0x5a
'Zu ', # 0x5b
'Tao ', # 0x5c
'Zhu ', # 0x5d
'Ci ', # 0x5e
'Zhe ', # 0x5f
'Yong ', # 0x60
'Xu ', # 0x61
'Xun ', # 0x62
'Yi ', # 0x63
'Huang ', # 0x64
'He ', # 0x65
'Shi ', # 0x66
'Cha ', # 0x67
'Jiao ', # 0x68
'Shi ', # 0x69
'Hen ', # 0x6a
'Cha ', # 0x6b
'Gou ', # 0x6c
'Gui ', # 0x6d
'Quan ', # 0x6e
'Hui ', # 0x6f
'Jie ', # 0x70
'Hua ', # 0x71
'Gai ', # 0x72
'Xiang ', # 0x73
'Wei ', # 0x74
'Shen ', # 0x75
'Chou ', # 0x76
'Tong ', # 0x77
'Mi ', # 0x78
'Zhan ', # 0x79
'Ming ', # 0x7a
'E ', # 0x7b
'Hui ', # 0x7c
'Yan ', # 0x7d
'Xiong ', # 0x7e
'Gua ', # 0x7f
'Er ', # 0x80
'Beng ', # 0x81
'Tiao ', # 0x82
'Chi ', # 0x83
'Lei ', # 0x84
'Zhu ', # 0x85
'Kuang ', # 0x86
'Kua ', # 0x87
'Wu ', # 0x88
'Yu ', # 0x89
'Teng ', # 0x8a
'Ji ', # 0x8b
'Zhi ', # 0x8c
'Ren ', # 0x8d
'Su ', # 0x8e
'Lang ', # 0x8f
'E ', # 0x90
'Kuang ', # 0x91
'E ', # 0x92
'Shi ', # 0x93
'Ting ', # 0x94
'Dan ', # 0x95
'Bo ', # 0x96
'Chan ', # 0x97
'You ', # 0x98
'Heng ', # 0x99
'Qiao ', # 0x9a
'Qin ', # 0x9b
'Shua ', # 0x9c
'An ', # 0x9d
'Yu ', # 0x9e
'Xiao ', # 0x9f
'Cheng ', # 0xa0
'Jie ', # 0xa1
'Xian ', # 0xa2
'Wu ', # 0xa3
'Wu ', # 0xa4
'Gao ', # 0xa5
'Song ', # 0xa6
'Pu ', # 0xa7
'Hui ', # 0xa8
'Jing ', # 0xa9
'Shuo ', # 0xaa
'Zhen ', # 0xab
'Shuo ', # 0xac
'Du ', # 0xad
'Yasashi ', # 0xae
'Chang ', # 0xaf
'Shui ', # 0xb0
'Jie ', # 0xb1
'Ke ', # 0xb2
'Qu ', # 0xb3
'Cong ', # 0xb4
'Xiao ', # 0xb5
'Sui ', # 0xb6
'Wang ', # 0xb7
'Xuan ', # 0xb8
'Fei ', # 0xb9
'Chi ', # 0xba
'Ta ', # 0xbb
'Yi ', # 0xbc
'Na ', # 0xbd
'Yin ', # 0xbe
'Diao ', # 0xbf
'Pi ', # 0xc0
'Chuo ', # 0xc1
'Chan ', # 0xc2
'Chen ', # 0xc3
'Zhun ', # 0xc4
'Ji ', # 0xc5
'Qi ', # 0xc6
'Tan ', # 0xc7
'Zhui ', # 0xc8
'Wei ', # 0xc9
'Ju ', # 0xca
'Qing ', # 0xcb
'Jian ', # 0xcc
'Zheng ', # 0xcd
'Ze ', # 0xce
'Zou ', # 0xcf
'Qian ', # 0xd0
'Zhuo ', # 0xd1
'Liang ', # 0xd2
'Jian ', # 0xd3
'Zhu ', # 0xd4
'Hao ', # 0xd5
'Lun ', # 0xd6
'Shen ', # 0xd7
'Biao ', # 0xd8
'Huai ', # 0xd9
'Pian ', # 0xda
'Yu ', # 0xdb
'Die ', # 0xdc
'Xu ', # 0xdd
'Pian ', # 0xde
'Shi ', # 0xdf
'Xuan ', # 0xe0
'Shi ', # 0xe1
'Hun ', # 0xe2
'Hua ', # 0xe3
'E ', # 0xe4
'Zhong ', # 0xe5
'Di ', # 0xe6
'Xie ', # 0xe7
'Fu ', # 0xe8
'Pu ', # 0xe9
'Ting ', # 0xea
'Jian ', # 0xeb
'Qi ', # 0xec
'Yu ', # 0xed
'Zi ', # 0xee
'Chuan ', # 0xef
'Xi ', # 0xf0
'Hui ', # 0xf1
'Yin ', # 0xf2
'An ', # 0xf3
'Xian ', # 0xf4
'Nan ', # 0xf5
'Chen ', # 0xf6
'Feng ', # 0xf7
'Zhu ', # 0xf8
'Yang ', # 0xf9
'Yan ', # 0xfa
'Heng ', # 0xfb
'Xuan ', # 0xfc
'Ge ', # 0xfd
'Nuo ', # 0xfe
'Qi ', # 0xff
)
|
mit
|
polarise/BioClasses
|
count_paths_in_sequence.py
|
1
|
1624
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import division
import sys
import time
from Node import *
from LeafCounter import *
from Sequence import *
from Bio import SeqIO
from TransitionMatrix import *
import cPickle
from GeneticCode import *
def main( fn ):
TM = TransitionMatrix()
TM.read( "transition_matrices/homo_transition_matrix.pic" )
#G = GeneticCode( "genetic_codes/tetrahymena_genetic_code.txt" )
#G.read_CAI_table( "euplotid_CAI_table.txt" )
c = 0
for seq_record in SeqIO.parse( fn, "fasta" ):
if c > 100: break
sequence = str( seq_record.seq )
seq_name = seq_record.id
# first we check whether we will be able to build the tree
s = Sequence( sequence=sequence, name=seq_name, stops=[ 'TAA', 'TAG', 'TGA' ] )
s.truncate()
s.get_stop_sequence()
s.sanitise_stop_sequence()
nodes = [ Node( *d ) for d in s.unique_stop_sequence ]
L = LeafCounter()
for n in nodes[:-3]:
L.add_node( n )
print "%s\t%s" % ( seq_name, L.leaf_count() )
"""
if L.leaf_count() > 10000:
print >> sys.stderr, "Skipping complex sequence %s with %d leaves..." % ( seq_name, L.leaf_count())
continue
# now we know that we can ;-)
s.set_transition_matrix( TM )
s.set_genetic_code( G )
s.build_tree()
s.estimate_frameshift_likelihood()
s.estimate_frameshift_CAI()
s.get_most_likely_frameshift()
if s.most_likely_frameshift is None:
print >> sys.stderr, "%s admits no frameshifts..." % seq_name
else:
print seq_name + "\t" + s.most_likely_frameshift.repr_as_row()
c += 0
"""
if __name__ == "__main__":
    # CLI entry point: the sole positional argument is the FASTA file.
    main( sys.argv[1] )
|
gpl-2.0
|
gratefulfrog/SPI
|
RPI/Python/OLD/oscSerialApp_tooSlow/master.py
|
1
|
13258
|
#!/usr/bin/python3
# example call
# ./master.py /dev/ttyUSB0 /dev/ttyUSB1
## python system imports
import csv
import threading
import queue
import time
import sys
import os.path
import AEMmailer
from casyncosc import SerialServer
## diskSpaceLimit : in MB, when limit reached, processing halts
# NOTE(review): nothing in this module visibly reads diskSpaceLimit --
# presumably enforced elsewhere (or dead); confirm before relying on it.
diskSpaceLimit = 100 # MB
class WriterThread(threading.Thread):
    """Consumer thread: pops data rows from the shared queue and appends
    them to per-(board, ADC, channel) CSV files under the data directory.
    File access is serialised through per-file locks stored in the shared
    fileLockDict, itself guarded by the shared dictLock."""
    def __init__(self, name, q, lock,fileLockDict, syncTimeFunc, dataDir): #,syncTimeFunc)
        """
        Consumer class instanciation:
        The Writer thread implements the consumer in the producer/consumer paradigm.
        The number of threads created is not known by the individual threads. They
        simply pop things off the shared thread-safe queue, extract the data, and
        write to the correct data (csv) file, creating it and the data directory if needed.
        The file names for the data files are created from the data elts themselves, in combination
        with the information obtained by calling the syncTimeFunc provided as argument.
        Data file names are used as keys in the shared fileLockDict, where the values are semaphores
        ensuring unique access to each file.
        @param self
        @param name a string naming the thread, legacy but left to allow for easier debugging
        @param q the thread-safe q which will be popped to get data
        @param lock the semaphore ensure unique access to the fileLockDict
        @param fileLockDict shared dict mapping data-file names to their locks
        @param syncTimeFunc a function that will be called to get the synch time to be used in naming the
        data files.
        @param dataDir the path to the data directory
        """
        threading.Thread.__init__(self)
        ## for file name lookup
        self.fileLockDict = fileLockDict
        ## string name of the thread, used for debugging or information messages
        self.name = name
        ## work q, source of data to be written to files
        self.q = q
        ## semaphore for exclusive access to the fileLockDict
        self.dictLock = lock
        ## semaphore locking access to the file currently being written
        self.fileLock = None
        ## path to the data file target directory
        self.dataDir = dataDir
        # Create the data directory once, under the shared lock so that
        # concurrent WriterThread constructions do not race on makedirs.
        self.dictLock.acquire()
        if not os.path.exists(dataDir):
            os.makedirs(dataDir)
        self.dictLock.release()
        ## function which when called will return the synchronisation time of the boards
        self.getSynchTimeFunc = syncTimeFunc
    def getFormattedRow(self,row):
        """
        Formats a row by rounding the float value to 4 decimals.
        Only [timestamp, value] (row[1:3]) is written; row[0] (encoded
        ADC/channel) and row[3] (board id) end up in the file name instead.
        @param row the data row,
        @return the row as a list ready to be written to the csv data file.
        """
        row[2]= round(row[2],4)
        return row[1:3]
    def createDataFile(self,outFile):
        """
        Called to create the data csv file and write the header row.
        """
        headers = ('Timestamp','Value')
        with open(outFile, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',',
                                quotechar='"', quoting=csv.QUOTE_MINIMAL)
            #print(headers)
            writer.writerow(headers)
        print('created file:',outFile)
    def decodeADCCID(self,coded):
        """
        Decodes the ADC and Channel values which were encoded onto a single byte such that
        the top 4 bits are the ADC id, and the lower four are the channel id.
        @param coded a single byte containing the encoded values
        @return a list as decoded into [ADC_ID, Channel_ID]
        """
        # NOTE(review): maskR is unused -- the 0b1111 mask is applied inline.
        maskR = 0b1111
        return [((coded>>4)& 0b1111), coded & 0b1111]
    def getFile(self,row):
        """
        the filename is computed, then the dictLock is acquired to access the fileLockDict and
        get a lock for the data file, creating the lock (and the file itself) if needed.
        the dictLock is released and the fileLock is acquired before returning.
        @param row the data to be used to get the file name
        @return the data file name; self.fileLock is held on return
        """
        [adcID,chID] = self.decodeADCCID(row[0])
        filename = self.dataDir + '/{bid}_{adc}_{cid}_{syc}.csv'.format(bid=row[3],adc=adcID,cid=chID,syc=self.getSynchTimeFunc())
        #print('getting filenam',filename)
        self.dictLock.acquire()
        try:
            self.fileLock = self.fileLockDict[filename]
        except KeyError:
            # First row for this file: register a fresh lock and create
            # the file (with header) while still holding the dict lock.
            self.fileLock = threading.Lock()
            self.fileLockDict[filename] = self.fileLock
            if not os.path.exists(filename):
                self.createDataFile(filename)
        self.dictLock.release()
        self.fileLock.acquire()
        return filename
    def releaseFile(self,filename):
        """
        Simply releases the fileLock.
        NOTE(review): the filename argument is ignored -- the lock taken by
        the most recent getFile() call is released.
        """
        self.fileLock.release()
    def do_work(self,thing):
        """
        This method handles the 'consuming' of the thing popped from the queue.
        After obtaining a lock for the appropriate data file, a csv row is written,
        the lock is released.
        @param thing the object popped from the queue, in our case a list of length 4:
        ADCCID,Timestamp,value, Board_ID
        """
        filename = self.getFile(thing)
        #print('writing a row',thing)
        with open(filename, 'a', newline='') as csvfile:
            writer = csv.writer(csvfile, delimiter=',',
                                quotechar='"', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(self.getFormattedRow(thing))
        self.releaseFile(filename)
    def run(self):
        """
        Thread run method. pops the queue, sends the popped thing to be consumed.
        if a None value is popped, the method exits properly and the thread ends.
        """
        try:
            while True:
                item = self.q.get()
                #print(item)
                if item is None:
                    break
                self.do_work(item)
                self.q.task_done()
        except Exception as e:
            print(e)
            print('thread exiting...')
        finally:
            # Accounts for the final get() (the None sentinel or the item
            # whose do_work raised) so q.join() can still complete.
            self.q.task_done()
class ReaderThread(threading.Thread):
    """Producer thread: wraps a SerialServer that reads one serial port and
    pushes the decoded rows onto the shared queue consumed by WriterThreads.
    (The original docstrings here were copy-pasted from WriterThread and
    described the consumer side; rewritten to match the actual code.)"""
    def __init__(self, q, stopEv, mailerFunc, portT):
        """
        Producer class instanciation: create a reader for one serial port.
        The SerialServer does the actual port reading and queue feeding.
        @param self
        @param q the thread-safe queue onto which rows are pushed
        @param stopEv threading.Event used to request/signal a global stop
        @param mailerFunc callable used to report status messages
        @param portT a string naming the serial port
        """
        threading.Thread.__init__(self)
        ## serial server doing the port reading and queue feeding
        self.server = SerialServer(portT,stopEv,q,mailerFunc)
        ## event to set when disk space runs out we exit
        self.stopEvent = stopEv
        print('Reader created on port: ',portT)
    def run(self):
        """
        Thread run method: serve the serial port until interrupted.
        On KeyboardInterrupt, set the shared stop event so that the other
        threads shut down as well.
        """
        try:
            self.server.serve()
        except KeyboardInterrupt:
            self.stopEvent.set()
###### Master Class CODE ##################
class Master:
    """
    Implements the start and Producer/Consumer coordination for parallel
    processing: ReaderThread producers push items onto a shared FIFO queue,
    WriterThread consumers pop them and write csv data files.
    """
    def __init__(self, ports=None, nbThreads=4, dataDir='./DATA'):
        """
        Constructor for class Master.
        Creates the FIFO, thread-safe queue shared by producers and consumers,
        a semaphore (threading.Lock) ensuring unique access to the shared
        fileLockDict, the mailer (when a password is available), and finally
        the consumer (writer) and producer (reader) threads.
        @param self
        @param ports list of serial port names to read from
                     (defaults to ['/dev/ttyUSB0'])
        @param nbThreads number of writer (consumer) threads to create
        @param dataDir the directory where the consumers write the csv files
        """
        if ports is None:
            # avoid a shared mutable default argument; keep historical default
            ports = ['/dev/ttyUSB0']
        ## file lock dict for writer threads
        ## keys: full filename
        ## values: threading.Lock object guaranteeing exclusive access to the file for writing
        self.fileLockDict = {}
        ## Directory for data files
        self.dataDir = dataDir
        ## Synchronized work queue shared by readers (put) and writers (get);
        ## created exactly once (the original code built it twice)
        self.q = queue.Queue()
        ## Semaphore object to be passed to consumer threads for their use
        self.lock = threading.Lock()
        ## Event set to request a global shutdown (disk full or Ctrl-C)
        self.stopEvent = threading.Event()
        self.stopEvent.clear()
        try:
            self.mailer = AEMmailer.AEMMailer()
        except AEMmailer.NoPasswordException:
            print("No password provided; no mail will be sent...")
            self.mailer = None
        self.sendMsg("AEM session started!")
        ## session start timestamp, used by consumers to build data file names
        self.startTime = time.strftime('%Y_%m_%d_%H.%M.%S', time.localtime())
        self.createWriterThreads(nbThreads)
        self.createReaderThreads(ports)
    def getSyncTime(self):
        """Return the session start timestamp used in data file names."""
        return self.startTime
    def sendMsg(self, msg):
        """Send msg by mail (when a mailer is configured) and echo it locally."""
        if self.mailer:
            self.mailer.connectAndSend(msg)
        print(msg)
    def createReaderThreads(self, portLis):
        """Create and start one ReaderThread (producer) per serial port name."""
        self.readerThreads = []
        for port in portLis:
            reader = ReaderThread(self.q, self.stopEvent, self.sendMsg, port)
            reader.start()
            self.readerThreads.append(reader)
    def createWriterThreads(self, num):
        """
        Creates the number of consumer threads according to the argument.
        @param num the number of threads to create
        """
        self.writerThreads = []
        for i in range(num):
            name = 'WriterThread-' + str(i)
            t = WriterThread(name, self.q, self.lock, self.fileLockDict, self.getSyncTime, self.dataDir)
            t.start()
            self.writerThreads.append(t)
    def stopAll(self):
        """
        Called at the end of a run; allows all threads to exit properly by
        draining the queue and sending one None sentinel per writer thread.
        """
        print('Shutting down all threads...')
        self.stopEvent.set()
        # block until all queued work items have been processed
        self.q.join()
        # one None sentinel per writer makes each consumer's loop exit
        for _ in self.writerThreads:
            self.q.put(None)
        for t in self.writerThreads + self.readerThreads:
            t.join()
        print('All threads shut down, exiting...')
        time.sleep(0.01)
    def diskSpaceLimitReached(self):
        """Return True when free disk space drops below the module-level
        diskSpaceLimit (in MB), sending a notification when it does."""
        st = os.statvfs(os.getcwd())
        free = st.f_bavail * st.f_frsize
        diskFreeMB = free / 1000000
        res = diskFreeMB <= diskSpaceLimit
        if res:
            print('Disk Space Limit Reached :', diskSpaceLimit, 'MB')
            self.sendMsg('Disk Space Limit ' + str(diskSpaceLimit) + ' MB reached!')
        return res
    def run(self):
        """
        Called to start a data run, after the threads have been started.
        Polls the disk-space limit every 10 seconds and shuts everything
        down on Ctrl-c (handled properly) or when the limit is reached.
        Will display the elapsed time on exit.
        """
        startTime = time.time()
        try:
            while True:
                if self.diskSpaceLimitReached():
                    self.stopEvent.set()
                    break
                time.sleep(10)
        except KeyboardInterrupt:
            # Ctrl-C is the normal way to end a run; fall through to cleanup.
            # (Narrowed from a bare 'except:' so real errors are not hidden.)
            pass
        finally:
            self.stopAll()
            elapsedTime = round(time.time() - startTime)
            print('Elapsed Time :', elapsedTime, 'seconds')
            self.sendMsg('Shutting down!\nElapsed Time : ' + str(elapsedTime) + ' seconds.')
if __name__ == '__main__':
    # show usage and exit when any help flag is present on the command line
    if any(['-h' in sys.argv, '--h' in sys.argv, '--help' in sys.argv]):
        print('Usage: $ ./master.py <ports defaults to /dev/ttyUSB0>' )
        print('examples;')
        print('Usage: $ ./master.py # uses default port /dev/ttyUSB00' )
        print('Usage: $ ./master.py /dev/ttyUSB0 # same as previous' )
        print('Usage: $ ./master.py /dev/ttyACM0 /dev/ttyUSB0 # use these ports')
        print('Note: the AEM board must be running the appropriate software')
        sys.exit(0)
    if len(sys.argv) < 2:
        ## instance of Master class running the entire show!
        master = Master()
    else:
        # every extra command line argument is taken as a serial port name
        master = Master(sys.argv[1:])
    master.run()
|
gpl-3.0
|
tzuhsienli/resume-site
|
resume/urls.py
|
1
|
1524
|
from django.conf.urls import patterns, include, url
from django.contrib.auth.models import User, Group
from rest_framework import viewsets, routers
# from resume.views import hello
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# We should create our API now from here
#ViewSets define the view behavior.
class UserViewSet(viewsets.ModelViewSet):
    """REST API endpoint providing the default CRUD actions for auth Users."""
    model = User
class GroupViewSet(viewsets.ModelViewSet):
    """REST API endpoint providing the default CRUD actions for auth Groups."""
    model = Group
# Routers provide an easy way of automatically determining the URL conf:
# the DefaultRouter generates the standard list/detail routes (plus an API
# root view) for every registered ViewSet.
router = routers.DefaultRouter()
router.register(r'users', UserViewSet)
router.register(r'groups',GroupViewSet)
# URL configuration: the router-generated API routes are mounted at the site
# root, and the browsable API's login/logout views under /api-auth/.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'resume.views.home', name='home'),
    # # url(r'^resume/', include('resume.foo.urls')),
    #
    # # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    #
    # # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
    #
    # # Custom URL configuration should be added here
    # url(r'^hello/$', include('resume.views.hello'), name='hello'),
    # url(r'^facet/login$',include('resume.facet.login'), name='login'),
    # Wire up our API using automatic URL routing.
    # Additionally, we include login URLs for the browseable API.
    url(r'^', include(router.urls)),
    url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework'))
)
|
mit
|
IptvBrasilGroup/Cleitonleonelcreton.repository
|
plugin.video.iptvbrondemand.mobile/requestsX/packages/urllib3/packages/six.py
|
2375
|
11628
|
"""Utilities for writing code that runs on Python 2 and 3"""
#Copyright (c) 2010-2011 Benjamin Peterson
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.2.0" # Revision 41c74fef2ded
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
# Version-neutral aliases for the type names that differ between 2.x and 3.x.
if PY3:
    string_types = str,
    integer_types = int,
    class_types = type,
    text_type = str
    binary_type = bytes
    MAXSIZE = sys.maxsize
else:
    string_types = basestring,
    integer_types = (int, long)
    class_types = (type, types.ClassType)
    text_type = unicode
    binary_type = str
    if sys.platform.startswith("java"):
        # Jython always uses 32 bits.
        MAXSIZE = int((1 << 31) - 1)
    else:
        # It's possible to have sizeof(long) != sizeof(Py_ssize_t).
        # Overflowing __len__ distinguishes a 32-bit from a 64-bit Py_ssize_t.
        class X(object):
            def __len__(self):
                return 1 << 31
        try:
            len(X())
        except OverflowError:
            # 32-bit
            MAXSIZE = int((1 << 31) - 1)
        else:
            # 64-bit
            MAXSIZE = int((1 << 63) - 1)
        del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
    """Descriptor base class that resolves a moved object lazily on first
    access, then removes itself so resolution happens only once."""
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        # resolve the real object and cache it on the accessing instance
        result = self._resolve()
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy descriptor for a module renamed between Python 2 and 3."""
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                # the Python 3 module name defaults to the attribute name
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        # import and return the module appropriate for this Python version
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy descriptor for an attribute that moved modules in Python 3."""
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    # both names default to the attribute name itself
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        # import the owning module, then fetch the attribute from it
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects"""
# Table of modules/attributes whose names changed between Python 2 and 3;
# each entry becomes a lazy attribute on the six.moves pseudo-module below.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("winreg", "_winreg"),
]
# install the lazy descriptors on the pseudo-module class
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
del attr
# register under "<package>.moves" so "import six.moves" works
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
    """Add an item (a MovedModule or MovedAttribute) to six.moves."""
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        # the item may still be an unresolved descriptor on the class...
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            # ...or already resolved into the pseudo-module's dict
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Names of the version-specific method/function introspection attributes
# and dictionary iteration methods, used by the accessor helpers below.
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _iterkeys = "keys"
    _itervalues = "values"
    _iteritems = "items"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _iterkeys = "iterkeys"
    _itervalues = "itervalues"
    _iteritems = "iteritems"
try:
    # the next() builtin exists on Python 2.6+ and 3.x
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; the function is returned as-is
        return unbound
    Iterator = object
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    # base class mapping the Python 2 next() protocol onto __next__
    class Iterator(object):
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-neutral accessors for method/function internals.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
def iterkeys(d):
    """Return an iterator over the keys of a dictionary."""
    keys_method = getattr(d, _iterkeys)
    return iter(keys_method())
def itervalues(d):
    """Return an iterator over the values of a dictionary."""
    values_method = getattr(d, _itervalues)
    return iter(values_method())
def iteritems(d):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    items_method = getattr(d, _iteritems)
    return iter(items_method())
if PY3:
    def b(s):
        # latin-1 maps code points 0-255 directly onto byte values
        return s.encode("latin-1")
    def u(s):
        return s
    if sys.version_info[1] <= 1:
        def int2byte(i):
            return bytes((i,))
    else:
        # This is about 2x faster than the implementation above on 3.2+
        int2byte = operator.methodcaller("to_bytes", 1, "big")
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
else:
    def b(s):
        return s
    def u(s):
        return unicode(s, "unicode_escape")
    int2byte = chr
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
# exec, print and exception re-raising all changed syntax between Python 2
# and 3, so each gets a function wrapper defined per version.
if PY3:
    import builtins
    exec_ = getattr(builtins, "exec")
    def reraise(tp, value, tb=None):
        if value.__traceback__ is not tb:
            raise value.with_traceback(tb)
        raise value
    print_ = getattr(builtins, "print")
    del builtins
else:
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # default to the caller's own namespace
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
    exec_("""def reraise(tp, value, tb=None):
    raise tp, value, tb
""")
    def print_(*args, **kwargs):
        """The new-style print function."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        if not want_unicode:
            # promote everything to unicode if any argument is unicode
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Create a base class with a metaclass."""
    namespace = {}
    return meta("NewBase", (base,), namespace)
|
gpl-2.0
|
doganaltunbay/odoo
|
addons/hr_timesheet_sheet/hr_timesheet_sheet.py
|
35
|
34024
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pytz import timezone
import pytz
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.translate import _
class hr_timesheet_sheet(osv.osv):
    """Employee timesheet sheet: groups attendances and analytic timesheet
    lines over a date range and drives their validation workflow
    (new -> draft -> confirm -> done)."""
    _name = "hr_timesheet_sheet.sheet"
    _inherit = "mail.thread"
    _table = 'hr_timesheet_sheet_sheet'
    _order = "id desc"
    _description="Timesheet"
    def _total(self, cr, uid, ids, name, args, context=None):
        """ Compute the attendances, analytic lines timesheets and differences between them
            for all the days of a timesheet and the current day
        """
        res = {}
        for sheet in self.browse(cr, uid, ids, context=context or {}):
            res.setdefault(sheet.id, {
                'total_attendance': 0.0,
                'total_timesheet': 0.0,
                'total_difference': 0.0,
            })
            for period in sheet.period_ids:
                res[sheet.id]['total_attendance'] += period.total_attendance
                res[sheet.id]['total_timesheet'] += period.total_timesheet
                res[sheet.id]['total_difference'] += period.total_attendance - period.total_timesheet
        return res
    def check_employee_attendance_state(self, cr, uid, sheet_id, context=None):
        # a sheet is only valid when sign-ins and sign-outs come in pairs
        ids_signin = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_in')])
        ids_signout = self.pool.get('hr.attendance').search(cr,uid,[('sheet_id', '=', sheet_id),('action','=','sign_out')])
        if len(ids_signin) != len(ids_signout):
            raise osv.except_osv(('Warning!'),_('The timesheet cannot be validated as it does not contain an equal number of sign ins and sign outs.'))
        return True
    def copy(self, cr, uid, ids, *args, **argv):
        # duplicating timesheets is explicitly forbidden
        raise osv.except_osv(_('Error!'), _('You cannot duplicate a timesheet.'))
    def create(self, cr, uid, vals, context=None):
        # the employee must be linked to a user, a product and an analytic
        # journal before a timesheet can be created for him/her
        if 'employee_id' in vals:
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product, like \'Consultant\'.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
                raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
        if vals.get('attendances_ids'):
            # If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
            vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
        return super(hr_timesheet_sheet, self).create(cr, uid, vals, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        if 'employee_id' in vals:
            new_user_id = self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).user_id.id or False
            if not new_user_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link him/her to a user.'))
            if not self._sheet_date(cr, uid, ids, forced_user_id=new_user_id, context=context):
                raise osv.except_osv(_('Error!'), _('You cannot have 2 timesheets that overlap!\nYou should use the menu \'My Timesheet\' to avoid this problem.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).product_id:
                raise osv.except_osv(_('Error!'), _('In order to create a timesheet for this employee, you must link the employee to a product.'))
            if not self.pool.get('hr.employee').browse(cr, uid, vals['employee_id'], context=context).journal_id:
                raise osv.except_osv(_('Configuration Error!'), _('In order to create a timesheet for this employee, you must assign an analytic journal to the employee, like \'Timesheet Journal\'.'))
        if vals.get('attendances_ids'):
            # If attendances, we sort them by date asc before writing them, to satisfy the alternance constraint
            # In addition to the date order, deleting attendances are done before inserting attendances
            vals['attendances_ids'] = self.sort_attendances(cr, uid, vals['attendances_ids'], context=context)
        res = super(hr_timesheet_sheet, self).write(cr, uid, ids, vals, context=context)
        if vals.get('attendances_ids'):
            # re-check the sign-in/sign-out alternance after the write
            for timesheet in self.browse(cr, uid, ids):
                if not self.pool['hr.attendance']._altern_si_so(cr, uid, [att.id for att in timesheet.attendances_ids]):
                    raise osv.except_osv(_('Warning !'), _('Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)'))
        return res
    def sort_attendances(self, cr, uid, attendance_tuples, context=None):
        # sort one2many command tuples by attendance date; deletion commands
        # (2, 3) sort first (key 0) so they execute before inserts/updates
        date_attendances = []
        for att_tuple in attendance_tuples:
            if att_tuple[0] in [0,1,4]:
                if att_tuple[0] in [0,1]:
                    if att_tuple[2] and att_tuple[2].has_key('name'):
                        name = att_tuple[2]['name']
                    else:
                        name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
                else:
                    name = self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name
                date_attendances.append((1, name, att_tuple))
            elif att_tuple[0] in [2,3]:
                date_attendances.append((0, self.pool['hr.attendance'].browse(cr, uid, att_tuple[1]).name, att_tuple))
            else:
                date_attendances.append((0, False, att_tuple))
        date_attendances.sort()
        return [att[2] for att in date_attendances]
    def button_confirm(self, cr, uid, ids, context=None):
        # subscribe the employee's manager, validate the attendance pairing,
        # then fire the workflow when the difference is within company limits
        for sheet in self.browse(cr, uid, ids, context=context):
            if sheet.employee_id and sheet.employee_id.parent_id and sheet.employee_id.parent_id.user_id:
                self.message_subscribe_users(cr, uid, [sheet.id], user_ids=[sheet.employee_id.parent_id.user_id.id], context=context)
            self.check_employee_attendance_state(cr, uid, sheet.id, context=context)
            di = sheet.user_id.company_id.timesheet_max_difference
            if (abs(sheet.total_difference) < di) or not di:
                sheet.signal_workflow('confirm')
            else:
                raise osv.except_osv(_('Warning!'), _('Please verify that the total difference of the sheet is lower than %.2f.') %(di,))
        return True
    def attendance_action_change(self, cr, uid, ids, context=None):
        # toggle sign-in/sign-out for every distinct employee of the sheets
        hr_employee = self.pool.get('hr.employee')
        employee_ids = []
        for sheet in self.browse(cr, uid, ids, context=context):
            if sheet.employee_id.id not in employee_ids: employee_ids.append(sheet.employee_id.id)
        return hr_employee.attendance_action_change(cr, uid, employee_ids, context=context)
    def _count_all(self, cr, uid, ids, field_name, arg, context=None):
        # functional field: per-sheet counts of timesheet lines / attendances
        Timesheet = self.pool['hr.analytic.timesheet']
        Attendance = self.pool['hr.attendance']
        return {
            sheet_id: {
                'timesheet_activity_count': Timesheet.search_count(cr,uid, [('sheet_id','=', sheet_id)], context=context),
                'attendance_count': Attendance.search_count(cr,uid, [('sheet_id', '=', sheet_id)], context=context)
            }
            for sheet_id in ids
        }
    _columns = {
        'name': fields.char('Note', select=1,
                            states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
        'employee_id': fields.many2one('hr.employee', 'Employee', required=True),
        'user_id': fields.related('employee_id', 'user_id', type="many2one", relation="res.users", store=True, string="User", required=False, readonly=True),#fields.many2one('res.users', 'User', required=True, select=1, states={'confirm':[('readonly', True)], 'done':[('readonly', True)]}),
        'date_from': fields.date('Date from', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
        'date_to': fields.date('Date to', required=True, select=1, readonly=True, states={'new':[('readonly', False)]}),
        'timesheet_ids' : fields.one2many('hr.analytic.timesheet', 'sheet_id',
            'Timesheet lines',
            readonly=True, states={
                'draft': [('readonly', False)],
                'new': [('readonly', False)]}
            ),
        'attendances_ids' : fields.one2many('hr.attendance', 'sheet_id', 'Attendances'),
        'state' : fields.selection([
            ('new', 'New'),
            ('draft','Open'),
            ('confirm','Waiting Approval'),
            ('done','Approved')], 'Status', select=True, required=True, readonly=True,
            help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed timesheet. \
                \n* The \'Confirmed\' status is used for to confirm the timesheet by user. \
                \n* The \'Done\' status is used when users timesheet is accepted by his/her senior.'),
        'state_attendance' : fields.related('employee_id', 'state', type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Current Status', readonly=True),
        'total_attendance': fields.function(_total, method=True, string='Total Attendance', multi="_total"),
        'total_timesheet': fields.function(_total, method=True, string='Total Timesheet', multi="_total"),
        'total_difference': fields.function(_total, method=True, string='Difference', multi="_total"),
        'period_ids': fields.one2many('hr_timesheet_sheet.sheet.day', 'sheet_id', 'Period', readonly=True),
        'account_ids': fields.one2many('hr_timesheet_sheet.sheet.account', 'sheet_id', 'Analytic accounts', readonly=True),
        'company_id': fields.many2one('res.company', 'Company'),
        'department_id':fields.many2one('hr.department','Department'),
        'timesheet_activity_count': fields.function(_count_all, type='integer', string='Timesheet Activities', multi=True),
        'attendance_count': fields.function(_count_all, type='integer', string="Attendances", multi=True),
    }
    def _default_date_from(self, cr, uid, context=None):
        # start of the company's timesheet range (month/week/year), else today
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        r = user.company_id and user.company_id.timesheet_range or 'month'
        if r=='month':
            return time.strftime('%Y-%m-01')
        elif r=='week':
            return (datetime.today() + relativedelta(weekday=0, days=-6)).strftime('%Y-%m-%d')
        elif r=='year':
            return time.strftime('%Y-01-01')
        return time.strftime('%Y-%m-%d')
    def _default_date_to(self, cr, uid, context=None):
        # end of the company's timesheet range (month/week/year), else today
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        r = user.company_id and user.company_id.timesheet_range or 'month'
        if r=='month':
            return (datetime.today() + relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')
        elif r=='week':
            return (datetime.today() + relativedelta(weekday=6)).strftime('%Y-%m-%d')
        elif r=='year':
            return time.strftime('%Y-12-31')
        return time.strftime('%Y-%m-%d')
    def _default_employee(self, cr, uid, context=None):
        # the employee record linked to the current user, when there is one
        emp_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
        return emp_ids and emp_ids[0] or False
    _defaults = {
        'date_from' : _default_date_from,
        'date_to' : _default_date_to,
        'state': 'new',
        'employee_id': _default_employee,
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'hr_timesheet_sheet.sheet', context=c)
    }
    def _sheet_date(self, cr, uid, ids, forced_user_id=False, context=None):
        # constraint helper: two sheets of the same user must not overlap
        for sheet in self.browse(cr, uid, ids, context=context):
            new_user_id = forced_user_id or sheet.user_id and sheet.user_id.id
            if new_user_id:
                cr.execute('SELECT id \
                    FROM hr_timesheet_sheet_sheet \
                    WHERE (date_from <= %s and %s <= date_to) \
                        AND user_id=%s \
                        AND id <> %s',(sheet.date_to, sheet.date_from, new_user_id, sheet.id))
                if cr.fetchall():
                    return False
        return True
    _constraints = [
        (_sheet_date, 'You cannot have 2 timesheets that overlap!\nPlease use the menu \'My Current Timesheet\' to avoid this problem.', ['date_from','date_to']),
    ]
    def action_set_to_draft(self, cr, uid, ids, *args):
        # reset to draft and restart the validation workflow
        self.write(cr, uid, ids, {'state': 'draft'})
        self.create_workflow(cr, uid, ids)
        return True
    def name_get(self, cr, uid, ids, context=None):
        # display each sheet as "Week <week number of its start date>"
        if not ids:
            return []
        if isinstance(ids, (long, int)):
            ids = [ids]
        return [(r['id'], _('Week ')+datetime.strptime(r['date_from'], '%Y-%m-%d').strftime('%U')) \
                for r in self.read(cr, uid, ids, ['date_from'],
                    context=context, load='_classic_write')]
    def unlink(self, cr, uid, ids, context=None):
        # refuse deleting confirmed/approved sheets or sheets with attendances
        sheets = self.read(cr, uid, ids, ['state','total_attendance'], context=context)
        for sheet in sheets:
            if sheet['state'] in ('confirm', 'done'):
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which is already confirmed.'))
            elif sheet['total_attendance'] <> 0.00:
                raise osv.except_osv(_('Invalid Action!'), _('You cannot delete a timesheet which have attendance entries.'))
        return super(hr_timesheet_sheet, self).unlink(cr, uid, ids, context=context)
    def onchange_employee_id(self, cr, uid, ids, employee_id, context=None):
        # propagate the employee's department and user onto the form
        department_id = False
        user_id = False
        if employee_id:
            empl_id = self.pool.get('hr.employee').browse(cr, uid, employee_id, context=context)
            department_id = empl_id.department_id.id
            user_id = empl_id.user_id.id
        return {'value': {'department_id': department_id, 'user_id': user_id,}}
    # ------------------------------------------------
    # OpenChatter methods and notifications
    # ------------------------------------------------
    def _needaction_domain_get(self, cr, uid, context=None):
        # sheets waiting for approval by the current user acting as manager
        emp_obj = self.pool.get('hr.employee')
        empids = emp_obj.search(cr, uid, [('parent_id.user_id', '=', uid)], context=context)
        if not empids:
            return False
        dom = ['&', ('state', '=', 'confirm'), ('employee_id', 'in', empids)]
        return dom
class account_analytic_line(osv.osv):
    _inherit = "account.analytic.line"
    def _get_default_date(self, cr, uid, context=None):
        """Default the line date to today, clamped into the timesheet's date
        range when that range is supplied through the context."""
        if context is None:
            context = {}
        #get the default date (should be: today)
        res = super(account_analytic_line, self)._get_default_date(cr, uid, context=context)
        #if we got the dates from and to from the timesheet and if the default date is in between, we use the default
        #but if the default isn't included in those dates, we use the date start of the timesheet as default
        if context.get('timesheet_date_from') and context.get('timesheet_date_to'):
            if context['timesheet_date_from'] <= res <= context['timesheet_date_to']:
                return res
            return context.get('timesheet_date_from')
        #if we don't get the dates from the timesheet, we return the default value from super()
        return res
class account_analytic_account(osv.osv):
    _inherit = "account.analytic.account"
    def name_create(self, cr, uid, name, context=None):
        """Allow quick record creation from timesheets, unless analytic
        templates are required by the user's groups."""
        if context is None:
            context = {}
        group_template_required = self.pool['res.users'].has_group(cr, uid, 'account_analytic_analysis.group_template_required')
        if not context.get('default_use_timesheets') or group_template_required:
            return super(account_analytic_account, self).name_create(cr, uid, name, context=context)
        rec_id = self.create(cr, uid, {self._rec_name: name}, context)
        return self.name_get(cr, uid, [rec_id], context)[0]
class hr_timesheet_line(osv.osv):
    _inherit = "hr.analytic.timesheet"
    def _sheet(self, cursor, user, ids, name, args, context=None):
        # functional field: locate the sheet whose period covers the line's
        # date for the same user
        sheet_obj = self.pool.get('hr_timesheet_sheet.sheet')
        res = {}.fromkeys(ids, False)
        for ts_line in self.browse(cursor, user, ids, context=context):
            sheet_ids = sheet_obj.search(cursor, user,
                [('date_to', '>=', ts_line.date), ('date_from', '<=', ts_line.date),
                 ('employee_id.user_id', '=', ts_line.user_id.id)],
                context=context)
            if sheet_ids:
                # [0] because only one sheet possible for an employee between 2 dates
                res[ts_line.id] = sheet_obj.name_get(cursor, user, sheet_ids, context=context)[0]
        return res
    def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
        # store trigger: timesheet lines impacted when a sheet changes
        ts_line_ids = []
        for ts in self.browse(cr, uid, ids, context=context):
            cr.execute("""
                    SELECT l.id
                        FROM hr_analytic_timesheet l
                    INNER JOIN account_analytic_line al
                        ON (l.line_id = al.id)
                    WHERE %(date_to)s >= al.date
                        AND %(date_from)s <= al.date
                        AND %(user_id)s = al.user_id
                    GROUP BY l.id""", {'date_from': ts.date_from,
                                        'date_to': ts.date_to,
                                        'user_id': ts.employee_id.user_id.id,})
            ts_line_ids.extend([row[0] for row in cr.fetchall()])
        return ts_line_ids
    def _get_account_analytic_line(self, cr, uid, ids, context=None):
        # store trigger: timesheet lines linked to changed analytic lines
        ts_line_ids = self.pool.get('hr.analytic.timesheet').search(cr, uid, [('line_id', 'in', ids)])
        return ts_line_ids
    _columns = {
        'sheet_id': fields.function(_sheet, string='Sheet', select="1",
            type='many2one', relation='hr_timesheet_sheet.sheet', ondelete="cascade",
            store={
                    'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
                    'account.analytic.line': (_get_account_analytic_line, ['user_id', 'date'], 10),
                    'hr.analytic.timesheet': (lambda self,cr,uid,ids,context=None: ids, None, 10),
                  },
            ),
    }
    def _check_sheet_state(self, cr, uid, ids, context=None):
        # constraint helper: lines may only change while the sheet is editable
        if context is None:
            context = {}
        for timesheet_line in self.browse(cr, uid, ids, context=context):
            if timesheet_line.sheet_id and timesheet_line.sheet_id.state not in ('draft', 'new'):
                return False
        return True
    _constraints = [
        (_check_sheet_state, 'You cannot modify an entry in a Confirmed/Done timesheet !', ['state']),
    ]
    def unlink(self, cr, uid, ids, *args, **kwargs):
        if isinstance(ids, (int, long)):
            ids = [ids]
        self._check(cr, uid, ids)
        return super(hr_timesheet_line,self).unlink(cr, uid, ids,*args, **kwargs)
    def _check(self, cr, uid, ids):
        # refuse any modification once the owning sheet is confirmed or done
        for att in self.browse(cr, uid, ids):
            if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
                raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet.'))
        return True
    def multi_on_change_account_id(self, cr, uid, ids, account_ids, context=None):
        # batch variant of on_change_account_id, keyed by account id
        return dict([(el, self.on_change_account_id(cr, uid, ids, el, context.get('user_id', uid))) for el in account_ids])
class hr_attendance(osv.osv):
    # Attendance record, extended to be linked to the timesheet sheet that
    # covers its date for the employee, and to refuse changes once that
    # sheet has been confirmed.
    _inherit = "hr.attendance"
    def _get_default_date(self, cr, uid, context=None):
        """Default attendance timestamp: either the day given in
        context['name'] combined with the current time, or 'now'."""
        if context is None:
            context = {}
        if 'name' in context:
            return context['name'] + time.strftime(' %H:%M:%S')
        return time.strftime('%Y-%m-%d %H:%M:%S')
    def _get_hr_timesheet_sheet(self, cr, uid, ids, context=None):
        """store= trigger: given *sheet* ids, return the attendance ids
        whose stored sheet_id must be recomputed."""
        attendance_ids = []
        for ts in self.browse(cr, uid, ids, context=context):
            # Nested INNER JOIN syntax: attendance -> employee -> resource,
            # matching attendances of the sheet's user inside its period.
            cr.execute("""
                        SELECT a.id
                          FROM hr_attendance a
                         INNER JOIN hr_employee e
                               INNER JOIN resource_resource r
                                       ON (e.resource_id = r.id)
                            ON (a.employee_id = e.id)
                         WHERE %(date_to)s >= date_trunc('day', a.name)
                               AND %(date_from)s <= a.name
                               AND %(user_id)s = r.user_id
                         GROUP BY a.id""", {'date_from': ts.date_from,
                                            'date_to': ts.date_to,
                                            'user_id': ts.employee_id.user_id.id,})
            attendance_ids.extend([row[0] for row in cr.fetchall()])
        return attendance_ids
    def _get_attendance_employee_tz(self, cr, uid, employee_id, date, context=None):
        """ Simulate timesheet in employee timezone
        Return the attendance date in string format in the employee
        tz converted from utc timezone as we consider date of employee
        timesheet is in employee timezone
        """
        employee_obj = self.pool['hr.employee']
        tz = False
        if employee_id:
            employee = employee_obj.browse(cr, uid, employee_id, context=context)
            # tz comes from the related user's partner; may be False/empty
            tz = employee.user_id.partner_id.tz
        if not date:
            date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        # fall back to UTC when the employee has no timezone configured
        att_tz = timezone(tz or 'utc')
        attendance_dt = datetime.strptime(date, DEFAULT_SERVER_DATETIME_FORMAT)
        att_tz_dt = pytz.utc.localize(attendance_dt)
        att_tz_dt = att_tz_dt.astimezone(att_tz)
        # We take only the date omiting the hours as we compare with timesheet
        # date_from which is a date format thus using hours would lead to
        # be out of scope of timesheet
        att_tz_date_str = datetime.strftime(att_tz_dt, DEFAULT_SERVER_DATE_FORMAT)
        return att_tz_date_str
    def _get_current_sheet(self, cr, uid, employee_id, date=False, context=None):
        """Return the id of the sheet covering *date* (employee timezone)
        for *employee_id*, or False when none exists."""
        sheet_obj = self.pool['hr_timesheet_sheet.sheet']
        if not date:
            date = time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        att_tz_date_str = self._get_attendance_employee_tz(
            cr, uid, employee_id,
            date=date, context=context)
        sheet_ids = sheet_obj.search(cr, uid,
            [('date_from', '<=', att_tz_date_str),
             ('date_to', '>=', att_tz_date_str),
             ('employee_id', '=', employee_id)],
            limit=1, context=context)
        return sheet_ids and sheet_ids[0] or False
    def _sheet(self, cursor, user, ids, name, args, context=None):
        """Function-field getter for sheet_id (False when no sheet covers
        the attendance date)."""
        res = {}.fromkeys(ids, False)
        for attendance in self.browse(cursor, user, ids, context=context):
            res[attendance.id] = self._get_current_sheet(
                cursor, user, attendance.employee_id.id, attendance.name,
                context=context)
        return res
    _columns = {
        'sheet_id': fields.function(_sheet, string='Sheet',
            type='many2one', relation='hr_timesheet_sheet.sheet',
            store={
                'hr_timesheet_sheet.sheet': (_get_hr_timesheet_sheet, ['employee_id', 'date_from', 'date_to'], 10),
                'hr.attendance': (lambda self,cr,uid,ids,context=None: ids, ['employee_id', 'name', 'day'], 10),
                },
            )
    }
    _defaults = {
        'name': _get_default_date,
    }
    def create(self, cr, uid, vals, context=None):
        """Refuse creating an attendance on a submitted sheet or outside
        the sheet's period."""
        if context is None:
            context = {}
        sheet_id = context.get('sheet_id') or self._get_current_sheet(cr, uid, vals.get('employee_id'), vals.get('name'), context=context)
        if sheet_id:
            att_tz_date_str = self._get_attendance_employee_tz(
                cr, uid, vals.get('employee_id'),
                date=vals.get('name'), context=context)
            ts = self.pool.get('hr_timesheet_sheet.sheet').browse(cr, uid, sheet_id, context=context)
            if ts.state not in ('draft', 'new'):
                raise osv.except_osv(_('Error!'), _('You can not enter an attendance in a submitted timesheet. Ask your manager to reset it before adding attendance.'))
            elif ts.date_from > att_tz_date_str or ts.date_to < att_tz_date_str:
                raise osv.except_osv(_('User Error!'), _('You can not enter an attendance date outside the current timesheet dates.'))
        return super(hr_attendance,self).create(cr, uid, vals, context=context)
    def unlink(self, cr, uid, ids, *args, **kwargs):
        """Forbid deleting attendances of a confirmed sheet."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        self._check(cr, uid, ids)
        return super(hr_attendance,self).unlink(cr, uid, ids,*args, **kwargs)
    def write(self, cr, uid, ids, vals, context=None):
        """Forbid edits on confirmed sheets; when called from a sheet view
        (context['sheet_id']) also forbid moving the attendance to a date
        covered by another sheet."""
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        self._check(cr, uid, ids)
        res = super(hr_attendance,self).write(cr, uid, ids, vals, context=context)
        if 'sheet_id' in context:
            for attendance in self.browse(cr, uid, ids, context=context):
                if context['sheet_id'] != attendance.sheet_id.id:
                    raise osv.except_osv(_('User Error!'), _('You cannot enter an attendance ' \
                            'date outside the current timesheet dates.'))
        return res
    def _check(self, cr, uid, ids):
        """Raise when any attendance belongs to a confirmed/done sheet."""
        for att in self.browse(cr, uid, ids):
            if att.sheet_id and att.sheet_id.state not in ('draft', 'new'):
                raise osv.except_osv(_('Error!'), _('You cannot modify an entry in a confirmed timesheet'))
        return True
class hr_timesheet_sheet_sheet_day(osv.osv):
    # Read-only reporting model backed by a SQL view: per-sheet, per-day
    # totals of timesheet hours vs attendance hours and their difference.
    _name = "hr_timesheet_sheet.sheet.day"
    _description = "Timesheets by Period"
    _auto = False  # no table is created; the view is built in init()
    _order='name'
    _columns = {
        'name': fields.date('Date', readonly=True),
        'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True, select="1"),
        'total_timesheet': fields.float('Total Timesheet', readonly=True),
        'total_attendance': fields.float('Attendance', readonly=True),
        'total_difference': fields.float('Difference', readonly=True),
    }
    # modules whose fields this view reads (for module update ordering)
    _depends = {
        'account.analytic.line': ['date', 'unit_amount'],
        'hr.analytic.timesheet': ['line_id', 'sheet_id'],
        'hr.attendance': ['action', 'name', 'sheet_id'],
    }
    def init(self, cr):
        # Attendance is accumulated in signed minutes (sign_in => -1,
        # sign_out => +1, see the inner query), so a negative daily sum
        # means a still-open session; it is corrected with the current
        # UTC time (or a full day for past dates) before dividing by 60.
        cr.execute("""create or replace view hr_timesheet_sheet_sheet_day as
            SELECT
                id,
                name,
                sheet_id,
                total_timesheet,
                total_attendance,
                cast(round(cast(total_attendance - total_timesheet as Numeric),2) as Double Precision) AS total_difference
            FROM
                ((
                    SELECT
                        MAX(id) as id,
                        name,
                        sheet_id,
                        SUM(total_timesheet) as total_timesheet,
                        CASE WHEN SUM(total_attendance) < 0
                            THEN (SUM(total_attendance) +
                                CASE WHEN current_date <> name
                                    THEN 1440
                                    ELSE (EXTRACT(hour FROM current_time AT TIME ZONE 'UTC') * 60) + EXTRACT(minute FROM current_time AT TIME ZONE 'UTC')
                                END
                                )
                            ELSE SUM(total_attendance)
                        END /60 as total_attendance
                    FROM
                        ((
                            select
                                min(hrt.id) as id,
                                l.date::date as name,
                                s.id as sheet_id,
                                sum(l.unit_amount) as total_timesheet,
                                0.0 as total_attendance
                            from
                                hr_analytic_timesheet hrt
                                JOIN account_analytic_line l ON l.id = hrt.line_id
                                LEFT JOIN hr_timesheet_sheet_sheet s ON s.id = hrt.sheet_id
                            group by l.date::date, s.id
                        ) union (
                            select
                                -min(a.id) as id,
                                a.name::date as name,
                                s.id as sheet_id,
                                0.0 as total_timesheet,
                                SUM(((EXTRACT(hour FROM a.name) * 60) + EXTRACT(minute FROM a.name)) * (CASE WHEN a.action = 'sign_in' THEN -1 ELSE 1 END)) as total_attendance
                            from
                                hr_attendance a
                                LEFT JOIN hr_timesheet_sheet_sheet s
                                ON s.id = a.sheet_id
                            WHERE action in ('sign_in', 'sign_out')
                            group by a.name::date, s.id
                        )) AS foo
                        GROUP BY name, sheet_id
                )) AS bar""")
class hr_timesheet_sheet_sheet_account(osv.osv):
    # Read-only reporting model backed by a SQL view: per-sheet totals of
    # timesheet time grouped by analytic account and invoice rate.
    _name = "hr_timesheet_sheet.sheet.account"
    _description = "Timesheets by Period"
    _auto = False  # no table is created; the view is built in init()
    _order='name'
    _columns = {
        'name': fields.many2one('account.analytic.account', 'Project / Analytic Account', readonly=True),
        'sheet_id': fields.many2one('hr_timesheet_sheet.sheet', 'Sheet', readonly=True),
        'total': fields.float('Total Time', digits=(16,2), readonly=True),
        'invoice_rate': fields.many2one('hr_timesheet_invoice.factor', 'Invoice rate', readonly=True),
    }
    # modules whose fields this view reads (for module update ordering)
    _depends = {
        'account.analytic.line': ['account_id', 'date', 'to_invoice', 'unit_amount', 'user_id'],
        'hr.analytic.timesheet': ['line_id'],
        'hr_timesheet_sheet.sheet': ['date_from', 'date_to', 'user_id'],
    }
    def init(self, cr):
        # The sheet is matched by joining on user and the sheet's date
        # range (no stored sheet_id on the analytic line here).
        cr.execute("""create or replace view hr_timesheet_sheet_sheet_account as (
            select
                min(hrt.id) as id,
                l.account_id as name,
                s.id as sheet_id,
                sum(l.unit_amount) as total,
                l.to_invoice as invoice_rate
            from
                hr_analytic_timesheet hrt
                left join (account_analytic_line l
                    LEFT JOIN hr_timesheet_sheet_sheet s
                        ON (s.date_to >= l.date
                            AND s.date_from <= l.date
                            AND s.user_id = l.user_id))
                    on (l.id = hrt.line_id)
            group by l.account_id, s.id, l.to_invoice
        )""")
class res_company(osv.osv):
    """Company-level settings controlling timesheet validation."""
    _inherit = 'res.company'

    _columns = {
        'timesheet_range': fields.selection(
            [('day', 'Day'), ('week', 'Week'), ('month', 'Month')], 'Timesheet range',
            help="Periodicity on which you validate your timesheets."),
        'timesheet_max_difference': fields.float(
            'Timesheet allowed difference(Hours)',
            help="Allowed difference in hours between the sign in/out and the timesheet "
                 "computation for one sheet. Set this to 0 if you do not want any control."),
    }

    _defaults = {
        # weekly sheets and no attendance/timesheet difference control
        'timesheet_range': lambda *a: 'week',
        'timesheet_max_difference': lambda *a: 0.0,
    }
class hr_employee(osv.osv):
    """Employee, extended with a counter of timesheet sheets (used by the
    stat button on the employee form)."""
    _inherit = 'hr.employee'
    _description = 'Employee'

    def _timesheet_count(self, cr, uid, ids, field_name, arg, context=None):
        """Function-field getter: number of timesheet sheets per employee."""
        sheet_pool = self.pool['hr_timesheet_sheet.sheet']
        counts = {}
        for employee_id in ids:
            counts[employee_id] = sheet_pool.search_count(
                cr, uid, [('employee_id', '=', employee_id)], context=context)
        return counts

    _columns = {
        'timesheet_count': fields.function(_timesheet_count, type='integer', string='Timesheets'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
wwj718/ANALYSE
|
lms/djangoapps/licenses/models.py
|
150
|
2538
|
import logging
from django.db import models, transaction
from student.models import User
from xmodule_django.models import CourseKeyField
log = logging.getLogger("edx.licenses")
class CourseSoftware(models.Model):
    # A piece of third-party software tied to one course run.
    name = models.CharField(max_length=255)        # short identifier
    full_name = models.CharField(max_length=255)   # human-readable title
    url = models.CharField(max_length=255)         # vendor / download page
    course_id = CourseKeyField(max_length=255)     # owning course
    def __unicode__(self):
        """Readable label: '<name> for <course_id>'."""
        return u'{0} for {1}'.format(self.name, self.course_id)
class UserLicense(models.Model):
    # One serial number of a piece of course software; ``user`` stays NULL
    # while the serial is unassigned (see _create_license in this module).
    software = models.ForeignKey(CourseSoftware, db_index=True)
    user = models.ForeignKey(User, null=True)
    serial = models.CharField(max_length=255)
def get_courses_licenses(user, courses):
    """Map each piece of software used by *courses* to the license *user*
    holds for it, or None when the user has no license for it."""
    course_ids = {course.id for course in courses}
    software_qs = CourseSoftware.objects.filter(course_id__in=course_ids)
    assigned = UserLicense.objects.filter(software__in=software_qs,
                                          user=user)
    license_map = dict.fromkeys(software_qs, None)
    for lic in assigned:
        license_map[lic.software] = lic
    log.info(assigned)
    log.info(license_map)
    return license_map
def get_license(user, software):
    """Return the license assigned to *user* for *software*, or None.

    A user may (incorrectly) hold several licenses for the same software;
    the first one found is returned.
    """
    # TODO: temporary fix for when somehow a user got more that one license.
    # The proper fix should use Meta.unique_together in the UserLicense model.
    #
    # Bug fix: the previous try/except UserLicense.DoesNotExist was dead
    # code — filter() never raises DoesNotExist (only get() does), and the
    # truthiness guard below already prevents an IndexError.
    licenses = UserLicense.objects.filter(user=user, software=software)
    return licenses[0] if licenses else None
def get_or_create_license(user, software):
    """Return the user's existing license for *software*, assigning a free
    serial to the user when none exists yet."""
    existing = get_license(user, software)
    if existing is not None:
        return existing
    return _create_license(user, software)
def _create_license(user, software):
    """Assign one free serial number of *software* to *user*.

    Returns the updated UserLicense, or None when no unassigned serial
    is left.
    """
    license = None
    try:
        # find one license that has not been assigned, locking the
        # table/rows with select_for_update to prevent race conditions
        with transaction.commit_on_success():
            selected = UserLicense.objects.select_for_update()
            # IndexError here means the queryset was empty (no free serials)
            license = selected.filter(user__isnull=True, software=software)[0]
            license.user = user
            license.save()
    except IndexError:
        # there are no free licenses
        log.error('No serial numbers available for %s', software)
        license = None
    # TODO [rocha]look if someone has unenrolled from the class
    # and already has a serial number
    return license
|
agpl-3.0
|
scalable-networks/gnuradio-3.7.0.1
|
grc/gui/Element.py
|
5
|
7564
|
"""
Copyright 2007, 2008, 2009 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
from Constants import LINE_SELECT_SENSITIVITY
from Constants import POSSIBLE_ROTATIONS
class Element(object):
    """
    GraphicalElement is the base class for all graphical elements.
    It contains an X,Y coordinate, a list of rectangular areas that the element occupies,
    and methods to detect selection of those areas.
    """

    def __init__(self):
        """
        Make a new list of rectangular areas and lines, and set the coordinate and the rotation.
        """
        self.set_rotation(POSSIBLE_ROTATIONS[0])
        self.set_coordinate((0, 0))
        self.clear()
        self.set_highlighted(False)

    def is_horizontal(self, rotation=None):
        """
        Is this element horizontal?
        If rotation is None, use this element's rotation.

        Args:
            rotation: the optional rotation (degrees)

        Returns:
            true if rotation is horizontal
        """
        # Bug fix: `rotation or self.get_rotation()` treated an explicit
        # rotation of 0 as "not given" (0 is falsy) and silently used the
        # element's own rotation instead — test against None explicitly.
        if rotation is None:
            rotation = self.get_rotation()
        return rotation in (0, 180)

    def is_vertical(self, rotation=None):
        """
        Is this element vertical?
        If rotation is None, use this element's rotation.

        Args:
            rotation: the optional rotation (degrees)

        Returns:
            true if rotation is vertical
        """
        # Same falsy-zero fix as in is_horizontal: 0 is a valid rotation.
        if rotation is None:
            rotation = self.get_rotation()
        return rotation in (90, 270)

    def create_labels(self):
        """
        Create labels (if applicable) and call on all children.
        Call this base method before creating labels in the element.
        """
        for child in self.get_children(): child.create_labels()

    def create_shapes(self):
        """
        Create shapes (if applicable) and call on all children.
        Call this base method before creating shapes in the element.
        """
        self.clear()
        for child in self.get_children(): child.create_shapes()

    def draw(self, gc, window, border_color, bg_color):
        """
        Draw in the given window.

        Args:
            gc: the graphics context
            window: the gtk window to draw on
            border_color: the color for lines and rectangle borders
            bg_color: the color for the inside of the rectangle
        """
        X,Y = self.get_coordinate()
        for (rX,rY),(W,H) in self._areas_list:
            aX = X + rX
            aY = Y + rY
            # filled rectangle first, border on top
            gc.set_foreground(bg_color)
            window.draw_rectangle(gc, True, aX, aY, W, H)
            gc.set_foreground(border_color)
            window.draw_rectangle(gc, False, aX, aY, W, H)
        for (x1, y1),(x2, y2) in self._lines_list:
            gc.set_foreground(border_color)
            window.draw_line(gc, X+x1, Y+y1, X+x2, Y+y2)

    def rotate(self, rotation):
        """
        Rotate all of the areas by 90 degrees.

        Args:
            rotation: multiple of 90 degrees
        """
        self.set_rotation((self.get_rotation() + rotation)%360)

    def clear(self):
        """Empty the lines and areas."""
        self._areas_list = list()
        self._lines_list = list()

    def set_coordinate(self, coor):
        """
        Set the reference coordinate.

        Args:
            coor: the coordinate tuple (x,y)
        """
        self.coor = coor

    def get_parent(self):
        """
        Get the parent of this element.

        Returns:
            the parent
        """
        return self.parent

    def set_highlighted(self, highlighted):
        """
        Set the highlight status.

        Args:
            highlighted: true to enable highlighting
        """
        self.highlighted = highlighted

    def is_highlighted(self):
        """
        Get the highlight status.

        Returns:
            true if highlighted
        """
        return self.highlighted

    def get_coordinate(self):
        """Get the coordinate.

        Returns:
            the coordinate tuple (x,y)
        """
        return self.coor

    def move(self, delta_coor):
        """
        Move the element by adding the delta_coor to the current coordinate.

        Args:
            delta_coor: (delta_x,delta_y) tuple
        """
        deltaX, deltaY = delta_coor
        X, Y = self.get_coordinate()
        self.set_coordinate((X+deltaX, Y+deltaY))

    def add_area(self, rel_coor, area):
        """
        Add an area to the area list.
        An area is actually a coordinate relative to the main coordinate
        with a width/height pair relative to the area coordinate.
        A positive width is to the right of the coordinate.
        A positive height is above the coordinate.
        The area is associated with a rotation.

        Args:
            rel_coor: (x,y) offset from this element's coordinate
            area: (width,height) tuple
        """
        self._areas_list.append((rel_coor, area))

    def add_line(self, rel_coor1, rel_coor2):
        """
        Add a line to the line list.
        A line is defined by 2 relative coordinates.
        Lines must be horizontal or vertical.
        The line is associated with a rotation.

        Args:
            rel_coor1: relative (x1,y1) tuple
            rel_coor2: relative (x2,y2) tuple
        """
        self._lines_list.append((rel_coor1, rel_coor2))

    def what_is_selected(self, coor, coor_m=None):
        """
        One coordinate specified:
            Is this element selected at given coordinate?
            ie: is the coordinate encompassed by one of the areas or lines?
        Both coordinates specified:
            Is this element within the rectangular region defined by both coordinates?
            ie: do any area corners or line endpoints fall within the region?

        Args:
            coor: the selection coordinate, tuple x, y
            coor_m: an additional selection coordinate.

        Returns:
            self if one of the areas/lines encompasses coor, else None.
        """
        #function to test if p is between a and b (inclusive)
        in_between = lambda p, a, b: p >= min(a, b) and p <= max(a, b)
        #relative coordinate
        x, y = [a-b for a,b in zip(coor, self.get_coordinate())]
        if coor_m:
            x_m, y_m = [a-b for a,b in zip(coor_m, self.get_coordinate())]
            #handle rectangular areas: any of the 4 corners in the region
            for (x1,y1), (w,h) in self._areas_list:
                if in_between(x1, x, x_m) and in_between(y1, y, y_m) or \
                    in_between(x1+w, x, x_m) and in_between(y1, y, y_m) or \
                    in_between(x1, x, x_m) and in_between(y1+h, y, y_m) or \
                    in_between(x1+w, x, x_m) and in_between(y1+h, y, y_m):
                    return self
            #handle horizontal or vertical lines: either endpoint in the region
            for (x1, y1), (x2, y2) in self._lines_list:
                if in_between(x1, x, x_m) and in_between(y1, y, y_m) or \
                    in_between(x2, x, x_m) and in_between(y2, y, y_m):
                    return self
            return None
        else:
            #handle rectangular areas
            for (x1,y1), (w,h) in self._areas_list:
                if in_between(x, x1, x1+w) and in_between(y, y1, y1+h): return self
            #handle horizontal or vertical lines (with click tolerance)
            for (x1, y1), (x2, y2) in self._lines_list:
                if x1 == x2: x1, x2 = x1-LINE_SELECT_SENSITIVITY, x2+LINE_SELECT_SENSITIVITY
                if y1 == y2: y1, y2 = y1-LINE_SELECT_SENSITIVITY, y2+LINE_SELECT_SENSITIVITY
                if in_between(x, x1, x2) and in_between(y, y1, y2): return self
            return None

    def get_rotation(self):
        """
        Get the rotation in degrees.

        Returns:
            the rotation
        """
        return self.rotation

    def set_rotation(self, rotation):
        """
        Set the rotation in degrees.

        Args:
            rotation: the rotation"""
        if rotation not in POSSIBLE_ROTATIONS:
            raise Exception('"%s" is not one of the possible rotations: (%s)'%(rotation, POSSIBLE_ROTATIONS))
        self.rotation = rotation
|
gpl-3.0
|
alkemics/luigi
|
examples/wordcount_hadoop.py
|
66
|
2728
|
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import luigi
import luigi.contrib.hadoop
import luigi.contrib.hdfs
# To make this run, you probably want to edit /etc/luigi/client.cfg and add something like:
#
# [hadoop]
# jar: /usr/lib/hadoop-xyz/hadoop-streaming-xyz-123.jar
class InputText(luigi.ExternalTask):
    """
    External dependency: a dated text file that something *outside* of Luigi
    is expected to drop into HDFS (this is a :py:class:`luigi.task.ExternalTask`,
    so Luigi never runs it — it only checks that the output exists).
    """

    date = luigi.DateParameter()

    def output(self):
        """
        Return the HDFS target this task is expected to provide.

        :return: the target output for this task.
        :rtype: object (:py:class:`luigi.target.Target`)
        """
        path = self.date.strftime('/tmp/text/%Y-%m-%d.txt')
        return luigi.contrib.hdfs.HdfsTarget(path)
class WordCount(luigi.contrib.hadoop.JobTask):
    """
    Hadoop-streaming job (:py:class:`luigi.contrib.hadoop.JobTask`) that
    counts word occurrences over every :py:class:`InputText` file in the
    requested date interval and writes the totals to HDFS.
    """

    date_interval = luigi.DateIntervalParameter()

    def requires(self):
        """One :py:class:`InputText` dependency per day of the interval."""
        return [InputText(day) for day in self.date_interval.dates()]

    def output(self):
        """HDFS target that will hold the aggregated word counts."""
        return luigi.contrib.hdfs.HdfsTarget('/tmp/text-count/%s' % self.date_interval)

    def mapper(self, line):
        """Emit (word, 1) for every whitespace-separated token of *line*."""
        for token in line.strip().split():
            yield token, 1

    def reducer(self, key, values):
        """Sum the per-word partial counts."""
        yield key, sum(values)
if __name__ == '__main__':
luigi.run()
|
apache-2.0
|
MingyuanXie/CopyNet
|
experiments/synthetic.py
|
1
|
2907
|
__author__ = 'jiataogu'
from emolga.dataset.build_dataset import deserialize_from_file, serialize_to_file
import numpy.random as n_rng
n_rng.seed(19920206)
# the vocabulary: all three-digit strings '000' .. '999'
tmp = [chr(x) for x in range(48, 58)] # '0', '1', ... , '9'
voc = [tmp[a] + tmp[b] + tmp[c]
       for c in xrange(10)
       for b in xrange(10)
       for a in xrange(10)]
# indices 0 and 1 are reserved for the special tokens below
word2idx = {voc[k]: k + 2 for k in xrange(len(voc))}
word2idx['<eol>'] = 0
word2idx['<unk>'] = 1
idx2word = {word2idx[w]: w for w in word2idx}
voc = ['<eol>', '<unk>'] + voc
# word2idx['X'] = len(voc)
# idx2word[len(voc)] = 'X'
# voc += ['X']
#
# word2idx['Y'] = len(voc)
# idx2word[len(voc)] = 'Y'
# voc += ['Y']
# print word2idx['X'], word2idx['Y']
# load the dataset (rule templates containing the placeholders 'X'/'Y')
Rules, _ = deserialize_from_file('/home/thoma/Work/Dial-DRL/dataset/rules.rnd.n10k.pkl')
num = 200       # number of rules used
repeats = 100   # sampled instances per rule
maxleg = 15     # maximum operand length
Lmax = len(idx2word)   # offset for copy-pointer targets in build_instance
rules = dict(source=Rules['source'][:num],
             target=Rules['target'][:num])
def ftr(v):
    """Format *v* as a zero-padded three-character decimal string."""
    text = str(v)
    if v >= 100:
        return text
    if v >= 10:
        return '0' + text
    return '00' + text
def build_instance():
    """Generate the synthetic copy-task dataset.

    For each of the `num` rules (token sequences containing the
    placeholders 'X' and 'Y'), sample `repeats` pairs of random digit
    sequences, substitute them for the placeholders, and record both the
    vocabulary-index encoding and the copy-pointer encoding of the target.
    """
    instance = dict(x=[], y=[], source=[], target=[], target_c=[], rule_id=[], rule=[])
    for k in xrange(num):
        source = rules['source'][k]
        target = rules['target'][k]
        for j in xrange(repeats):
            # random operands: 1..maxleg values in [0, 1000)
            X = n_rng.randint(1000, size= n_rng.randint(maxleg) + 1)
            Y = n_rng.randint(1000, size= n_rng.randint(maxleg) + 1)
            S = []
            T = []
            # Bug fix: the original used `w is 'X'` / `w is 'Y'`, an
            # identity comparison with a string literal that only works
            # through CPython interning (and warns on 3.8+); use ==.
            for w in source:
                if w == 'X':
                    S += [ftr(v) for v in X]
                elif w == 'Y':
                    S += [ftr(v) for v in Y]
                else:
                    S += [w]
            for w in target:
                if w == 'X':
                    T += [ftr(v) for v in X]
                elif w == 'Y':
                    T += [ftr(v) for v in Y]
                else:
                    T += [w]
            A = [word2idx[w] for w in S]
            B = [word2idx[w] for w in T]
            # copy-pointer target: 0 => generate the token from the
            # vocabulary, Lmax + pos => copy it from source position pos.
            C = [0 if w not in S else S.index(w) + Lmax for w in T]
            instance['x'] += [S]
            instance['y'] += [T]
            instance['source'] += [A]
            instance['target'] += [B]
            instance['target_c'] += [C]
            instance['rule_id'] += [k]
            instance['rule'] += [' '.join(source) + ' -> ' + ' '.join(target)]
    return instance
# Materialize train/test splits (same generator, independent random draws)
train_set = build_instance()
print 'build ok.'
test_set = build_instance()
print 'build ok.'
serialize_to_file([train_set, test_set, idx2word, word2idx], '/home/thoma/Work/Dial-DRL/dataset/synthetic_data_c.pkl')
# serialize_to_file([train_set, test_set], '/home/thoma/Work/Dial-DRL/dataset/synthetic_data.pkl')
|
mit
|
yencarnacion/jaikuengine
|
.google_appengine/lib/django-1.4/django/utils/simplejson/encoder.py
|
430
|
15620
|
"""Implementation of JSONEncoder
"""
import re
# Placeholders for the optional C accelerators; this bundled copy always
# runs the pure-Python implementations.
c_encode_basestring_ascii = None
c_make_encoder = None
# Characters that must always be escaped inside a JSON string.
ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')
# In ASCII-only mode, additionally escape everything outside printable ASCII.
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects byte strings that need UTF-8 decoding before escaping.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Short two-character escape sequences defined by JSON.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
}
for i in range(0x20):
    # remaining control characters fall back to the \u00XX form
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
# Assume this produces an infinity on all machines (probably not guaranteed)
INFINITY = float('1e66666')
FLOAT_REPR = repr
def encode_basestring(s):
    """Return a JSON string literal for the Python string *s*
    (non-ASCII characters are passed through unescaped)."""
    escaped = ESCAPE.sub(lambda match: ESCAPE_DCT[match.group(0)], s)
    return '"%s"' % (escaped,)
def py_encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    # Python 2 path: promote UTF-8 byte strings to unicode so that ord()
    # below yields code points rather than raw bytes.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            n = ord(s)
            if n < 0x10000:
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
# Use the C implementation when available (always the Python one here).
encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii
class JSONEncoder(object):
    """Extensible JSON <http://json.org> encoder for Python data structures.

    Supports the following objects and types by default:

    +-------------------+---------------+
    | Python            | JSON          |
    +===================+===============+
    | dict              | object        |
    +-------------------+---------------+
    | list, tuple       | array         |
    +-------------------+---------------+
    | str, unicode      | string        |
    +-------------------+---------------+
    | int, long, float  | number        |
    +-------------------+---------------+
    | True              | true          |
    +-------------------+---------------+
    | False             | false         |
    +-------------------+---------------+
    | None              | null          |
    +-------------------+---------------+

    To extend this to recognize other objects, subclass and implement a
    ``.default()`` method with another method that returns a serializable
    object for ``o`` if possible, otherwise it should call the superclass
    implementation (to raise ``TypeError``).
    """
    # class-level defaults; instance values may be overridden via the
    # ``separators`` constructor argument
    item_separator = ', '
    key_separator = ': '
    def __init__(self, skipkeys=False, ensure_ascii=True,
            check_circular=True, allow_nan=True, sort_keys=False,
            indent=None, separators=None, encoding='utf-8', default=None):
        """Constructor for JSONEncoder, with sensible defaults.

        If skipkeys is False, then it is a TypeError to attempt
        encoding of keys that are not str, int, long, float or None.  If
        skipkeys is True, such items are simply skipped.

        If ensure_ascii is True, the output is guaranteed to be str
        objects with all incoming unicode characters escaped.  If
        ensure_ascii is false, the output will be unicode object.

        If check_circular is True, then lists, dicts, and custom encoded
        objects will be checked for circular references during encoding to
        prevent an infinite recursion (which would cause an OverflowError).
        Otherwise, no such check takes place.

        If allow_nan is True, then NaN, Infinity, and -Infinity will be
        encoded as such.  This behavior is not JSON specification compliant,
        but is consistent with most JavaScript based encoders and decoders.
        Otherwise, it will be a ValueError to encode such floats.

        If sort_keys is True, then the output of dictionaries will be
        sorted by key; this is useful for regression tests to ensure
        that JSON serializations can be compared on a day-to-day basis.

        If indent is a non-negative integer, then JSON array
        elements and object members will be pretty-printed with that
        indent level.  An indent level of 0 will only insert newlines.
        None is the most compact representation.

        If specified, separators should be a (item_separator, key_separator)
        tuple.  The default is (', ', ': ').  To get the most compact JSON
        representation you should specify (',', ':') to eliminate whitespace.

        If specified, default is a function that gets called for objects
        that can't otherwise be serialized.  It should return a JSON encodable
        version of the object or raise a ``TypeError``.

        If encoding is not None, then all input strings will be
        transformed into unicode using that encoding prior to JSON-encoding.
        The default is UTF-8.
        """
        self.skipkeys = skipkeys
        self.ensure_ascii = ensure_ascii
        self.check_circular = check_circular
        self.allow_nan = allow_nan
        self.sort_keys = sort_keys
        self.indent = indent
        if separators is not None:
            self.item_separator, self.key_separator = separators
        if default is not None:
            # shadows the ``default`` method on this instance
            self.default = default
        self.encoding = encoding
    def default(self, o):
        """Implement this method in a subclass such that it returns
        a serializable object for ``o``, or calls the base implementation
        (to raise a ``TypeError``).

        For example, to support arbitrary iterators, you could
        implement default like this::

            def default(self, o):
                try:
                    iterable = iter(o)
                except TypeError:
                    pass
                else:
                    return list(iterable)
                return JSONEncoder.default(self, o)
        """
        raise TypeError("%r is not JSON serializable" % (o,))
    def encode(self, o):
        """Return a JSON string representation of a Python data structure.

        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
        '{"foo": ["bar", "baz"]}'
        """
        # This is for extremely simple cases and benchmarks.
        if isinstance(o, basestring):
            if isinstance(o, str):
                _encoding = self.encoding
                if (_encoding is not None
                        and not (_encoding == 'utf-8')):
                    o = o.decode(_encoding)
            if self.ensure_ascii:
                return encode_basestring_ascii(o)
            else:
                return encode_basestring(o)
        # This doesn't pass the iterator directly to ''.join() because the
        # exceptions aren't as detailed.  The list call should be roughly
        # equivalent to the PySequence_Fast that ''.join() would do.
        chunks = self.iterencode(o, _one_shot=True)
        if not isinstance(chunks, (list, tuple)):
            chunks = list(chunks)
        return ''.join(chunks)
    def iterencode(self, o, _one_shot=False):
        """Encode the given object and yield each string
        representation as available.

        For example::

            for chunk in JSONEncoder().iterencode(bigobject):
                mysocket.write(chunk)
        """
        if self.check_circular:
            markers = {}
        else:
            markers = None
        if self.ensure_ascii:
            _encoder = encode_basestring_ascii
        else:
            _encoder = encode_basestring
        if self.encoding != 'utf-8':
            # wrap the encoder so byte strings are decoded first
            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
                if isinstance(o, str):
                    o = o.decode(_encoding)
                return _orig_encoder(o)
        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
            # Check for specials.  Note that this type of test is processor- and/or
            # platform-specific, so do tests which don't depend on the internals.
            # (o != o) is the classic NaN test.
            if o != o:
                text = 'NaN'
            elif o == _inf:
                text = 'Infinity'
            elif o == _neginf:
                text = '-Infinity'
            else:
                return _repr(o)
            if not allow_nan:
                raise ValueError("Out of range float values are not JSON compliant: %r"
                    % (o,))
            return text
        # the C encoder only supports the compact, unsorted case
        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:
            _iterencode = c_make_encoder(
                markers, self.default, _encoder, self.indent,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, self.allow_nan)
        else:
            _iterencode = _make_iterencode(
                markers, self.default, _encoder, self.indent, floatstr,
                self.key_separator, self.item_separator, self.sort_keys,
                self.skipkeys, _one_shot)
        return _iterencode(o, 0)
def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
        ## HACK: hand-optimized bytecode; turn globals into locals
        False=False,
        True=True,
        ValueError=ValueError,
        basestring=basestring,
        dict=dict,
        float=float,
        id=id,
        int=int,
        isinstance=isinstance,
        list=list,
        long=long,
        str=str,
        tuple=tuple,
    ):
    # Build and return a recursive generator-based JSON encoder.  The builtins
    # above are rebound as parameter defaults so lookups inside the hot
    # generators compile to LOAD_FAST instead of LOAD_GLOBAL (a CPython 2
    # speed hack).  `basestring` and `long` make this function Python 2 only.
    # In every type dispatch below, the True/False identity checks MUST come
    # before the (int, long) isinstance check, because bool is an int subclass.
    def _iterencode_list(lst, _current_indent_level):
        # Yield the serialized chunks for a list or tuple.
        if not lst:
            yield '[]'
            return
        if markers is not None:
            # Circular-reference detection, keyed by object identity.
            markerid = id(lst)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = lst
        buf = '['
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            separator = _item_separator + newline_indent
            buf += newline_indent
        else:
            newline_indent = None
            separator = _item_separator
        first = True
        for value in lst:
            if first:
                first = False
            else:
                # After the first item, the pending prefix is just the separator.
                buf = separator
            if isinstance(value, basestring):
                yield buf + _encoder(value)
            elif value is None:
                yield buf + 'null'
            elif value is True:
                yield buf + 'true'
            elif value is False:
                yield buf + 'false'
            elif isinstance(value, (int, long)):
                yield buf + str(value)
            elif isinstance(value, float):
                yield buf + _floatstr(value)
            else:
                # Container or custom object: emit the prefix, then recurse.
                yield buf
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield ']'
        if markers is not None:
            del markers[markerid]
    def _iterencode_dict(dct, _current_indent_level):
        # Yield the serialized chunks for a dict.
        if not dct:
            yield '{}'
            return
        if markers is not None:
            markerid = id(dct)
            if markerid in markers:
                raise ValueError("Circular reference detected")
            markers[markerid] = dct
        yield '{'
        if _indent is not None:
            _current_indent_level += 1
            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
            item_separator = _item_separator + newline_indent
            yield newline_indent
        else:
            newline_indent = None
            item_separator = _item_separator
        first = True
        if _sort_keys:
            # Sort by key for deterministic output (Python 2: items()/sort()).
            items = dct.items()
            items.sort(key=lambda kv: kv[0])
        else:
            items = dct.iteritems()
        for key, value in items:
            if isinstance(key, basestring):
                pass
            # JavaScript is weakly typed for these, so it makes sense to
            # also allow them. Many encoders seem to do something like this.
            elif isinstance(key, float):
                key = _floatstr(key)
            elif isinstance(key, (int, long)):
                key = str(key)
            elif key is True:
                key = 'true'
            elif key is False:
                key = 'false'
            elif key is None:
                key = 'null'
            elif _skipkeys:
                # skipkeys=True silently drops non-serializable keys.
                continue
            else:
                raise TypeError("key %r is not a string" % (key,))
            if first:
                first = False
            else:
                yield item_separator
            yield _encoder(key)
            yield _key_separator
            if isinstance(value, basestring):
                yield _encoder(value)
            elif value is None:
                yield 'null'
            elif value is True:
                yield 'true'
            elif value is False:
                yield 'false'
            elif isinstance(value, (int, long)):
                yield str(value)
            elif isinstance(value, float):
                yield _floatstr(value)
            else:
                if isinstance(value, (list, tuple)):
                    chunks = _iterencode_list(value, _current_indent_level)
                elif isinstance(value, dict):
                    chunks = _iterencode_dict(value, _current_indent_level)
                else:
                    chunks = _iterencode(value, _current_indent_level)
                for chunk in chunks:
                    yield chunk
        if newline_indent is not None:
            _current_indent_level -= 1
            yield '\n' + (' ' * (_indent * _current_indent_level))
        yield '}'
        if markers is not None:
            del markers[markerid]
    def _iterencode(o, _current_indent_level):
        # Top-level dispatch; scalars are yielded directly, containers
        # delegate to the specialized generators above.
        if isinstance(o, basestring):
            yield _encoder(o)
        elif o is None:
            yield 'null'
        elif o is True:
            yield 'true'
        elif o is False:
            yield 'false'
        elif isinstance(o, (int, long)):
            yield str(o)
        elif isinstance(o, float):
            yield _floatstr(o)
        elif isinstance(o, (list, tuple)):
            for chunk in _iterencode_list(o, _current_indent_level):
                yield chunk
        elif isinstance(o, dict):
            for chunk in _iterencode_dict(o, _current_indent_level):
                yield chunk
        else:
            if markers is not None:
                markerid = id(o)
                if markerid in markers:
                    raise ValueError("Circular reference detected")
                markers[markerid] = o
            # Unknown type: let the user-supplied default() convert it,
            # then encode whatever it returned.
            o = _default(o)
            for chunk in _iterencode(o, _current_indent_level):
                yield chunk
            if markers is not None:
                del markers[markerid]
    return _iterencode
|
apache-2.0
|
kastriothaliti/techstitution
|
venv/lib/python3.5/site-packages/pip/_vendor/requests/utils.py
|
319
|
24163
|
# -*- coding: utf-8 -*-
"""
requests.utils
~~~~~~~~~~~~~~
This module provides utility functions that are used within Requests
that are also useful for external consumption.
"""
import cgi
import codecs
import collections
import io
import os
import re
import socket
import struct
import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
builtin_str, getproxies, proxy_bypass, urlunparse,
basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL, InvalidHeader, FileModeWarning
_hush_pyflakes = (RequestsCookieJar,)
NETRC_FILES = ('.netrc', '_netrc')
DEFAULT_CA_BUNDLE_PATH = certs.where()
def dict_to_sequence(d):
    """Returns an internal sequence dictionary update.

    Mappings are flattened into their (key, value) pairs; any other
    object is handed back untouched.
    """
    return d.items() if hasattr(d, 'items') else d
def super_len(o):
    """Best-effort remaining byte length of *o*.

    Tries ``len()``, a ``len`` attribute, ``getvalue()`` (in-memory streams)
    and ``fileno()`` + ``fstat`` (real files), then subtracts the current
    ``tell()`` position when one is available.
    """
    length = 0
    position = 0

    if hasattr(o, '__len__'):
        length = len(o)
    elif hasattr(o, 'len'):
        length = o.len
    elif hasattr(o, 'getvalue'):
        # e.g. BytesIO, cStringIO.StringIO
        length = len(o.getvalue())
    elif hasattr(o, 'fileno'):
        try:
            fd = o.fileno()
        except io.UnsupportedOperation:
            # File-like object without a real descriptor; leave length at 0.
            pass
        else:
            length = os.fstat(fd).st_size

            # Having used fstat to determine the file length, we need to
            # confirm that this file was opened up in binary mode.
            if 'b' not in o.mode:
                warnings.warn((
                    "Requests has determined the content-length for this "
                    "request using the binary size of the file: however, the "
                    "file has been opened in text mode (i.e. without the 'b' "
                    "flag in the mode). This may lead to an incorrect "
                    "content-length. In Requests 3.0, support will be removed "
                    "for files in text mode."),
                    FileModeWarning
                )

    if hasattr(o, 'tell'):
        try:
            position = o.tell()
        except (OSError, IOError):
            # This can happen in some weird situations, such as when the file
            # is actually a special file descriptor like stdin. In this
            # instance, we don't know what the length is, so set it to zero and
            # let requests chunk it instead.
            position = length

    return max(0, length - position)
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc."""
    try:
        from netrc import netrc, NetrcParseError

        netrc_path = None
        for candidate in NETRC_FILES:
            try:
                loc = os.path.expanduser('~/{0}'.format(candidate))
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See http://bugs.python.org/issue20164 &
                # https://github.com/kennethreitz/requests/issues/1846
                return
            if os.path.exists(loc):
                netrc_path = loc
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip port numbers from netloc. This weird `if...encode`` dance is
        # used for Python 3.2, which doesn't support unicode literals.
        splitstr = b':'
        if isinstance(url, str):
            splitstr = splitstr.decode('ascii')
        host = ri.netloc.split(splitstr)[0]

        try:
            entry = netrc(netrc_path).authenticators(host)
            if entry:
                # Prefer the login field; fall back to the account field.
                login_index = 0 if entry[0] else 1
                return (entry[login_index], entry[2])
        except (NetrcParseError, IOError):
            # If there was a parsing error or a permissions issue reading the
            # file, skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise
    except (ImportError, AttributeError):
        # AppEngine hackiness.
        pass
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    candidate = getattr(obj, 'name', None)
    # Pseudo-files such as "<stdin>" are wrapped in angle brackets and carry
    # no useful filename.
    looks_like_a_path = (candidate
                         and isinstance(candidate, basestring)
                         and candidate[0] != '<'
                         and candidate[-1] != '>')
    if looks_like_a_path:
        return os.path.basename(candidate)
def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        ValueError: need more than 1 value to unpack
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None

    # Scalars cannot be interpreted as (key, value) pairs.
    scalar_types = (str, bytes, bool, int)
    if isinstance(value, scalar_types):
        raise ValueError('cannot encode objects that are not 2-tuples')

    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        ValueError: cannot encode objects that are not 2-tuples.

    :param value: mapping, iterable of 2-tuples, or None.
    :raises ValueError: if *value* is a scalar (str/bytes/bool/int).
    :rtype: list
    """
    # BUG FIX: `collections.Mapping` was an alias removed in Python 3.10
    # (the ABC lives in `collections.abc` since 3.3); import compatibly.
    try:
        from collections.abc import Mapping  # Python 3.3+
    except ImportError:  # pragma: no cover - Python 2
        from collections import Mapping

    if value is None:
        return None

    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError('cannot encode objects that are not 2-tuples')

    if isinstance(value, Mapping):
        value = value.items()

    return list(value)
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    In particular, parse comma-separated lists where the elements of
    the list may include quoted-strings. A quoted-string could
    contain a comma. A non-quoted string could have quotes in the
    middle. Quotes are removed automatically after parsing.

    It basically works like :func:`parse_set_header` just that items
    may appear multiple times and case sensitivity is preserved.

    The return value is a standard :class:`list`:

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    def _strip_quotes(item):
        # Quoted-string items are unescaped and lose their surrounding quotes.
        if item[:1] == item[-1:] == '"':
            return unquote_header_value(item[1:-1])
        return item

    return [_strip_quotes(item) for item in _parse_list_header(value)]
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    result = {}
    for item in _parse_list_header(value):
        name, sep, val = item.partition('=')
        if not sep:
            # Bare token without '=': record it with a None value.
            result[item] = None
            continue
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        result[name] = val
    return result
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value.  (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :rtype: str
    """
    if not (value and value[0] == value[-1] == '"'):
        # Not a quoted-string: hand back unchanged.
        return value

    # This is not the real unquoting, but fixing this so that the
    # RFC is met will result in bugs with internet explorer and
    # probably some other browsers as well.  IE for example is
    # uploading files with "C:\foo\bar.txt" as filename.
    inner = value[1:-1]

    # If this is a filename that starts like a UNC path, leave the
    # backslashes alone: collapsing the leading double slash would
    # break _fix_ie_filename(). See #458.
    if is_filename and inner[:2] == '\\\\':
        return inner
    return inner.replace('\\\\', '\\').replace('\\"', '"')
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    Later cookies with the same name overwrite earlier ones, exactly as
    the original accumulation loop did.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    return dict((cookie.name, cookie.value) for cookie in cj)
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """
    cj.update(cookiejar_from_dict(cookie_dict))
    return cj
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    Scans ``<meta charset=...>``, ``<meta content=...charset=...>`` pragma
    declarations and XML prologs, in that order.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn((
        'In requests 3.0, get_encodings_from_content will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
def _parse_content_type_header(header):
    """Split a Content-Type style header into its value and parameters.

    Returns ``(content_type, params_dict)`` where parameter keys are
    lower-cased and values have surrounding quotes/spaces stripped.
    Valueless parameters are ignored, matching ``cgi.parse_header()``.
    """
    tokens = header.split(';')
    content_type, params = tokens[0].strip(), tokens[1:]
    params_dict = {}
    items_to_strip = "\"' "

    for param in params:
        param = param.strip()
        index_of_equals = param.find('=')
        if index_of_equals == -1:
            continue
        key = param[:index_of_equals].strip(items_to_strip).lower()
        value = param[index_of_equals + 1:].strip(items_to_strip)
        params_dict[key] = value
    return content_type, params_dict


def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str (codec name), or None when no encoding can be determined.
    """
    content_type = headers.get('content-type')

    if not content_type:
        return None

    # BUG FIX: this used cgi.parse_header(); the `cgi` module was removed in
    # Python 3.13 (PEP 594), so parse the header locally instead.
    content_type, params = _parse_content_type_header(content_type)

    if 'charset' in params:
        return params['charset'].strip("'\"")

    if 'text' in content_type:
        # RFC 2616 default charset for text/* media types.
        return 'ISO-8859-1'
def stream_decode_response_unicode(iterator, r):
    """Stream decodes a iterator.

    When the response has no known encoding, chunks pass through unchanged;
    otherwise they are fed through an incremental decoder so multi-byte
    sequences split across chunks decode correctly.
    """
    encoding = r.encoding

    if encoding is None:
        for chunk in iterator:
            yield chunk
        return

    decoder = codecs.getincrementaldecoder(encoding)(errors='replace')
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush whatever the decoder is still buffering.
    tail = decoder.decode(b'', final=True)
    if tail:
        yield tail
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    if slice_length is None or slice_length <= 0:
        # A missing or non-positive length means "everything in one slice";
        # `or 1` keeps the range step legal for empty input (yields nothing).
        slice_length = len(string) or 1
    for start in range(0, len(string), slice_length):
        yield string[start:start + slice_length]
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    Tried:
    1. charset from content-type
    2. fall back and replace all unicode characters

    :param r: Response object to get unicode content from.
    :rtype: str
    """
    warnings.warn((
        'In requests 3.0, get_unicode_from_response will be removed. For '
        'more information, please see the discussion on issue #2266. (This'
        ' warning should only appear once.)'),
        DeprecationWarning)

    encoding = get_encoding_from_headers(r.headers)

    if encoding:
        # First choice: the charset the server advertised.
        try:
            return str(r.content, encoding)
        except UnicodeError:
            pass

    # Fall back: same encoding, replacing undecodable bytes.
    # NOTE(review): when no charset was advertised `encoding` is None, so
    # str() raises TypeError and the raw content is returned unchanged.
    try:
        return str(r.content, encoding, errors='replace')
    except TypeError:
        return r.content
# The unreserved URI characters (RFC 3986)
UNRESERVED_SET = frozenset(
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    + "0123456789-._~")


def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    pieces = uri.split('%')
    # pieces[0] precedes the first '%'; every later piece begins with the
    # two hex digits of a percent-escape (when well-formed).
    for idx, piece in enumerate(pieces[1:], 1):
        hex_pair = piece[:2]
        if len(hex_pair) != 2 or not hex_pair.isalnum():
            # Malformed escape: keep the literal '%'.
            pieces[idx] = '%' + piece
            continue
        try:
            char = chr(int(hex_pair, 16))
        except ValueError:
            raise InvalidURL("Invalid percent-escape sequence: '%s'" % hex_pair)
        if char in UNRESERVED_SET:
            pieces[idx] = char + piece[2:]
        else:
            pieces[idx] = '%' + piece
    return ''.join(pieces)
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    try:
        unquoted = unquote_unreserved(uri)
    except InvalidURL:
        # We couldn't unquote the given URI: it contains stray '%'s that are
        # not valid escapes, so quote them too (note '%' absent from safe).
        return quote(uri, safe="!#$&'()*+,/:;=?@[]~")
    # Quote only illegal characters (not reserved, unreserved, or '%').
    return quote(unquoted, safe="!#$%&'()*+,/:;=?@[]~")
def address_in_network(ip, net):
    """This function allows you to check if on IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    host = struct.unpack('=L', socket.inet_aton(ip))[0]
    net_address, prefix = net.split('/')
    mask = struct.unpack('=L', socket.inet_aton(dotted_netmask(int(prefix))))[0]
    network = struct.unpack('=L', socket.inet_aton(net_address))[0] & mask
    return (host & mask) == (network & mask)


def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    # Clear the low (32 - mask) host bits of an all-ones word.
    host_bits = (1 << (32 - mask)) - 1
    return socket.inet_ntoa(struct.pack('>I', 0xffffffff ^ host_bits))
def is_ipv4_address(string_ip):
    """True when *string_ip* is a string the C inet_aton parser accepts.

    :rtype: bool
    """
    try:
        socket.inet_aton(string_ip)
        return True
    except socket.error:
        return False
def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    if string_network.count('/') != 1:
        return False

    address, _, prefix = string_network.partition('/')

    try:
        mask = int(prefix)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        # Prefix length must be a sensible IPv4 mask.
        return False

    try:
        socket.inet_aton(address)
    except socket.error:
        return False

    return True
def should_bypass_proxies(url):
    """
    Returns whether we should bypass proxies or not.

    :rtype: bool
    """
    def get_proxy(key):
        # Environment variables may be spelled lower- or upper-case.
        return os.environ.get(key) or os.environ.get(key.upper())

    netloc = urlparse(url).netloc

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy = get_proxy('no_proxy')
    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the netloc, both with and without the port.
        hosts = (host for host in no_proxy.replace(' ', '').split(',') if host)
        ip = netloc.split(':')[0]

        if is_ipv4_address(ip):
            for candidate in hosts:
                if is_valid_cidr(candidate):
                    if address_in_network(ip, candidate):
                        return True
                elif ip == candidate:
                    # no_proxy entry given as a plain IP rather than CIDR
                    # notation that matches exactly.
                    return True
        else:
            for candidate in hosts:
                if netloc.endswith(candidate) or netloc.split(':')[0].endswith(candidate):
                    # The URL does match something in no_proxy, so we don't
                    # want to apply the proxies on this URL.
                    return True

    # If the system proxy settings indicate that this URL should be bypassed,
    # don't proxy.
    # The proxy_bypass function is incredibly buggy on macOS in early versions
    # of Python 2.6, so allow this call to fail. Only catch the specific
    # exceptions we've seen, though: this call failing in other ways can reveal
    # legitimate problems.
    try:
        bypass = proxy_bypass(netloc)
    except (TypeError, socket.gaierror):
        bypass = False

    return bool(bypass)
def get_environ_proxies(url):
    """
    Return a dict of environment proxies.

    An empty dict is returned for URLs the no_proxy / system settings say
    to bypass.

    :rtype: dict
    """
    return {} if should_bypass_proxies(url) else getproxies()
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)

    if parts.hostname is None:
        # No host (e.g. file:// URLs): only the catch-all keys can apply.
        return proxies.get('all', proxies.get(parts.scheme))

    # Keys are tried in the library's fixed precedence order.
    candidate_keys = (
        'all://' + parts.hostname,
        'all',
        parts.scheme + '://' + parts.hostname,
        parts.scheme,
    )
    for key in candidate_keys:
        if key in proxies:
            return proxies[key]
    return None
def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    return '{0}/{1}'.format(name, __version__)
def default_headers():
    """
    Build the headers sent with every request by default.

    :rtype: requests.structures.CaseInsensitiveDict
    """
    headers = CaseInsensitiveDict()
    headers['User-Agent'] = default_user_agent()
    headers['Accept-Encoding'] = ', '.join(('gzip', 'deflate'))
    headers['Accept'] = '*/*'
    headers['Connection'] = 'keep-alive'
    return headers
def parse_header_links(value):
    """Return a dict of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """
    parsed = []
    strip_chars = ' \'"'

    for segment in re.split(', *<', value):
        url, _, params = segment.partition(';')
        link = {'url': url.strip('<> \'"')}

        for param in params.split(';'):
            pieces = param.split('=')
            if len(pieces) != 2:
                # Malformed parameter: stop parsing this link's params
                # (preserves the original unpack-and-break behavior).
                break
            key, val = pieces
            link[key.strip(strip_chars)] = val.strip(strip_chars)

        parsed.append(link)

    return parsed
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = '\x00'.encode('ascii')  # encoding to ASCII for Python 3
_null2 = _null * 2
_null3 = _null * 3


def guess_json_utf(data):
    """Guess the Unicode codec a JSON byte string is encoded with.

    JSON always starts with two ASCII characters, so detection is as easy as
    counting the NUL bytes and, from their location and count, determining
    the encoding. A BOM, if present, is also detected.

    :param data: byte string holding a JSON document.
    :rtype: str (codec name), or None if no encoding could be determined.
    """
    sample = data[:4]
    # BUG FIX: this previously tested codecs.BOM32_BE which, despite its
    # name, is a legacy alias for the UTF-16 big-endian BOM -- so a
    # UTF-32-BE BOM was never recognized here.
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return 'utf-32'  # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return 'utf-8-sig'  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return 'utf-16'  # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return 'utf-8'
    if nullcount == 2:
        if sample[::2] == _null2:  # 1st and 3rd are null
            return 'utf-16-be'
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return 'utf-16-le'
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return 'utf-32-be'
        if sample[1:] == _null3:
            return 'utf-32-le'
        # Did not detect a valid UTF-32 ascii-range character
    return None
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :rtype: str
    """
    parts = urlparse(url, new_scheme)
    netloc, path = parts.netloc, parts.path

    # urlparse is a finicky beast, and sometimes decides that there isn't a
    # netloc present. Assume that it's being over-cautious, and switch netloc
    # and path if urlparse decided there was no netloc.
    if not netloc:
        netloc, path = path, netloc

    return urlunparse((parts.scheme, netloc, path, parts.params, parts.query, parts.fragment))
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parsed = urlparse(url)
    try:
        return (unquote(parsed.username), unquote(parsed.password))
    except (AttributeError, TypeError):
        # Missing userinfo: username/password are None and unquote() fails.
        return ('', '')
def to_native_string(string, encoding='ascii'):
    """Given a string object, regardless of type, returns a representation of
    that string in the native string type, encoding and decoding where
    necessary. This assumes ASCII unless told otherwise.
    """
    if isinstance(string, builtin_str):
        return string
    # Python 2's native str is bytes (encode); Python 3's is text (decode).
    converter = string.encode if is_py2 else string.decode
    return converter(encoding)
# Moved outside of function to avoid recompile every call
_CLEAN_HEADER_REGEX_BYTE = re.compile(b'^\\S[^\\r\\n]*$|^$')
_CLEAN_HEADER_REGEX_STR = re.compile(r'^\S[^\r\n]*$|^$')


def check_header_validity(header):
    """Verifies that header value is a string which doesn't contain
    leading whitespace or return characters. This prevents unintended
    header injection.

    :param header: tuple, in the format (name, value).
    """
    name, value = header
    pattern = _CLEAN_HEADER_REGEX_BYTE if isinstance(value, bytes) else _CLEAN_HEADER_REGEX_STR
    try:
        if not pattern.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
        # pattern.match() blows up on non-str/bytes values.
        raise InvalidHeader("Header value %s must be of type str or bytes, "
                            "not %s" % (value, type(value)))
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    parts = urlparse(url)
    netloc, path = parts.netloc, parts.path

    # see func:`prepend_scheme_if_needed`
    if not netloc:
        netloc, path = path, netloc

    # Drop everything up to the last '@' (the userinfo), and blank the fragment.
    netloc = netloc.rsplit('@', 1)[-1]
    return urlunparse((parts.scheme, netloc, path, parts.params, parts.query, ''))
|
gpl-3.0
|
ekalosak/boto
|
tests/unit/cloudformation/test_connection.py
|
70
|
30604
|
#!/usr/bin/env python
import unittest
from datetime import datetime
from mock import Mock
from tests.unit import AWSMockServiceTestCase
from boto.cloudformation.connection import CloudFormationConnection
from boto.exception import BotoServerError
from boto.compat import json
SAMPLE_TEMPLATE = r"""
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "Sample template",
"Parameters" : {
"KeyName" : {
"Description" : "key pair",
"Type" : "String"
}
},
"Resources" : {
"Ec2Instance" : {
"Type" : "AWS::EC2::Instance",
"Properties" : {
"KeyName" : { "Ref" : "KeyName" },
"ImageId" : "ami-7f418316",
"UserData" : { "Fn::Base64" : "80" }
}
}
},
"Outputs" : {
"InstanceId" : {
"Description" : "InstanceId of the newly created EC2 instance",
"Value" : { "Ref" : "Ec2Instance" }
}
}
"""
class CloudFormationConnectionBase(AWSMockServiceTestCase):
    # Shared base for the CloudFormation test cases below: wires the mock
    # service harness to CloudFormationConnection.
    connection_class = CloudFormationConnection

    def setUp(self):
        super(CloudFormationConnectionBase, self).setUp()
        # Canned stack ARN used by the response fixtures in the subclasses.
        self.stack_id = u'arn:aws:cloudformation:us-east-1:18:stack/Name/id'
class TestCloudFormationCreateStack(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.create_stack."""

    def default_body(self):
        # Canned success response returning the fixture stack ARN.
        return json.dumps(
            {u'CreateStackResponse':
                 {u'CreateStackResult': {u'StackId': self.stack_id},
                  u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8')

    def test_create_stack_has_correct_request_params(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_stack(
            'stack_name', template_url='http://url',
            template_body=SAMPLE_TEMPLATE,
            parameters=[('KeyName', 'myKeyName')],
            tags={'TagKey': 'TagValue'},
            notification_arns=['arn:notify1', 'arn:notify2'],
            disable_rollback=True,
            timeout_in_minutes=20, capabilities=['CAPABILITY_IAM']
        )
        self.assertEqual(api_response, self.stack_id)
        # These are the parameters that are actually sent to the CloudFormation
        # service.
        self.assert_request_parameters({
            'Action': 'CreateStack',
            'Capabilities.member.1': 'CAPABILITY_IAM',
            'ContentType': 'JSON',
            'DisableRollback': 'true',
            'NotificationARNs.member.1': 'arn:notify1',
            'NotificationARNs.member.2': 'arn:notify2',
            'Parameters.member.1.ParameterKey': 'KeyName',
            'Parameters.member.1.ParameterValue': 'myKeyName',
            'Tags.member.1.Key': 'TagKey',
            'Tags.member.1.Value': 'TagValue',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
            'TimeoutInMinutes': 20,
            'TemplateBody': SAMPLE_TEMPLATE,
            'TemplateURL': 'http://url',
        })

    # The test_create_stack_has_correct_request_params verified all of the
    # params needed when making a create_stack service call. The rest of the
    # tests for create_stack only verify specific parts of the params sent
    # to CloudFormation.

    def test_create_stack_with_minimum_args(self):
        # This will fail in practice, but the API docs only require stack_name.
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_stack('stack_name')
        self.assertEqual(api_response, self.stack_id)
        self.assert_request_parameters({
            'Action': 'CreateStack',
            'ContentType': 'JSON',
            'DisableRollback': 'false',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        })

    def test_create_stack_fails(self):
        self.set_http_response(status_code=400, reason='Bad Request',
                               body=b'{"Error": {"Code": 1, "Message": "Invalid arg."}}')
        with self.assertRaisesRegexp(self.service_connection.ResponseError,
                                     'Invalid arg.'):
            self.service_connection.create_stack(
                'stack_name', template_body=SAMPLE_TEMPLATE,
                parameters=[('KeyName', 'myKeyName')])

    def test_create_stack_fail_error(self):
        # A 400 with a RequestId should surface as a BotoServerError carrying
        # the parsed request id, code and message.
        self.set_http_response(status_code=400, reason='Bad Request',
                               body=b'{"RequestId": "abc", "Error": {"Code": 1, "Message": "Invalid arg."}}')
        try:
            self.service_connection.create_stack(
                'stack_name', template_body=SAMPLE_TEMPLATE,
                parameters=[('KeyName', 'myKeyName')])
        except BotoServerError as e:
            self.assertEqual('abc', e.request_id)
            self.assertEqual(1, e.error_code)
            self.assertEqual('Invalid arg.', e.message)
        else:
            # BUG FIX: previously this test passed silently when no exception
            # was raised at all.
            self.fail('BotoServerError was not raised for the 400 response')
class TestCloudFormationUpdateStack(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.update_stack."""

    def default_body(self):
        # Canned success response returning the fixture stack ARN.
        return json.dumps(
            {u'UpdateStackResponse':
                 {u'UpdateStackResult': {u'StackId': self.stack_id},
                  u'ResponseMetadata': {u'RequestId': u'1'}}}).encode('utf-8')

    def test_update_stack_all_args(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.update_stack(
            'stack_name', template_url='http://url',
            template_body=SAMPLE_TEMPLATE,
            parameters=[('KeyName', 'myKeyName'), ('KeyName2', "", True),
                        ('KeyName3', "", False), ('KeyName4', None, True),
                        ('KeyName5', "Ignore Me", True)],
            tags={'TagKey': 'TagValue'},
            notification_arns=['arn:notify1', 'arn:notify2'],
            disable_rollback=True,
            timeout_in_minutes=20,
            use_previous_template=True
        )
        # CONSISTENCY FIX: assert the returned stack id, as the sibling
        # minimum-args test (and the create_stack tests) already do.
        self.assertEqual(api_response, self.stack_id)
        self.assert_request_parameters({
            'Action': 'UpdateStack',
            'ContentType': 'JSON',
            'DisableRollback': 'true',
            'NotificationARNs.member.1': 'arn:notify1',
            'NotificationARNs.member.2': 'arn:notify2',
            'Parameters.member.1.ParameterKey': 'KeyName',
            'Parameters.member.1.ParameterValue': 'myKeyName',
            'Parameters.member.2.ParameterKey': 'KeyName2',
            'Parameters.member.2.UsePreviousValue': 'true',
            'Parameters.member.3.ParameterKey': 'KeyName3',
            'Parameters.member.3.ParameterValue': '',
            'Parameters.member.4.UsePreviousValue': 'true',
            'Parameters.member.4.ParameterKey': 'KeyName4',
            'Parameters.member.5.UsePreviousValue': 'true',
            'Parameters.member.5.ParameterKey': 'KeyName5',
            'Tags.member.1.Key': 'TagKey',
            'Tags.member.1.Value': 'TagValue',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
            'TimeoutInMinutes': 20,
            'TemplateBody': SAMPLE_TEMPLATE,
            'TemplateURL': 'http://url',
            'UsePreviousTemplate': 'true',
        })

    def test_update_stack_with_minimum_args(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.update_stack('stack_name')
        self.assertEqual(api_response, self.stack_id)
        self.assert_request_parameters({
            'Action': 'UpdateStack',
            'ContentType': 'JSON',
            'DisableRollback': 'false',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        })

    def test_update_stack_fails(self):
        self.set_http_response(status_code=400, reason='Bad Request',
                               body=b'Invalid arg.')
        with self.assertRaises(self.service_connection.ResponseError):
            self.service_connection.update_stack(
                'stack_name', template_body=SAMPLE_TEMPLATE,
                parameters=[('KeyName', 'myKeyName')])
class TestCloudFormationDeleteStack(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.delete_stack."""

    def default_body(self):
        body = {u'DeleteStackResponse':
                    {u'ResponseMetadata': {u'RequestId': u'1'}}}
        return json.dumps(body).encode('utf-8')

    def test_delete_stack(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.delete_stack('stack_name')
        # delete_stack returns the decoded JSON response body verbatim.
        expected = json.loads(self.default_body().decode('utf-8'))
        self.assertEqual(response, expected)
        self.assert_request_parameters({
            'Action': 'DeleteStack',
            'ContentType': 'JSON',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        })

    def test_delete_stack_fails(self):
        self.set_http_response(status_code=400)
        with self.assertRaises(self.service_connection.ResponseError):
            self.service_connection.delete_stack('stack_name')
class TestCloudFormationDescribeStackResource(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.describe_stack_resource."""

    def default_body(self):
        # The call passes the decoded JSON straight through.
        return json.dumps('fake server response').encode('utf-8')

    def test_describe_stack_resource(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.describe_stack_resource(
            'stack_name', 'resource_id')
        self.assertEqual(response, 'fake server response')
        self.assert_request_parameters({
            'Action': 'DescribeStackResource',
            'ContentType': 'JSON',
            'LogicalResourceId': 'resource_id',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        })

    def test_describe_stack_resource_fails(self):
        self.set_http_response(status_code=400)
        with self.assertRaises(self.service_connection.ResponseError):
            self.service_connection.describe_stack_resource(
                'stack_name', 'resource_id')
class TestCloudFormationGetTemplate(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.get_template."""

    def default_body(self):
        """Return an arbitrary JSON payload; the call passes it through."""
        return json.dumps('fake server response').encode('utf-8')

    def test_get_template(self):
        """The decoded JSON body is returned verbatim on success."""
        self.set_http_response(status_code=200)
        api_response = self.service_connection.get_template('stack_name')
        self.assertEqual(api_response, 'fake server response')
        self.assert_request_parameters({
            'Action': 'GetTemplate',
            'ContentType': 'JSON',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        })

    def test_get_template_fails(self):
        """An HTTP 400 surfaces as ResponseError."""
        self.set_http_response(status_code=400)
        with self.assertRaises(self.service_connection.ResponseError):
            # Call for its exception only; binding the result was dead code.
            self.service_connection.get_template('stack_name')
class TestCloudFormationGetStackevents(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.describe_stack_events (XML API)."""
    def default_body(self):
        # Canned XML response holding two events: one in-progress stack
        # creation with a status reason, and one completed security group.
        return b"""
          <DescribeStackEventsResult>
            <StackEvents>
              <member>
                <EventId>Event-1-Id</EventId>
                <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
                <StackName>MyStack</StackName>
                <LogicalResourceId>MyStack</LogicalResourceId>
                <PhysicalResourceId>MyStack_One</PhysicalResourceId>
                <ResourceType>AWS::CloudFormation::Stack</ResourceType>
                <Timestamp>2010-07-27T22:26:28Z</Timestamp>
                <ResourceStatus>CREATE_IN_PROGRESS</ResourceStatus>
                <ResourceStatusReason>User initiated</ResourceStatusReason>
              </member>
              <member>
                <EventId>Event-2-Id</EventId>
                <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
                <StackName>MyStack</StackName>
                <LogicalResourceId>MySG1</LogicalResourceId>
                <PhysicalResourceId>MyStack_SG1</PhysicalResourceId>
                <ResourceType>AWS::SecurityGroup</ResourceType>
                <Timestamp>2010-07-27T22:28:28Z</Timestamp>
                <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
              </member>
            </StackEvents>
          </DescribeStackEventsResult>
        """
    def test_describe_stack_events(self):
        """Each <member> element is parsed into a StackEvent object."""
        self.set_http_response(status_code=200)
        first, second = self.service_connection.describe_stack_events('stack_name', next_token='next_token')
        # First event: fields mirror the first <member> block above.
        self.assertEqual(first.event_id, 'Event-1-Id')
        self.assertEqual(first.logical_resource_id, 'MyStack')
        self.assertEqual(first.physical_resource_id, 'MyStack_One')
        self.assertEqual(first.resource_properties, None)
        self.assertEqual(first.resource_status, 'CREATE_IN_PROGRESS')
        self.assertEqual(first.resource_status_reason, 'User initiated')
        self.assertEqual(first.resource_type, 'AWS::CloudFormation::Stack')
        self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
        self.assertEqual(first.stack_name, 'MyStack')
        self.assertIsNotNone(first.timestamp)
        # Second event: no <ResourceStatusReason>, so the attribute is None.
        self.assertEqual(second.event_id, 'Event-2-Id')
        self.assertEqual(second.logical_resource_id, 'MySG1')
        self.assertEqual(second.physical_resource_id, 'MyStack_SG1')
        self.assertEqual(second.resource_properties, None)
        self.assertEqual(second.resource_status, 'CREATE_COMPLETE')
        self.assertEqual(second.resource_status_reason, None)
        self.assertEqual(second.resource_type, 'AWS::SecurityGroup')
        self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
        self.assertEqual(second.stack_name, 'MyStack')
        self.assertIsNotNone(second.timestamp)
        # The next_token kwarg must be forwarded as the NextToken parameter.
        self.assert_request_parameters({
            'Action': 'DescribeStackEvents',
            'NextToken': 'next_token',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        })
class TestCloudFormationDescribeStackResources(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.describe_stack_resources (XML API)."""
    def default_body(self):
        # Canned XML response with two resources: a completed DB instance
        # and an in-progress autoscaling group.
        return b"""
          <DescribeStackResourcesResult>
            <StackResources>
              <member>
                <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
                <StackName>MyStack</StackName>
                <LogicalResourceId>MyDBInstance</LogicalResourceId>
                <PhysicalResourceId>MyStack_DB1</PhysicalResourceId>
                <ResourceType>AWS::DBInstance</ResourceType>
                <Timestamp>2010-07-27T22:27:28Z</Timestamp>
                <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
              </member>
              <member>
                <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
                <StackName>MyStack</StackName>
                <LogicalResourceId>MyAutoScalingGroup</LogicalResourceId>
                <PhysicalResourceId>MyStack_ASG1</PhysicalResourceId>
                <ResourceType>AWS::AutoScalingGroup</ResourceType>
                <Timestamp>2010-07-27T22:28:28Z</Timestamp>
                <ResourceStatus>CREATE_IN_PROGRESS</ResourceStatus>
              </member>
            </StackResources>
          </DescribeStackResourcesResult>
        """
    def test_describe_stack_resources(self):
        """Each <member> element is parsed into a StackResource object."""
        self.set_http_response(status_code=200)
        # Positional args: stack name, logical id filter, physical id filter.
        first, second = self.service_connection.describe_stack_resources(
            'stack_name', 'logical_resource_id', 'physical_resource_id')
        # Elements absent from the XML (Description, ResourceStatusReason)
        # come back as None.
        self.assertEqual(first.description, None)
        self.assertEqual(first.logical_resource_id, 'MyDBInstance')
        self.assertEqual(first.physical_resource_id, 'MyStack_DB1')
        self.assertEqual(first.resource_status, 'CREATE_COMPLETE')
        self.assertEqual(first.resource_status_reason, None)
        self.assertEqual(first.resource_type, 'AWS::DBInstance')
        self.assertEqual(first.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
        self.assertEqual(first.stack_name, 'MyStack')
        self.assertIsNotNone(first.timestamp)
        self.assertEqual(second.description, None)
        self.assertEqual(second.logical_resource_id, 'MyAutoScalingGroup')
        self.assertEqual(second.physical_resource_id, 'MyStack_ASG1')
        self.assertEqual(second.resource_status, 'CREATE_IN_PROGRESS')
        self.assertEqual(second.resource_status_reason, None)
        self.assertEqual(second.resource_type, 'AWS::AutoScalingGroup')
        self.assertEqual(second.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
        self.assertEqual(second.stack_name, 'MyStack')
        self.assertIsNotNone(second.timestamp)
        self.assert_request_parameters({
            'Action': 'DescribeStackResources',
            'LogicalResourceId': 'logical_resource_id',
            'PhysicalResourceId': 'physical_resource_id',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        })
class TestCloudFormationDescribeStacks(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.describe_stacks (XML API)."""
    def default_body(self):
        # Canned XML describing a single fully-populated stack: parameters,
        # outputs, capabilities, notification ARNs, and tags.
        return b"""
          <DescribeStacksResponse>
              <DescribeStacksResult>
                  <Stacks>
                      <member>
                          <StackId>arn:aws:cfn:us-east-1:1:stack</StackId>
                          <StackStatus>CREATE_COMPLETE</StackStatus>
                          <StackName>MyStack</StackName>
                          <StackStatusReason/>
                          <Description>My Description</Description>
                          <CreationTime>2012-05-16T22:55:31Z</CreationTime>
                          <Capabilities>
                              <member>CAPABILITY_IAM</member>
                          </Capabilities>
                          <NotificationARNs>
                              <member>arn:aws:sns:region-name:account-name:topic-name</member>
                          </NotificationARNs>
                          <DisableRollback>false</DisableRollback>
                          <Parameters>
                              <member>
                                  <ParameterValue>MyValue</ParameterValue>
                                  <ParameterKey>MyKey</ParameterKey>
                              </member>
                          </Parameters>
                          <Outputs>
                              <member>
                                  <OutputValue>http://url/</OutputValue>
                                  <Description>Server URL</Description>
                                  <OutputKey>ServerURL</OutputKey>
                              </member>
                          </Outputs>
                          <Tags>
                            <member>
                              <Key>MyTagKey</Key>
                              <Value>MyTagValue</Value>
                            </member>
                          </Tags>
                      </member>
                  </Stacks>
              </DescribeStacksResult>
              <ResponseMetadata>
                  <RequestId>12345</RequestId>
              </ResponseMetadata>
          </DescribeStacksResponse>
        """
    def test_describe_stacks(self):
        """All nested stack attributes are parsed into boto Stack objects."""
        self.set_http_response(status_code=200)
        stacks = self.service_connection.describe_stacks('MyStack')
        self.assertEqual(len(stacks), 1)
        stack = stacks[0]
        # ISO-8601 timestamp is parsed into a (naive) datetime.
        self.assertEqual(stack.creation_time,
                         datetime(2012, 5, 16, 22, 55, 31))
        self.assertEqual(stack.description, 'My Description')
        # <DisableRollback>false</DisableRollback> parses to boolean False.
        self.assertEqual(stack.disable_rollback, False)
        self.assertEqual(stack.stack_id, 'arn:aws:cfn:us-east-1:1:stack')
        self.assertEqual(stack.stack_status, 'CREATE_COMPLETE')
        self.assertEqual(stack.stack_name, 'MyStack')
        # NOTE(review): boto exposes the empty <StackStatusReason/> under the
        # oddly named attribute `stack_name_reason` — presumably historical.
        self.assertEqual(stack.stack_name_reason, None)
        self.assertEqual(stack.timeout_in_minutes, None)
        self.assertEqual(len(stack.outputs), 1)
        self.assertEqual(stack.outputs[0].description, 'Server URL')
        self.assertEqual(stack.outputs[0].key, 'ServerURL')
        self.assertEqual(stack.outputs[0].value, 'http://url/')
        self.assertEqual(len(stack.parameters), 1)
        self.assertEqual(stack.parameters[0].key, 'MyKey')
        self.assertEqual(stack.parameters[0].value, 'MyValue')
        self.assertEqual(len(stack.capabilities), 1)
        self.assertEqual(stack.capabilities[0].value, 'CAPABILITY_IAM')
        self.assertEqual(len(stack.notification_arns), 1)
        self.assertEqual(stack.notification_arns[0].value, 'arn:aws:sns:region-name:account-name:topic-name')
        # Tags are exposed as a dict keyed by the tag's Key element.
        self.assertEqual(len(stack.tags), 1)
        self.assertEqual(stack.tags['MyTagKey'], 'MyTagValue')
        self.assert_request_parameters({
            'Action': 'DescribeStacks',
            'StackName': 'MyStack',
            'Version': '2010-05-15',
        })
class TestCloudFormationListStackResources(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.list_stack_resources (XML API)."""
    def default_body(self):
        # Canned XML response with two resource summaries.
        return b"""
            <ListStackResourcesResponse>
              <ListStackResourcesResult>
                <StackResourceSummaries>
                  <member>
                    <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
                    <LogicalResourceId>SampleDB</LogicalResourceId>
                    <LastUpdatedTime>2011-06-21T20:25:57Z</LastUpdatedTime>
                    <PhysicalResourceId>My-db-ycx</PhysicalResourceId>
                    <ResourceType>AWS::RDS::DBInstance</ResourceType>
                  </member>
                  <member>
                    <ResourceStatus>CREATE_COMPLETE</ResourceStatus>
                    <LogicalResourceId>CPUAlarmHigh</LogicalResourceId>
                    <LastUpdatedTime>2011-06-21T20:29:23Z</LastUpdatedTime>
                    <PhysicalResourceId>MyStack-CPUH-PF</PhysicalResourceId>
                    <ResourceType>AWS::CloudWatch::Alarm</ResourceType>
                  </member>
                </StackResourceSummaries>
              </ListStackResourcesResult>
              <ResponseMetadata>
                <RequestId>2d06e36c-ac1d-11e0-a958-f9382b6eb86b</RequestId>
              </ResponseMetadata>
            </ListStackResourcesResponse>
        """
    def test_list_stack_resources(self):
        """Each summary is parsed; timestamps become datetime objects."""
        self.set_http_response(status_code=200)
        resources = self.service_connection.list_stack_resources('MyStack',
                                                                 next_token='next_token')
        self.assertEqual(len(resources), 2)
        self.assertEqual(resources[0].last_updated_time,
                         datetime(2011, 6, 21, 20, 25, 57))
        self.assertEqual(resources[0].logical_resource_id, 'SampleDB')
        self.assertEqual(resources[0].physical_resource_id, 'My-db-ycx')
        self.assertEqual(resources[0].resource_status, 'CREATE_COMPLETE')
        # No <ResourceStatusReason> in the fixture, so the attribute is None.
        self.assertEqual(resources[0].resource_status_reason, None)
        self.assertEqual(resources[0].resource_type, 'AWS::RDS::DBInstance')
        self.assertEqual(resources[1].last_updated_time,
                         datetime(2011, 6, 21, 20, 29, 23))
        self.assertEqual(resources[1].logical_resource_id, 'CPUAlarmHigh')
        self.assertEqual(resources[1].physical_resource_id, 'MyStack-CPUH-PF')
        self.assertEqual(resources[1].resource_status, 'CREATE_COMPLETE')
        self.assertEqual(resources[1].resource_status_reason, None)
        self.assertEqual(resources[1].resource_type, 'AWS::CloudWatch::Alarm')
        # next_token must be forwarded as the NextToken request parameter.
        self.assert_request_parameters({
            'Action': 'ListStackResources',
            'NextToken': 'next_token',
            'StackName': 'MyStack',
            'Version': '2010-05-15',
        })
class TestCloudFormationListStacks(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.list_stacks (XML API)."""
    def default_body(self):
        # Canned XML response with a single in-progress stack summary.
        return b"""
            <ListStacksResponse>
             <ListStacksResult>
              <StackSummaries>
                <member>
                    <StackId>arn:aws:cfn:us-east-1:1:stack/Test1/aa</StackId>
                    <StackStatus>CREATE_IN_PROGRESS</StackStatus>
                    <StackName>vpc1</StackName>
                    <CreationTime>2011-05-23T15:47:44Z</CreationTime>
                    <TemplateDescription>My Description.</TemplateDescription>
                </member>
              </StackSummaries>
             </ListStacksResult>
            </ListStacksResponse>
        """
    def test_list_stacks(self):
        """Status filters are serialized as StackStatusFilter.member.N."""
        self.set_http_response(status_code=200)
        stacks = self.service_connection.list_stacks(['CREATE_IN_PROGRESS'],
                                                     next_token='next_token')
        self.assertEqual(len(stacks), 1)
        self.assertEqual(stacks[0].stack_id,
                         'arn:aws:cfn:us-east-1:1:stack/Test1/aa')
        self.assertEqual(stacks[0].stack_status, 'CREATE_IN_PROGRESS')
        self.assertEqual(stacks[0].stack_name, 'vpc1')
        self.assertEqual(stacks[0].creation_time,
                         datetime(2011, 5, 23, 15, 47, 44))
        # No <DeletionTime> element in the fixture, so the attribute is None.
        self.assertEqual(stacks[0].deletion_time, None)
        self.assertEqual(stacks[0].template_description, 'My Description.')
        self.assert_request_parameters({
            'Action': 'ListStacks',
            'NextToken': 'next_token',
            'StackStatusFilter.member.1': 'CREATE_IN_PROGRESS',
            'Version': '2010-05-15',
        })
class TestCloudFormationValidateTemplate(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.validate_template (XML API)."""
    def default_body(self):
        # Canned ValidateTemplate response: two template parameters (one
        # with a default value) plus capability information.
        return b"""
            <ValidateTemplateResponse xmlns="http://cloudformation.amazonaws.com/doc/2010-05-15/">
              <ValidateTemplateResult>
                <Description>My Description.</Description>
                <Parameters>
                  <member>
                    <NoEcho>false</NoEcho>
                    <ParameterKey>InstanceType</ParameterKey>
                    <Description>Type of instance to launch</Description>
                    <DefaultValue>m1.small</DefaultValue>
                  </member>
                  <member>
                    <NoEcho>false</NoEcho>
                    <ParameterKey>KeyName</ParameterKey>
                    <Description>EC2 KeyPair</Description>
                  </member>
                </Parameters>
                <CapabilitiesReason>Reason</CapabilitiesReason>
                <Capabilities>
                  <member>CAPABILITY_IAM</member>
                </Capabilities>
              </ValidateTemplateResult>
              <ResponseMetadata>
                <RequestId>0be7b6e8-e4a0-11e0-a5bd-9f8d5a7dbc91</RequestId>
              </ResponseMetadata>
            </ValidateTemplateResponse>
        """
    def test_validate_template(self):
        """The parsed Template exposes parameters and capabilities."""
        self.set_http_response(status_code=200)
        template = self.service_connection.validate_template(template_body=SAMPLE_TEMPLATE,
                                                             template_url='http://url')
        self.assertEqual(template.description, 'My Description.')
        self.assertEqual(len(template.template_parameters), 2)
        param1, param2 = template.template_parameters
        self.assertEqual(param1.default_value, 'm1.small')
        self.assertEqual(param1.description, 'Type of instance to launch')
        # NOTE(review): the fixture says <NoEcho>false</NoEcho> but the test
        # asserts True — presumably boto's parser treats any present element
        # as truthy here; confirm against boto's TemplateParameter.endElement.
        self.assertEqual(param1.no_echo, True)
        self.assertEqual(param1.parameter_key, 'InstanceType')
        self.assertEqual(param2.default_value, None)
        self.assertEqual(param2.description, 'EC2 KeyPair')
        self.assertEqual(param2.no_echo, True)
        self.assertEqual(param2.parameter_key, 'KeyName')
        self.assertEqual(template.capabilities_reason, 'Reason')
        self.assertEqual(len(template.capabilities), 1)
        self.assertEqual(template.capabilities[0].value, 'CAPABILITY_IAM')
        # Both TemplateBody and TemplateURL are sent when both are supplied.
        self.assert_request_parameters({
            'Action': 'ValidateTemplate',
            'TemplateBody': SAMPLE_TEMPLATE,
            'TemplateURL': 'http://url',
            'Version': '2010-05-15',
        })
class TestCloudFormationCancelUpdateStack(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.cancel_update_stack."""

    def default_body(self):
        # Empty result element: the API returns no payload on success.
        return b"""<CancelUpdateStackResult/>"""

    def test_cancel_update_stack(self):
        """A 200 response yields True and the expected query parameters."""
        self.set_http_response(status_code=200)
        result = self.service_connection.cancel_update_stack('stack_name')
        self.assertEqual(result, True)
        expected = {
            'Action': 'CancelUpdateStack',
            'StackName': 'stack_name',
            'Version': '2010-05-15',
        }
        self.assert_request_parameters(expected)
class TestCloudFormationEstimateTemplateCost(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.estimate_template_cost."""

    def default_body(self):
        # Canned JSON response carrying only the cost-calculator URL.
        return b"""
            {
                "EstimateTemplateCostResponse": {
                    "EstimateTemplateCostResult": {
                        "Url": "http://calculator.s3.amazonaws.com/calc5.html?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6"
                    }
                }
            }
        """

    def test_estimate_template_cost(self):
        """The calculator URL is extracted from the nested JSON response."""
        self.set_http_response(status_code=200)
        result = self.service_connection.estimate_template_cost(
            template_body='{}')
        expected_url = ('http://calculator.s3.amazonaws.com/calc5.html'
                        '?key=cf-2e351785-e821-450c-9d58-625e1e1ebfb6')
        self.assertEqual(result, expected_url)
        expected_params = {
            'Action': 'EstimateTemplateCost',
            'ContentType': 'JSON',
            'TemplateBody': '{}',
            'Version': '2010-05-15',
        }
        self.assert_request_parameters(expected_params)
class TestCloudFormationGetStackPolicy(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.get_stack_policy."""

    def default_body(self):
        # Canned JSON response wrapping the policy body string.
        return b"""
            {
                "GetStackPolicyResponse": {
                    "GetStackPolicyResult": {
                        "StackPolicyBody": "{...}"
                    }
                }
            }
        """

    def test_get_stack_policy(self):
        """Only the StackPolicyBody string is returned to the caller."""
        self.set_http_response(status_code=200)
        result = self.service_connection.get_stack_policy('stack-id')
        self.assertEqual(result, '{...}')
        expected_params = {
            'Action': 'GetStackPolicy',
            'ContentType': 'JSON',
            'StackName': 'stack-id',
            'Version': '2010-05-15',
        }
        self.assert_request_parameters(expected_params)
class TestCloudFormationSetStackPolicy(CloudFormationConnectionBase):
    """Tests for CloudFormationConnection.set_stack_policy."""

    def default_body(self):
        # Canned JSON response; the inner result dict is returned verbatim.
        return b"""
            {
                "SetStackPolicyResponse": {
                    "SetStackPolicyResult": {
                        "Some": "content"
                    }
                }
            }
        """

    def test_set_stack_policy(self):
        """The SetStackPolicyResult dict is unwrapped and returned."""
        self.set_http_response(status_code=200)
        result = self.service_connection.set_stack_policy('stack-id',
                                                          stack_policy_body='{}')
        expected_result = {'SetStackPolicyResult': {'Some': 'content'}}
        self.assertDictEqual(result, expected_result)
        expected_params = {
            'Action': 'SetStackPolicy',
            'ContentType': 'JSON',
            'StackName': 'stack-id',
            'StackPolicyBody': '{}',
            'Version': '2010-05-15',
        }
        self.assert_request_parameters(expected_params)
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    unittest.main()
|
mit
|
QianBIG/odoo
|
addons/l10n_be_hr_payroll/__openerp__.py
|
312
|
1872
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Belgium - Payroll',
'category': 'Localization',
'author': 'OpenERP SA',
'depends': ['hr_payroll'],
'version': '1.0',
'description': """
Belgian Payroll Rules.
======================
* Employee Details
* Employee Contracts
* Passport based Contract
* Allowances/Deductions
* Allow to configure Basic/Gross/Net Salary
* Employee Payslip
* Monthly Payroll Register
* Integrated with Holiday Management
* Salary Maj, ONSS, Withholding Tax, Child Allowance, ...
""",
'auto_install': False,
'demo': ['l10n_be_hr_payroll_demo.xml'],
'website': 'https://www.odoo.com/page/accounting',
'data':[
'l10n_be_hr_payroll_view.xml',
'l10n_be_hr_payroll_data.xml',
'data/hr.salary.rule.csv',
],
'installable': True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
davehorton/drachtio-server
|
deps/boost_1_69_0/tools/build/test/project_dependencies.py
|
7
|
1160
|
#!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# Test that we can specify a dependency property in project requirements, and
# that it will not cause every main target in the project to be generated in its
# own subdirectory.
# The whole test is somewhat moot now.
import BoostBuild

# Drive a scratch Boost.Build project through a full build.
t = BoostBuild.Tester(use_test_config=False)

# Root jamfile only delegates to the src subproject.
t.write("jamroot.jam", "build-project src ;")

# A shared library the src project's requirements will depend on.
t.write("lib/jamfile.jam", "lib lib1 : lib1.cpp ;")
t.write("lib/lib1.cpp", """
#ifdef _WIN32
__declspec(dllexport)
#endif
void foo() {}\n
""")

# The <library> dependency is declared in *project* requirements — the
# behavior under test is that this does not force per-target subdirectories.
t.write("src/jamfile.jam", """
project : requirements <library>../lib//lib1 ;
exe a : a.cpp ;
exe b : b.cpp ;
""")

t.write("src/a.cpp", """
#ifdef _WIN32
__declspec(dllimport)
#endif
void foo();
int main() { foo(); }
""")

# b.cpp is an exact copy so both exes share the same dependency shape.
t.copy("src/a.cpp", "src/b.cpp")

t.run_build_system()

# Test that there is no "main-target-a" part.
# t.expect_addition("src/bin/$toolset/debug*/a.exe")
# t.expect_addition("src/bin/$toolset/debug*/b.exe")

t.cleanup()
|
mit
|
strint/tensorflow
|
tensorflow/contrib/distributions/python/kernel_tests/bijectors/identity_test.py
|
22
|
1831
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Identity Tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.distributions.python.ops.bijectors import bijector_test_util
from tensorflow.contrib.distributions.python.ops.bijectors import identity as identity_lib
from tensorflow.python.platform import test
class IdentityBijectorTest(test.TestCase):
  """Checks that the identity bijector implements Y = g(X) = X."""

  def testBijector(self):
    with self.test_session():
      identity = identity_lib.Identity()
      self.assertEqual("identity", identity.name)
      x = [[[0.], [1.]]]
      # Forward and inverse are both the identity map.
      self.assertAllEqual(x, identity.forward(x).eval())
      self.assertAllEqual(x, identity.inverse(x).eval())
      # Jacobian of the identity is 1, so its log-determinant is 0.
      self.assertAllEqual(0., identity.inverse_log_det_jacobian(x).eval())
      self.assertAllEqual(0., identity.forward_log_det_jacobian(x).eval())

  def testScalarCongruency(self):
    with self.test_session():
      bijector_test_util.assert_scalar_congruency(
          identity_lib.Identity(), lower_x=-2., upper_x=2.)
if __name__ == "__main__":
  # Allow running this test file directly.
  test.main()
|
apache-2.0
|
cdr-stats/cdr-stats
|
install/conf/settings_local.py
|
2
|
5199
|
#
# CDR-Stats License
# http://www.cdr-stats.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
import os
# Debug pages and verbose template errors — disable both in production.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
    # ('Your Name', 'your_email@domain.com'),
)
TIME_ZONE = 'Europe/Madrid'
# Absolute directory containing this settings file.
APPLICATION_DIR = os.path.dirname(globals()['__file__'])
# DATABASE SETTINGS
# =================
# The ALL-CAPS placeholder values below are substituted by the installer.
DATABASES = {
    'default': {
        # Add 'postgresql_psycopg2','postgresql','mysql','sqlite3','oracle'
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'DATABASENAME',
        'USER': 'DB_USERNAME',
        'PASSWORD': 'DB_PASSWORD',
        'HOST': 'DB_HOSTNAME',
        'PORT': 'DB_PORT',
        'OPTIONS': {
            'autocommit': True,
        }
    },
    # Secondary connection used to read CDRs pushed by cdr-pusher.
    'import_cdr': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'CDRPUSHER_DBNAME',
        'USER': 'DB_USERNAME',
        'PASSWORD': 'DB_PASSWORD',
        'HOST': 'DB_HOSTNAME',
        'PORT': 'DB_PORT',
        'OPTIONS': {
            'autocommit': True,
        }
    }
}
# CACHES
# ======
CACHES = {
    'default': {
        # 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        # 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
        # Redis is the active backend; the commented alternatives are
        # kept for local development without Redis.
        'BACKEND': 'redis_cache.RedisCache',
        'LOCATION': '127.0.0.1:6379',
        # 'LOCATION': '/var/tmp/django_cache',
        'TIMEOUT': '600',  # 600 secs
    }
}
# EMAIL BACKEND
# =============
# Email configuration
# The Gmail credentials below are placeholders — replace before deploying.
DEFAULT_FROM_EMAIL = 'CDR-Stats <cdr-stats@localhost.com>'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = 'username@gmail.com'
EMAIL_HOST_PASSWORD = 'password'
EMAIL_SUBJECT_PREFIX = '[CDR-Stats] '
# Replaced with the server's real address by the installer.
ALLOWED_HOSTS = ['SERVER_IP']
# GENERAL
# =======
# PREFIX_LIMIT_MIN & PREFIX_LIMIT_MAX are used to know
# how many digits are used to match against the dialcode prefix database
PREFIX_LIMIT_MIN = 2
PREFIX_LIMIT_MAX = 5
# If PN is lower than PN_MIN_DIGITS it will be considered as an extension
# If PN is longer than PN_MIN_DIGITS but lower than PN_MAX_DIGITS then
# The PN will be considered as local call and the LOCAL_DIALCODE will be added
LOCAL_DIALCODE = 1  # Set the Dialcode of your country (44 for UK, 1 for US)
PN_MIN_DIGITS = 6
PN_MAX_DIGITS = 9
# List of phonenumber prefix to ignore, this will be remove prior analysis
PREFIX_TO_IGNORE = "+,0,00,000,0000,00000,011,55555,99999"
# When the PN len is less or equal to INTERNAL_CALL, the call will be considered
# as a internal call, for example when dialed number is 41200 and INTERNAL_CALL=5
INTERNAL_CALL = 5
# Realtime Graph : set the Y axis limit
REALTIME_Y_AXIS_LIMIT = 300
# LOGGING
# =======
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    # Everything not matched by a named logger goes to the main log file.
    'root': {
        'level': 'DEBUG',
        'handlers': ['default'],
    },
    'formatters': {
        'verbose': {
            'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
        },
        'simple': {
            'format': '%(asctime)s %(levelname)s || %(message)s'
        },
    },
    'filters': {
        # Ensures admin emails are only sent when DEBUG is False.
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        # Include the default Django email handler for errors
        # This is what you'd get without configuring logging at all.
        'mail_admins': {
            'class': 'django.utils.log.AdminEmailHandler',
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'include_html': True,
        },
        # WatchedFileHandler reopens the file after logrotate moves it.
        'default': {
            'class': 'logging.handlers.WatchedFileHandler',
            'filename': '/var/log/cdr-stats/cdr-stats.log',
            'formatter': 'verbose',
        },
        # Separate rotating log for SQL queries (django.db.backends).
        'default-db': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': '/var/log/cdr-stats/cdr-stats-db.log',
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 20,
            'formatter': 'verbose',
        },
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose'
        },
    },
    'loggers': {
        # Again, default Django configuration to email unhandled exceptions
        'django': {
            'handlers': ['default'],
            'propagate': False,
            'level': 'DEBUG',
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'cdr-stats.filelog': {
            'handlers': ['default'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'django.db.backends': {
            'handlers': ['default-db'],
            'level': 'DEBUG',
            'propagate': False,
        },
    },
}
|
mpl-2.0
|
rversteegen/commandergenius
|
project/jni/python/src/Lib/test/test_errno.py
|
58
|
1178
|
#! /usr/bin/env python
"""Test the errno module
Roger E. Masse
"""
import errno
from test import test_support
import unittest
std_c_errors = frozenset(['EDOM', 'ERANGE'])
class ErrnoAttributeTests(unittest.TestCase):
    """Sanity checks on the attributes exposed by the errno module."""

    def test_for_improper_attributes(self):
        # No unexpected attributes should be on the module: every standard
        # C error name must be present.
        for name in std_c_errors:
            self.assert_(hasattr(errno, name),
                         "errno is missing %s" % name)

    def test_using_errorcode(self):
        # Every key value in errno.errorcode should be on the module.
        # (Python 2 file: itervalues() is the era-appropriate iterator.)
        for name in errno.errorcode.itervalues():
            self.assert_(hasattr(errno, name), 'no %s attr in errno' % name)
class ErrorcodeTests(unittest.TestCase):
    """Checks that errno's public constants round-trip through errorcode."""

    def test_attributes_in_errorcode(self):
        # Every upper-case module attribute must appear in errno.errorcode.
        for attr in errno.__dict__.iterkeys():
            if not attr.isupper():
                continue  # skip dunders and helpers such as 'errorcode'
            self.assert_(getattr(errno, attr) in errno.errorcode,
                         'no %s attr in errno.errorcode' % attr)
def test_main():
    # Entry point used by CPython's regression-test driver (regrtest).
    test_support.run_unittest(ErrnoAttributeTests, ErrorcodeTests)
if __name__ == '__main__':
    test_main()
|
lgpl-2.1
|
jordy33/yowsup
|
yowsup/layers/protocol_presence/layer.py
|
61
|
1175
|
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import *
from yowsup.layers.protocol_iq.protocolentities import ErrorIqProtocolEntity
class YowPresenceProtocolLayer(YowProtocolLayer):
    """Protocol layer translating presence/last-seen traffic between raw
    protocol-tree nodes (lower layers) and protocol entities (upper layers).
    """

    def __init__(self):
        # Maps node tags to (receive-handler, send-handler) pairs consumed
        # by YowProtocolLayer's dispatch machinery.
        handleMap = {
            "presence": (self.recvPresence, self.sendPresence),
            # Outgoing-only: incoming iq results arrive via _sendIq callbacks.
            "iq": (None, self.sendIq)
        }
        super(YowPresenceProtocolLayer, self).__init__(handleMap)

    def __str__(self):
        return "Presence Layer"

    def sendPresence(self, entity):
        # Serialize the entity and push it down to the transport layers.
        self.entityToLower(entity)

    def recvPresence(self, node):
        # Wrap the raw node into an entity and hand it up the stack.
        self.toUpper(PresenceProtocolEntity.fromProtocolTreeNode(node))

    def sendIq(self, entity):
        # Only last-seen queries are handled here; other iq types are ignored.
        if entity.getXmlns() == LastseenIqProtocolEntity.XMLNS:
            self._sendIq(entity, self.onLastSeenSuccess, self.onLastSeenError)

    def onLastSeenSuccess(self, protocolTreeNode, lastSeenEntity):
        # Success callback: forward the parsed last-seen result upward.
        self.toUpper(ResultLastseenIqProtocolEntity.fromProtocolTreeNode(protocolTreeNode))

    def onLastSeenError(self, protocolTreeNode, lastSeenEntity):
        # Error callback: forward the error iq upward.
        self.toUpper(ErrorIqProtocolEntity.fromProtocolTreeNode(protocolTreeNode))
|
gpl-3.0
|
tinkerinestudio/Tinkerine-Suite
|
TinkerineSuite/pypy/lib-python/2.7/user.py
|
313
|
1627
|
"""Hook to allow user-specified customization code to run.
As a policy, Python doesn't run user-specified code on startup of
Python programs (interactive sessions execute the script specified in
the PYTHONSTARTUP environment variable if it exists).
However, some programs or sites may find it convenient to allow users
to have a standard customization file, which gets run when a program
requests it. This module implements such a mechanism. A program
that wishes to use the mechanism must execute the statement
import user
The user module looks for a file .pythonrc.py in the user's home
directory and if it can be opened, execfile()s it in its own global
namespace. Errors during this phase are not caught; that's up to the
program that imports the user module, if it wishes.
The user's .pythonrc.py could conceivably test for sys.version if it
wishes to do different things depending on the Python version.
"""
from warnings import warnpy3k
# This is legacy Python 2 stdlib code: warn once that the module is gone
# in Python 3, then remove the helper from the namespace.
warnpy3k("the user module has been removed in Python 3.0", stacklevel=2)
del warnpy3k

import os

# Resolve the user's home directory. Precedence: $HOME, then a POSIX
# tilde expansion, then the Windows HOMEDRIVE/HOMEPATH pair; falls back
# to the current directory when nothing is set.
home = os.curdir                        # Default
if 'HOME' in os.environ:
    home = os.environ['HOME']
elif os.name == 'posix':
    home = os.path.expanduser("~/")
elif os.name == 'nt':                   # Contributed by Jeff Bauer
    if 'HOMEPATH' in os.environ:
        if 'HOMEDRIVE' in os.environ:
            home = os.environ['HOMEDRIVE'] + os.environ['HOMEPATH']
        else:
            home = os.environ['HOMEPATH']

pythonrc = os.path.join(home, ".pythonrc.py")
try:
    f = open(pythonrc)
except IOError:
    # No customization file — silently do nothing, per module contract.
    pass
else:
    f.close()
    # Execute the user's startup file in this module's global namespace;
    # errors deliberately propagate to the importer.
    execfile(pythonrc)
|
agpl-3.0
|
wong2/sentry
|
src/sentry/migrations/0017_auto__add_field_projectmember_api_key.py
|
36
|
10324
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the ``api_key`` column to ProjectMember."""
        # Adding field 'ProjectMember.api_key'
        # Nullable + unique so existing rows can be migrated without
        # colliding on a default value.
        db.add_column('sentry_projectmember', 'api_key', self.gf('django.db.models.fields.CharField')(max_length=32, unique=True, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'ProjectMember.api_key'
db.delete_column('sentry_projectmember', 'api_key')
models = {
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sentry.filtervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.groupedmessage': {
'Meta': {'unique_together': "(('project', 'logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.message': {
'Meta': {'object_name': 'Message'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'message_set'", 'null': 'True', 'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'message_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'sentry.messagecountbyminute': {
'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.messagefiltervalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.GroupedMessage']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.messageindex': {
'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'sentry.project': {
'Meta': {'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'owned_project_set'", 'to': "orm['sentry.User']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'sentry.projectmember': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'ProjectMember'},
'api_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'permissions': ('django.db.models.fields.BigIntegerField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'project_set'", 'to': "orm['sentry.User']"})
}
}
complete_apps = ['sentry']
|
bsd-3-clause
|
catapult-project/catapult-csm
|
systrace/systrace/tracing_agents/agents_unittest.py
|
8
|
1650
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from systrace import util
from devil.android import device_utils
from devil.android.sdk import intent
from devil.android.sdk import keyevent
class BaseAgentTest(unittest.TestCase):
    """Base class for tracing-agent tests that need a live device with a
    running browser.  Requires at least one healthy adb-attached device."""

    def setUp(self):
        """Pick the first healthy device and ensure Chrome is running."""
        devices = device_utils.DeviceUtils.HealthyDevices()
        self.browser = 'stable'
        self.package_info = util.get_supported_browsers()[self.browser]
        self.device = devices[0]

        curr_browser = self.GetChromeProcessID()
        # Fixed: compare against None with `is`, not `==` (PEP 8 / E711).
        if curr_browser is None:
            self.StartBrowser()

    def tearDown(self):
        # Stop the browser after each test to ensure that it doesn't interfere
        # with subsequent tests, e.g. by holding the devtools socket open.
        self.device.ForceStop(self.package_info.package)

    def StartBrowser(self):
        """Wake and unlock the device, then launch the browser on about:blank."""
        # Turn on the device screen.
        self.device.SetScreen(True)
        # Unlock device.
        self.device.SendKeyEvent(keyevent.KEYCODE_MENU)
        # Start browser.
        self.device.StartActivity(
            intent.Intent(activity=self.package_info.activity,
                          package=self.package_info.package,
                          data='about:blank',
                          extras={'create_new_tab': True}),
            blocking=True, force_stop=True)

    def GetChromeProcessID(self):
        """Return the first PID of the browser package, or None if it is
        not currently running."""
        chrome_processes = self.device.GetPids(self.package_info.package)
        if (self.package_info.package in chrome_processes and
                len(chrome_processes[self.package_info.package]) > 0):
            return chrome_processes[self.package_info.package][0]
        return None
|
bsd-3-clause
|
alextruberg/custom_django
|
django/contrib/sites/models.py
|
114
|
3549
|
from __future__ import unicode_literals
import string
from django.db import models
from django.db.models.signals import pre_save, pre_delete
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from django.core.exceptions import ValidationError
SITE_CACHE = {}
def _simple_domain_name_validator(value):
"""
Validates that the given value contains no whitespaces to prevent common
typos.
"""
if not value:
return
checks = ((s in value) for s in string.whitespace)
if any(checks):
raise ValidationError(
_("The domain name cannot contain any spaces or tabs."),
code='invalid',
)
class SiteManager(models.Manager):

    def get_current(self):
        """
        Returns the current ``Site`` based on the SITE_ID in the
        project's settings. The ``Site`` object is cached the first
        time it's retrieved from the database.
        """
        from django.conf import settings
        try:
            sid = settings.SITE_ID
        except AttributeError:
            from django.core.exceptions import ImproperlyConfigured
            raise ImproperlyConfigured("You're using the Django \"sites framework\" without having set the SITE_ID setting. Create a site in your database and set the SITE_ID setting to fix this error.")
        # Fill the module-level cache on first access; afterwards every
        # call is served without hitting the database.
        if sid not in SITE_CACHE:
            SITE_CACHE[sid] = self.get(pk=sid)
        return SITE_CACHE[sid]

    def clear_cache(self):
        """Clears the ``Site`` object cache."""
        global SITE_CACHE
        SITE_CACHE = {}
@python_2_unicode_compatible
class Site(models.Model):
    # Host name, e.g. "example.com"; whitespace is rejected by the
    # validator to prevent common typos.
    domain = models.CharField(_('domain name'), max_length=100,
        validators=[_simple_domain_name_validator])
    # Human-readable site title shown in the admin.
    name = models.CharField(_('display name'), max_length=50)
    objects = SiteManager()

    class Meta:
        db_table = 'django_site'
        verbose_name = _('site')
        verbose_name_plural = _('sites')
        ordering = ('domain',)

    def __str__(self):
        return self.domain
@python_2_unicode_compatible
class RequestSite(object):
    """
    A class that shares the primary interface of Site (i.e., it has
    ``domain`` and ``name`` attributes) but gets its data from a Django
    HttpRequest object rather than from a database.

    The save() and delete() methods raise NotImplementedError.
    """
    def __init__(self, request):
        # Both attributes mirror the request's Host header.
        host = request.get_host()
        self.domain = host
        self.name = host

    def __str__(self):
        return self.domain

    def save(self, force_insert=False, force_update=False):
        raise NotImplementedError('RequestSite cannot be saved.')

    def delete(self):
        raise NotImplementedError('RequestSite cannot be deleted.')
def get_current_site(request):
    """
    Checks if contrib.sites is installed and returns either the current
    ``Site`` object or a ``RequestSite`` object based on the request.
    """
    # Without the sites app installed, fall back to a request-derived stand-in.
    if not Site._meta.installed:
        return RequestSite(request)
    return Site.objects.get_current()
def clear_site_cache(sender, **kwargs):
    """
    Clears the cache (if primed) each time a site is saved or deleted
    """
    instance = kwargs['instance']
    try:
        del SITE_CACHE[instance.pk]
    except KeyError:
        # Not cached yet -- nothing to invalidate.
        pass

# Keep the module-level cache coherent with the database.
pre_save.connect(clear_site_cache, sender=Site)
pre_delete.connect(clear_site_cache, sender=Site)
|
bsd-3-clause
|
Distrotech/intellij-community
|
python/lib/Lib/site-packages/django/contrib/gis/tests/geo3d/models.py
|
404
|
1835
|
from django.contrib.gis.db import models
class City3D(models.Model):
    # Test model: named city with a 3D (x, y, z) point geometry.
    name = models.CharField(max_length=30)
    point = models.PointField(dim=3)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Interstate2D(models.Model):
    # Test model: 2D line string in NAD83 geographic coordinates (SRID 4269).
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Interstate3D(models.Model):
    # Test model: 3D counterpart of Interstate2D (same SRID, dim=3).
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class InterstateProj2D(models.Model):
    # Test model: 2D line string in a projected coordinate system (SRID 32140).
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class InterstateProj3D(models.Model):
    # Test model: 3D counterpart of InterstateProj2D (same SRID, dim=3).
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Polygon2D(models.Model):
    # Test model: 2D polygon in a projected coordinate system (SRID 32140).
    name = models.CharField(max_length=30)
    poly = models.PolygonField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Polygon3D(models.Model):
    # Test model: 3D counterpart of Polygon2D (same SRID, dim=3).
    name = models.CharField(max_length=30)
    poly = models.PolygonField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Point2D(models.Model):
    # Test model: plain 2D point, default SRID.
    point = models.PointField()
    objects = models.GeoManager()
class Point3D(models.Model):
    # Test model: 3D point, default SRID.
    point = models.PointField(dim=3)
    objects = models.GeoManager()
class MultiPoint3D(models.Model):
    # Test model: 3D multi-point geometry, default SRID.
    mpoint = models.MultiPointField(dim=3)
    objects = models.GeoManager()
|
apache-2.0
|
ramramps/mkdocs
|
mkdocs/legacy.py
|
29
|
4548
|
from __future__ import unicode_literals
import logging
from mkdocs import utils
from mkdocs.exceptions import ConfigurationError
log = logging.getLogger(__name__)
def pages_compat_shim(original_pages):
    """
    Support legacy pages configuration

    Re-write the pages config from MkDocs <=0.12 to match the
    new nested structure added in 0.13.

    Given a pages configuration in the old style of:

        pages:
        - ['index.md', 'Home']
        - ['user-guide/writing-your-docs.md', 'User Guide']
        - ['user-guide/styling-your-docs.md', 'User Guide']
        - ['about/license.md', 'About', 'License']
        - ['about/release-notes.md', 'About']
        - ['help/contributing.md', 'Help', 'Contributing']
        - ['support.md']
        - ['cli.md', 'CLI Guide']

    Rewrite it to look like:

        pages:
        - Home: index.md
        - User Guide:
            - user-guide/writing-your-docs.md
            - user-guide/styling-your-docs.md
        - About:
            - License: about/license.md
            - about/release-notes.md
        - Help:
            - Contributing: help/contributing.md
        - support.md
        - CLI Guide: cli.md

    TODO: Remove in 1.0
    """
    log.warning("The pages config in the mkdocs.yml uses the deprecated "
                "structure. This will be removed in the next release of "
                "MkDocs. See for details on updating: "
                "http://www.mkdocs.org/about/release-notes/")

    new_pages = []

    for config_line in original_pages:
        if isinstance(config_line, utils.string_types):
            config_line = [config_line, ]

        if len(config_line) not in (1, 2, 3):
            # Fixed: the format arguments were swapped -- {0} is the item
            # count and {1} is the offending line, matching the message text.
            msg = (
                "Line in 'page' config contained {0} items. In Line {1}. "
                "Expected 1, 2 or 3 strings.".format(
                    len(config_line), config_line)
            )
            raise ConfigurationError(msg)

        # First we need to pad out the config line as it could contain
        # 1-3 items.
        path, category, title = (list(config_line) + [None, None])[:3]

        if len(new_pages) > 0:
            # Get the previous top-level page so we can see if the category
            # matches up with the one we have now.
            prev_cat, subpages = next(iter(new_pages[-1].items()))
        else:
            # We are on the first page
            prev_cat, subpages = None, []

        # If the category is different, add a new top level category. If the
        # previous category is None, the it's another top level one too.
        if prev_cat is None or prev_cat != category:
            subpages = []
            new_pages.append({category: subpages})

        # Add the current page to the determined category.
        subpages.append({title: path})

    # We need to do a bit of cleaning up to match the new structure. In the
    # above example, pages can either be `- file.md` or `- Title: file.md`.
    # For pages without a title we currently have `- None: file.md` - so we
    # need to remove those Nones by changing from a dict to just a string with
    # the path.
    for i, category in enumerate(new_pages):
        # Categories are a dictionary with one key as the name and the value
        # is a list of pages. So, grab that from the dict.
        category, pages = next(iter(category.items()))

        # If we only have one page, then we can assume it is a top level
        # category and no further nesting is required unless that single page
        # has a title itself,
        if len(pages) == 1:
            title, path = pages.pop().popitem()
            # If we have a title, it should be a sub page
            if title is not None:
                pages.append({title: path})
            # if we have a category, but no title it should be a top-level page
            elif category is not None:
                new_pages[i] = {category: path}
            # if we have no category or title, it must be a top level page with
            # an atomatic title.
            else:
                new_pages[i] = path
        else:
            # We have more than one page, so the category is valid. We just
            # need to iterate through and convert any {None: path} dicts to
            # be just the path string.
            for j, page in enumerate(pages):
                title, path = page.popitem()
                if title:
                    pages[j] = {title: path}
                else:
                    pages[j] = path

    return new_pages
|
bsd-2-clause
|
wnt-zhp/hufce
|
django/contrib/admin/templatetags/admin_list.py
|
79
|
15696
|
import datetime
from django.contrib.admin.util import lookup_field, display_for_field, label_for_field
from django.contrib.admin.views.main import (ALL_VAR, EMPTY_CHANGELIST_VALUE,
ORDER_VAR, PAGE_VAR, SEARCH_VAR)
from django.contrib.admin.templatetags.admin_static import static
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import formats
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.utils.encoding import smart_unicode, force_unicode
from django.template import Library
from django.template.loader import get_template
from django.template.context import Context
register = Library()
DOT = '.'
@register.simple_tag
def paginator_number(cl, i):
    """
    Generates an individual page index link in a paginated list.
    """
    if i == DOT:
        # Ellipsis placeholder between page-number groups.
        return u'... '
    if i == cl.page_num:
        # The current page is rendered as plain text, not a link.
        return mark_safe(u'<span class="this-page">%d</span> ' % (i + 1))
    # Mark the last page link with the "end" class.
    end_class = ' class="end"' if i == cl.paginator.num_pages - 1 else ''
    return mark_safe(u'<a href="%s"%s>%d</a> ' % (
        escape(cl.get_query_string({PAGE_VAR: i})), end_class, i + 1))
@register.inclusion_tag('admin/pagination.html')
def pagination(cl):
    """
    Generates the series of links to the pages in a paginated list.
    """
    paginator, page_num = cl.paginator, cl.page_num

    # Pagination is only needed when the results span several pages and the
    # user has not requested (or cannot request) the full list at once.
    pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
    if not pagination_required:
        page_range = []
    else:
        ON_EACH_SIDE = 3
        ON_ENDS = 2

        # If there are 10 or fewer pages, display links to every page.
        # Otherwise, do some fancy
        if paginator.num_pages <= 10:
            page_range = range(paginator.num_pages)
        else:
            # Insert "smart" pagination links, so that there are always ON_ENDS
            # links at either end of the list of pages, and there are always
            # ON_EACH_SIDE links at either end of the "current page" link.
            page_range = []
            if page_num > (ON_EACH_SIDE + ON_ENDS):
                page_range.extend(range(0, ON_EACH_SIDE - 1))
                page_range.append(DOT)
                page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
            else:
                page_range.extend(range(0, page_num + 1))
            if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
                page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
                page_range.append(DOT)
                page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
            else:
                page_range.extend(range(page_num + 1, paginator.num_pages))

    need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
    return {
        'cl': cl,
        'pagination_required': pagination_required,
        'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
        'page_range': page_range,
        'ALL_VAR': ALL_VAR,
        # NOTE(review): presumably consumed by the pagination template;
        # confirm before removing.
        '1': 1,
    }
def result_headers(cl):
    """
    Generates the list column headers.

    Yields one dict per column in ``cl.list_display`` describing its label,
    whether it is sortable, its current sort state, and the query-string
    URLs that change the ordering.
    """
    ordering_field_columns = cl.get_ordering_field_columns()
    for i, field_name in enumerate(cl.list_display):
        text, attr = label_for_field(field_name, cl.model,
            model_admin = cl.model_admin,
            return_attr = True
        )
        if attr:
            # Potentially not sortable

            # if the field is the action checkbox: no sorting and special class
            if field_name == 'action_checkbox':
                yield {
                    "text": text,
                    "class_attrib": mark_safe(' class="action-checkbox-column"'),
                    "sortable": False,
                }
                continue

            admin_order_field = getattr(attr, "admin_order_field", None)
            if not admin_order_field:
                # Not sortable
                yield {
                    "text": text,
                    "sortable": False,
                }
                continue

        # OK, it is sortable if we got this far
        th_classes = ['sortable']
        order_type = ''
        new_order_type = 'asc'
        sort_priority = 0
        sorted = False
        # Is it currently being sorted on?
        if i in ordering_field_columns:
            sorted = True
            order_type = ordering_field_columns.get(i).lower()
            sort_priority = ordering_field_columns.keys().index(i) + 1
            th_classes.append('sorted %sending' % order_type)
            new_order_type = {'asc': 'desc', 'desc': 'asc'}[order_type]

        # build new ordering param
        o_list_primary = []  # URL for making this field the primary sort
        o_list_remove = []  # URL for removing this field from sort
        o_list_toggle = []  # URL for toggling order type for this field
        # Encode one ordering entry, e.g. "3" for ascending or "-3" for
        # descending on column 3.
        make_qs_param = lambda t, n: ('-' if t == 'desc' else '') + str(n)

        for j, ot in ordering_field_columns.items():
            if j == i:  # Same column
                param = make_qs_param(new_order_type, j)
                # We want clicking on this header to bring the ordering to the
                # front
                o_list_primary.insert(0, param)
                o_list_toggle.append(param)
                # o_list_remove - omit
            else:
                param = make_qs_param(ot, j)
                o_list_primary.append(param)
                o_list_toggle.append(param)
                o_list_remove.append(param)

        if i not in ordering_field_columns:
            o_list_primary.insert(0, make_qs_param(new_order_type, i))

        yield {
            "text": text,
            "sortable": True,
            "sorted": sorted,
            "ascending": order_type == "asc",
            "sort_priority": sort_priority,
            "url_primary": cl.get_query_string({ORDER_VAR: '.'.join(o_list_primary)}),
            "url_remove": cl.get_query_string({ORDER_VAR: '.'.join(o_list_remove)}),
            "url_toggle": cl.get_query_string({ORDER_VAR: '.'.join(o_list_toggle)}),
            "class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')
        }
def _boolean_icon(field_val):
    """Return an ``<img>`` tag showing the yes/no/unknown icon for a
    True/False/None field value."""
    image_name = {True: 'yes', False: 'no', None: 'unknown'}[field_val]
    icon_url = static('admin/img/icon-%s.gif' % image_name)
    return mark_safe(u'<img src="%s" alt="%s" />' % (icon_url, field_val))
def items_for_result(cl, result, form):
    """
    Generates the actual list of data.

    Yields one HTML cell (<th>/<td>) per column in ``cl.list_display`` for
    the given row, plus a trailing hidden-pk cell for editable lists.
    """
    first = True
    pk = cl.lookup_opts.pk.attname
    for field_name in cl.list_display:
        row_class = ''
        try:
            f, attr, value = lookup_field(field_name, result, cl.model_admin)
        except (AttributeError, ObjectDoesNotExist):
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            # f is None for non-model-field columns (callables, properties).
            if f is None:
                if field_name == u'action_checkbox':
                    row_class = ' class="action-checkbox"'
                allow_tags = getattr(attr, 'allow_tags', False)
                boolean = getattr(attr, 'boolean', False)
                if boolean:
                    allow_tags = True
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_unicode(value)
                # Strip HTML tags in the resulting text, except if the
                # function has an "allow_tags" attribute set to True.
                if not allow_tags:
                    result_repr = escape(result_repr)
                else:
                    result_repr = mark_safe(result_repr)
            else:
                if isinstance(f.rel, models.ManyToOneRel):
                    field_val = getattr(result, f.name)
                    if field_val is None:
                        result_repr = EMPTY_CHANGELIST_VALUE
                    else:
                        result_repr = escape(field_val)
                else:
                    result_repr = display_for_field(value, f)
                # Dates, times and FKs should not wrap in the table cell.
                if isinstance(f, models.DateField)\
                or isinstance(f, models.TimeField)\
                or isinstance(f, models.ForeignKey):
                    row_class = ' class="nowrap"'
        # NOTE(review): this literal is a non-breaking space in upstream
        # Django -- confirm the character survived re-encoding.
        if force_unicode(result_repr) == '':
            result_repr = mark_safe(' ')
        # If list_display_links not defined, add the link tag to the first field
        if (first and not cl.list_display_links) or field_name in cl.list_display_links:
            table_tag = {True:'th', False:'td'}[first]
            first = False
            url = cl.url_for_result(result)
            # Convert the pk to something that can be used in Javascript.
            # Problem cases are long ints (23L) and non-ASCII strings.
            if cl.to_field:
                attr = str(cl.to_field)
            else:
                attr = pk
            value = result.serializable_value(attr)
            result_id = repr(force_unicode(value))[1:]
            yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
                (table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
        else:
            # By default the fields come from ModelAdmin.list_editable, but if we pull
            # the fields out of the form instead of list_editable custom admins
            # can provide fields on a per request basis
            if (form and field_name in form.fields and not (
                    field_name == cl.model._meta.pk.name and
                    form[cl.model._meta.pk.name].is_hidden)):
                bf = form[field_name]
                result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
            else:
                result_repr = conditional_escape(result_repr)
            yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
    if form and not form[cl.model._meta.pk.name].is_hidden:
        yield mark_safe(u'<td>%s</td>' % force_unicode(form[cl.model._meta.pk.name]))
class ResultList(list):
    """
    List of rendered result cells annotated with the row's form object, so
    templates can report per-row validation errors in list_editable change
    lists.  Kept to maintain backwards compatibility with existing admin
    templates.
    """
    def __init__(self, form, *items):
        super(ResultList, self).__init__(*items)
        self.form = form
def results(cl):
    # Yield one ResultList (rendered cells) per row.  When the change list
    # is editable, pair each row with its formset form so the template can
    # show inline validation errors.
    if cl.formset:
        for res, form in zip(cl.result_list, cl.formset.forms):
            yield ResultList(form, items_for_result(cl, res, form))
    else:
        for res in cl.result_list:
            yield ResultList(None, items_for_result(cl, res, None))
def result_hidden_fields(cl):
    # Yield the hidden primary-key inputs of an editable change list so the
    # template can render them outside the visible table cells.
    if cl.formset:
        for res, form in zip(cl.result_list, cl.formset.forms):
            if form[cl.model._meta.pk.name].is_hidden:
                yield mark_safe(force_unicode(form[cl.model._meta.pk.name]))
@register.inclusion_tag("admin/change_list_results.html")
def result_list(cl):
    """
    Displays the headers and data list together
    """
    headers = list(result_headers(cl))
    # Count how many columns the list is currently sorted on.
    num_sorted_fields = sum(
        1 for h in headers if h['sortable'] and h['sorted'])
    return {
        'cl': cl,
        'result_hidden_fields': list(result_hidden_fields(cl)),
        'result_headers': headers,
        'num_sorted_fields': num_sorted_fields,
        'results': list(results(cl)),
    }
@register.inclusion_tag('admin/date_hierarchy.html')
def date_hierarchy(cl):
    """
    Displays the date hierarchy for date drill-down functionality.

    Returns a context dict with a 'back' link and per-level 'choices',
    drilling year -> month -> day based on the current query parameters.
    """
    if cl.date_hierarchy:
        field_name = cl.date_hierarchy
        year_field = '%s__year' % field_name
        month_field = '%s__month' % field_name
        day_field = '%s__day' % field_name
        field_generic = '%s__' % field_name
        year_lookup = cl.params.get(year_field)
        month_lookup = cl.params.get(month_field)
        day_lookup = cl.params.get(day_field)

        # Build a query string that replaces this field's date filters while
        # preserving every other parameter.
        link = lambda d: cl.get_query_string(d, [field_generic])

        if not (year_lookup or month_lookup or day_lookup):
            # select appropriate start level
            date_range = cl.query_set.aggregate(first=models.Min(field_name),
                last=models.Max(field_name))
            if date_range['first'] and date_range['last']:
                if date_range['first'].year == date_range['last'].year:
                    year_lookup = date_range['first'].year
                    if date_range['first'].month == date_range['last'].month:
                        month_lookup = date_range['first'].month

        if year_lookup and month_lookup and day_lookup:
            # Fully drilled down: show the single day, back to the month view.
            day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup, month_field: month_lookup}),
                    'title': capfirst(formats.date_format(day, 'YEAR_MONTH_FORMAT'))
                },
                'choices': [{'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))}]
            }
        elif year_lookup and month_lookup:
            # Month view: list each day that has records.
            days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
            return {
                'show': True,
                'back': {
                    'link': link({year_field: year_lookup}),
                    'title': str(year_lookup)
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
                    'title': capfirst(formats.date_format(day, 'MONTH_DAY_FORMAT'))
                } for day in days]
            }
        elif year_lookup:
            # Year view: list each month that has records.
            months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
            return {
                'show' : True,
                'back': {
                    'link' : link({}),
                    'title': _('All dates')
                },
                'choices': [{
                    'link': link({year_field: year_lookup, month_field: month.month}),
                    'title': capfirst(formats.date_format(month, 'YEAR_MONTH_FORMAT'))
                } for month in months]
            }
        else:
            # Top level: list each year that has records.
            years = cl.query_set.dates(field_name, 'year')
            return {
                'show': True,
                'choices': [{
                    'link': link({year_field: str(year.year)}),
                    'title': str(year.year),
                } for year in years]
            }
@register.inclusion_tag('admin/search_form.html')
def search_form(cl):
    """
    Displays a search form for searching the list.
    """
    # Only show the result count when the search actually narrowed the list.
    show_count = cl.result_count != cl.full_result_count
    return {
        'cl': cl,
        'show_result_count': show_count,
        'search_var': SEARCH_VAR,
    }
@register.simple_tag
def admin_list_filter(cl, spec):
    # Render one sidebar filter section using the template named by the
    # filter spec itself.
    tpl = get_template(spec.template)
    return tpl.render(Context({
        'title': spec.title,
        'choices' : list(spec.choices(cl)),
        'spec': spec,
    }))
@register.inclusion_tag('admin/actions.html', takes_context=True)
def admin_actions(context):
    """
    Track the number of times the action field has been rendered on the page,
    so we know which value to use.
    """
    # Starts at -1 so the first render yields index 0.
    previous = context.get('action_index', -1)
    context['action_index'] = previous + 1
    return context
|
gpl-3.0
|
dcramer/django-db-routes
|
src/dbroutes/backend/base.py
|
1
|
1560
|
import re
from django.db.backends.postgresql_psycopg2.base import *
# from the postgresql doc
SQL_IDENTIFIER_RE = re.compile(r'^[_a-zA-Z][_a-zA-Z0-9]{,62}$')
PUBLIC_SCHEMA_NAME = 'public'
def _check_identifier(identifier):
    """Reject schema names that are not plain SQL identifiers, to keep the
    interpolated SET search_path statement safe."""
    if SQL_IDENTIFIER_RE.match(identifier) is None:
        raise RuntimeError("Invalid string used for the schema name.")
class DatabaseWrapper(DatabaseWrapper):
    """
    Adds the capability to manipulate the search_path using set_schema_name.
    """
    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Default to the schema configured for this database, or "public".
        self.set_schema(self.settings_dict.get('SCHEMA', PUBLIC_SCHEMA_NAME))

    def set_schema(self, schema_name, include_public=True):
        """Select the schema used for subsequently opened cursors; when
        include_public is True, "public" is appended to the search_path."""
        self.schema_name = schema_name
        self.include_public_schema = include_public

    def set_schema_to_public(self):
        """Route subsequent queries to the public schema."""
        # NOTE(review): include_public_schema is intentionally left
        # unchanged here -- it has no effect once schema_name is "public".
        self.schema_name = PUBLIC_SCHEMA_NAME

    def _cursor(self):
        """Return a cursor whose search_path is set to the active schema."""
        cursor = super(DatabaseWrapper, self)._cursor()
        if not self.schema_name:
            # Fixed typo in the error message: "Did your forget" -> "Did you forget".
            raise ValueError(
                "Database schema not set. Did you forget to call set_schema()?")
        # Defense in depth: the schema name is interpolated below, so it must
        # be a plain identifier.
        _check_identifier(self.schema_name)
        if self.schema_name == PUBLIC_SCHEMA_NAME:
            schemas = [PUBLIC_SCHEMA_NAME]
        elif self.include_public_schema:
            schemas = [self.schema_name, PUBLIC_SCHEMA_NAME]
        else:
            schemas = [self.schema_name]
        query = 'SET search_path = %s' % (','.join('%s' for _ in schemas),)
        cursor.execute(query, schemas)
        return cursor
|
apache-2.0
|
edgarli/proj8
|
env/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/latin1prober.py
|
1778
|
5232
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe
from .compat import wrap_ord
# Number of frequency categories used by Latin1Prober._mFreqCounter
# (category 0, "illegal", is never incremented -- feed() bails out instead).
FREQ_CAT_NUM = 4
# Character classes for windows-1252 bytes; these are the row/column
# indices of Latin1ClassModel below.
UDF = 0  # undefined
OTH = 1  # other
ASC = 2  # ascii capital letter
ASS = 3  # ascii small letter
ACV = 4  # accent capital vowel
ACO = 5  # accent capital other
ASV = 6  # accent small vowel
ASO = 7  # accent small other
CLASS_NUM = 8  # total classes
# Maps each of the 256 windows-1252 byte values to one of the character
# classes above; 8 entries per row, the trailing comment gives the byte range.
Latin1_CharToClass = (
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 00 - 07
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 08 - 0F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 10 - 17
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 18 - 1F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 20 - 27
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 28 - 2F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 30 - 37
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 38 - 3F
    OTH, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 40 - 47
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 48 - 4F
    ASC, ASC, ASC, ASC, ASC, ASC, ASC, ASC,  # 50 - 57
    ASC, ASC, ASC, OTH, OTH, OTH, OTH, OTH,  # 58 - 5F
    OTH, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 60 - 67
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 68 - 6F
    ASS, ASS, ASS, ASS, ASS, ASS, ASS, ASS,  # 70 - 77
    ASS, ASS, ASS, OTH, OTH, OTH, OTH, OTH,  # 78 - 7F
    OTH, UDF, OTH, ASO, OTH, OTH, OTH, OTH,  # 80 - 87
    OTH, OTH, ACO, OTH, ACO, UDF, ACO, UDF,  # 88 - 8F
    UDF, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # 90 - 97
    OTH, OTH, ASO, OTH, ASO, UDF, ASO, ACO,  # 98 - 9F
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A0 - A7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # A8 - AF
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B0 - B7
    OTH, OTH, OTH, OTH, OTH, OTH, OTH, OTH,  # B8 - BF
    ACV, ACV, ACV, ACV, ACV, ACV, ACO, ACO,  # C0 - C7
    ACV, ACV, ACV, ACV, ACV, ACV, ACV, ACV,  # C8 - CF
    ACO, ACO, ACV, ACV, ACV, ACV, ACV, OTH,  # D0 - D7
    ACV, ACV, ACV, ACV, ACV, ACO, ACO, ACO,  # D8 - DF
    ASV, ASV, ASV, ASV, ASV, ASV, ASO, ASO,  # E0 - E7
    ASV, ASV, ASV, ASV, ASV, ASV, ASV, ASV,  # E8 - EF
    ASO, ASO, ASV, ASV, ASV, ASV, ASV, OTH,  # F0 - F7
    ASV, ASV, ASV, ASV, ASV, ASO, ASO, ASO,  # F8 - FF
)
# 0 : illegal
# 1 : very unlikely
# 2 : normal
# 3 : very likely
# 8x8 table indexed by (previous_class * CLASS_NUM + current_class): how
# plausible each two-character class sequence is in real Latin-1 text.
Latin1ClassModel = (
    # UDF OTH ASC ASS ACV ACO ASV ASO
    0, 0, 0, 0, 0, 0, 0, 0,  # UDF
    0, 3, 3, 3, 3, 3, 3, 3,  # OTH
    0, 3, 3, 3, 3, 3, 3, 3,  # ASC
    0, 3, 3, 3, 1, 1, 3, 3,  # ASS
    0, 3, 3, 3, 1, 2, 1, 2,  # ACV
    0, 3, 3, 3, 3, 3, 3, 3,  # ACO
    0, 3, 1, 3, 1, 1, 1, 3,  # ASV
    0, 3, 1, 3, 1, 1, 3, 3,  # ASO
)
class Latin1Prober(CharSetProber):
    """Prober for windows-1252, the Latin-1 superset used on the web."""

    def __init__(self):
        CharSetProber.__init__(self)
        self.reset()

    def reset(self):
        # Start from the neutral "other" class so the first real byte pairs
        # with OTH in the transition table.
        self._mLastCharClass = OTH
        self._mFreqCounter = [0 for _ in range(FREQ_CAT_NUM)]
        CharSetProber.reset(self)

    def get_charset_name(self):
        return "windows-1252"

    def feed(self, aBuf):
        """Consume a chunk of bytes and return the prober state."""
        filtered = self.filter_with_english_letters(aBuf)
        for byte in filtered:
            current_class = Latin1_CharToClass[wrap_ord(byte)]
            row_offset = self._mLastCharClass * CLASS_NUM
            frequency = Latin1ClassModel[row_offset + current_class]
            if frequency == 0:
                # Illegal class pair: this cannot be Latin-1 text.
                self._mState = eNotMe
                break
            self._mFreqCounter[frequency] += 1
            self._mLastCharClass = current_class
        return self.get_state()

    def get_confidence(self):
        if self.get_state() == eNotMe:
            return 0.01
        total = sum(self._mFreqCounter)
        if total < 0.01:
            confidence = 0.0
        else:
            likely = self._mFreqCounter[3]
            unlikely = self._mFreqCounter[1] * 20.0
            confidence = (likely - unlikely) / total
            if confidence < 0.0:
                confidence = 0.0
        # Scale down so that more specific probers win over this catch-all.
        confidence = confidence * 0.73
        return confidence
|
artistic-2.0
|
Affix/fas
|
fas/group.py
|
1
|
33669
|
# -*- coding: utf-8 -*-
#
# Copyright © 2008 Ricky Zhou
# Copyright © 2008-2010 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2. This program is distributed in the hope that it
# will be useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details. You should have
# received a copy of the GNU General Public License along with this program;
# if not, write to the Free Software Foundation, Inc., 51 Franklin Street,
# Fifth Floor, Boston, MA 02110-1301, USA. Any Red Hat trademarks that are
# incorporated in the source code or documentation are not subject to the GNU
# General Public License and may only be used or replicated with the express
# permission of Red Hat, Inc.
#
# Author(s): Ricky Zhou <ricky@fedoraproject.org>
# Mike McGrath <mmcgrath@redhat.com>
# Toshio Kuratomi <toshio@redhat.com>
#
# Does this need to come before the import turbogears or does it not matter?
try:
from fedora.util import tg_url
except ImportError:
from turbogears import url as tg_url
import turbogears
from turbogears import controllers, expose, identity, validate, validators, \
error_handler, config
from turbogears.database import session
import cherrypy
import sqlalchemy
from sqlalchemy import select, func
from sqlalchemy.sql import and_
import re
import fas
from fas.model import People, PeopleTable, PersonRoles, PersonRolesTable, \
Groups, GroupsTable, Log
from fas.auth import can_view_group, can_create_group, can_admin_group, \
can_edit_group, can_apply_group, can_remove_user, can_upgrade_user, \
can_sponsor_user, can_downgrade_user, is_approved
from fas.validators import UnknownGroup, KnownGroup, ValidGroupType, \
ValidRoleSort, KnownUser
from fas.util import send_mail
# One FormEncode Schema per Group controller method; each attribute names a
# request parameter and the validator applied to it.
class GroupView(validators.Schema):
    groupname = KnownGroup
class GroupMembers(validators.Schema):
    groupname = KnownGroup
    order_by = ValidRoleSort
class GroupCreate(validators.Schema):
    # Group names: 3-32 chars of lowercase alphanumerics, dash, underscore,
    # and not already taken.
    name = validators.All(
        UnknownGroup,
        validators.String(max=32, min=3),
        validators.Regex(regex='^[a-z0-9\-_]+$'),
    )
    display_name = validators.NotEmpty
    owner = validators.All(
        KnownUser,
        validators.NotEmpty,
    )
    prerequisite = KnownGroup
    group_type = ValidGroupType
    needs_sponsor = validators.Bool()
    user_can_remove = validators.Bool()
    invite_only = validators.Bool()
class GroupEdit(validators.Schema):
    groupname = KnownGroup
class GroupSave(validators.Schema):
    groupname = validators.All(KnownGroup, validators.String(max=32, min=2))
    display_name = validators.NotEmpty
    owner = KnownUser
    prerequisite = KnownGroup
    group_type = ValidGroupType
    invite_only = validators.Bool()
class GroupApply(validators.Schema):
    groupname = KnownGroup
    targetname = KnownUser
class GroupSponsor(validators.Schema):
    groupname = KnownGroup
    targetname = KnownUser
class GroupRemove(validators.Schema):
    groupname = KnownGroup
    targetname = KnownUser
class GroupUpgrade(validators.Schema):
    groupname = KnownGroup
    targetname = KnownUser
class GroupDowngrade(validators.Schema):
    groupname = KnownGroup
    targetname = KnownUser
class GroupInvite(validators.Schema):
    groupname = KnownGroup
class GroupSendInvite(validators.Schema):
    '''Validate the parameters of Group.sendinvite().'''
    groupname = KnownGroup
    # BUGFIX: the original line ended with a stray trailing comma, which made
    # `target` a 1-tuple containing the validator instead of the validator
    # itself, so the invitee address was never actually validated.
    target = validators.Email(not_empty=True, strip=True)
#class findUser(widgets.WidgetsList):
# username = widgets.AutoCompleteField(label=_('Username'), search_controller='search', search_param='username', result_name='people')
# action = widgets.HiddenField(default='apply', validator=validators.String(not_empty=True))
# groupname = widgets.HiddenField(validator=validators.String(not_empty=True))
#
#findUserForm = widgets.ListForm(fields=findUser(), submit_text=_('Invite'))
class Group(controllers.Controller):
    '''TurboGears controller handling all /group/* URLs of the account system.'''
    def __init__(self):
        '''Create a Group Controller.'''
    @identity.require(turbogears.identity.not_anonymous())
    def index(self):
        '''Perhaps show a nice explanatory message about groups here?'''
        # NOTE(review): no @expose decorator here, unlike every other handler
        # in this controller -- confirm the default rendering is intended.
        return dict()
def jsonRequest(self):
    '''Return True when the client requested JSON output (?tg_format=json).'''
    requested_format = cherrypy.request.params.get('tg_format')
    return requested_format == 'json'
@expose(template="fas.templates.error", allow_json=True)
def error(self, tg_errors=None):
    '''Show a friendly error message'''
    if not tg_errors:
        # A direct hit on /group/error has nothing to display; go home.
        turbogears.redirect('/')
    return dict(tg_errors=tg_errors)
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupView())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template="fas.templates.group.view", allow_json=True)
def view(self, groupname, order_by='username'):
    '''View group'''
    # Maps the public order_by keys onto the SQL column aliases produced by
    # the joined query below.
    # NOTE(review): order_by is not covered by the GroupView schema, so an
    # unknown key raises KeyError at sort_map[order_by] -- confirm intended.
    sort_map = { 'username': 'people_1.username',
        'creation': 'person_roles_creation',
        'approval': 'person_roles_approval',
        'role_status': 'person_roles_role_status',
        'role_type': 'person_roles_role_type',
        'sponsor': 'people_2.username',
        }
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    group = Groups.by_name(groupname)
    if not can_view_group(person, group):
        turbogears.flash(_("You cannot view '%s'") % group.name)
        turbogears.redirect('/group/list')
        # NOTE(review): redirect() presumably raises, making this return
        # unreachable -- kept for symmetry with the other handlers.
        return dict()
    # Also return information on who is not sponsored
    unsponsored = PersonRoles.query.join('group').join('member',
        aliased=True).outerjoin('sponsor', aliased=True).filter(
        and_(Groups.name==groupname,
        PersonRoles.role_status=='unapproved')).order_by(sort_map[order_by])
    unsponsored.json_props = {'PersonRoles': ['member']}
    return dict(group=group, sponsor_queue=unsponsored)
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupMembers())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template="fas.templates.group.members", allow_json=True)
def members(self, groupname, search=u'a*', role_type=None,
        order_by='username'):
    '''View group'''
    # Same order_by -> SQL alias map as in view(); order_by is validated by
    # the GroupMembers schema here.
    sort_map = { 'username': 'people_1.username',
        'creation': 'person_roles_creation',
        'approval': 'person_roles_approval',
        'role_status': 'person_roles_role_status',
        'role_type': 'person_roles_role_type',
        'sponsor': 'people_2.username',
        }
    # Python 2: coerce byte strings to unicode before translate() below.
    if not isinstance(search, unicode) and isinstance(search, basestring):
        search = unicode(search, 'utf-8', 'replace')
    # Turn the glob-style "*" wildcard into SQL LIKE's "%".
    re_search = search.translate({ord(u'*'): ur'%'}).lower()
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    group = Groups.by_name(groupname)
    if not can_view_group(person, group):
        turbogears.flash(_("You cannot view '%s'") % group.name)
        turbogears.redirect('/group/list')
        return dict()
    # return all members of this group that fit the search criteria
    members = PersonRoles.query.join('group').join('member', aliased=True).filter(
        People.username.like(re_search)
        ).outerjoin('sponsor', aliased=True).filter(
        Groups.name==groupname,
        ).order_by(sort_map[order_by])
    if role_type:
        members = members.filter(PersonRoles.role_type==role_type)
    group.json_props = {'PersonRoles': ['member']}
    return dict(group=group, members=members, search=search)
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas.templates.group.new")
def new(self):
    '''Display create group form'''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    if not can_create_group(person):
        # BUGFIX: corrected "adminstrators" typo in the user-facing message.
        # (msgid changed -- translation catalogs need updating to match.)
        turbogears.flash(_('Only FAS administrators can create groups.'))
        turbogears.redirect('/')
    return dict()
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupCreate())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template="fas.templates.group.new", allow_json=True)
def create(self, name, display_name, owner, group_type, invite_only=0,
           needs_sponsor=0, user_can_remove=1, prerequisite='',
           joinmsg='', apply_rules='None'):
    '''Create a group

    :arg name: unique machine name (validated by GroupCreate)
    :arg display_name: human-readable group title
    :arg owner: username of the group administrator
    :arg group_type: one of the valid group types
    '''
    groupname = name
    person = People.by_username(turbogears.identity.current.user_name)
    person_owner = People.by_username(owner)
    if not can_create_group(person):
        # BUGFIX: corrected "adminstrators" typo in the user-facing message.
        # (msgid changed -- translation catalogs need updating to match.)
        turbogears.flash(_('Only FAS administrators can create groups.'))
        turbogears.redirect('/')
    try:
        # Reuse the People row already fetched above instead of issuing a
        # second identical by_username() query.
        owner = person_owner
        group = Groups()
        group.name = name
        group.display_name = display_name
        group.owner_id = person_owner.id
        group.group_type = group_type
        group.needs_sponsor = bool(needs_sponsor)
        if invite_only:
            group.invite_only = True
        else:
            group.invite_only = False
        group.user_can_remove = bool(user_can_remove)
        if prerequisite:
            prerequisite = Groups.by_name(prerequisite)
            group.prerequisite = prerequisite
        group.joinmsg = joinmsg
        group.apply_rules = apply_rules
        # Log group creation
        Log(author_id=person.id, description='%s created group %s' %
            (person.username, group.name))
        session.flush()
    except TypeError:
        turbogears.flash(_("The group: '%s' could not be created.") % groupname)
        return dict()
    else:
        try:
            owner.apply(group, person) # Apply...
            session.flush()
            owner.sponsor(group, person)
            # Two consecutive upgrades: presumably user -> sponsor, then
            # sponsor -> administrator, so the owner ends up as group admin.
            owner.upgrade(group, person)
            owner.upgrade(group, person)
        except KeyError:
            turbogears.flash(_("The group: '%(group)s' has been created, but '%(user)s' could not be added as a group administrator.") % {'group': group.name, 'user': owner.username})
        else:
            turbogears.flash(_("The group: '%s' has been created.") % group.name)
        turbogears.redirect('/group/view/%s' % group.name)
        return dict()
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupEdit())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template="fas.templates.group.edit")
def edit(self, groupname):
    '''Display edit group form'''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    group = Groups.by_name(groupname)
    # Editing the form requires group-admin rights; the actual save is
    # re-checked in save() with can_edit_group().
    if not can_admin_group(person, group):
        turbogears.flash(_("You cannot edit '%s'.") % group.name)
        turbogears.redirect('/group/view/%s' % group.name)
    return dict(group=group)
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupSave())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template="fas.templates.group.edit")
def save(self, groupname, display_name, owner, group_type,
         needs_sponsor=0, user_can_remove=1, prerequisite='',
         url='', mailing_list='', mailing_list_url='', invite_only=0,
         irc_channel='', irc_network='', joinmsg='', apply_rules="None"):
    '''Edit a group

    Persist the submitted group attributes, log the change, and bounce the
    user back to the group view page.
    '''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    group = Groups.by_name(groupname)
    if not can_edit_group(person, group):
        turbogears.flash(_("You cannot edit '%s'.") % group.name)
        turbogears.redirect('/group/view/%s' % group.name)
    else:
        try:
            owner = People.by_username(owner)
            group.display_name = display_name
            group.owner = owner
            group.group_type = group_type
            group.needs_sponsor = bool(needs_sponsor)
            group.user_can_remove = bool(user_can_remove)
            if prerequisite:
                prerequisite = Groups.by_name(prerequisite)
                group.prerequisite = prerequisite
            else:
                group.prerequisite = None
            group.url = url
            group.mailing_list = mailing_list
            group.mailing_list_url = mailing_list_url
            if invite_only:
                group.invite_only = True
            else:
                group.invite_only = False
            group.irc_channel = irc_channel
            group.irc_network = irc_network
            group.joinmsg = joinmsg
            group.apply_rules = apply_rules
            # Log here
            session.flush()
        # BUGFIX: was a bare "except:", which also swallowed system-exiting
        # exceptions such as KeyboardInterrupt; narrowed to Exception.
        except Exception:
            turbogears.flash(_('The group details could not be saved.'))
        else:
            Log(author_id=person.id, description='%s edited group %s' %
                (person.username, group.name))
            turbogears.flash(_('The group details have been saved.'))
            turbogears.redirect('/group/view/%s' % group.name)
    return dict(group=group)
@identity.require(turbogears.identity.not_anonymous())
@expose(template="genshi-text:fas.templates.group.list",
        as_format="plain", accept_format="text/plain",
        format="text", content_type='text/plain; charset=utf-8')
@expose(template="fas.templates.group.list", allow_json=True)
def list(self, search='*', with_members=True):
    '''List groups whose name matches the glob-style *search* pattern.'''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    memberships = {}
    groups = []
    # Glob "*" becomes SQL LIKE "%".
    re_search = re.sub(r'\*', r'%', search).lower()
    results = Groups.query.filter(Groups.name.like(re_search)).order_by('name').all()
    # Membership details are only fetched for JSON clients; the HTML
    # templates do not use them.
    if self.jsonRequest():
        if with_members:
            membersql = sqlalchemy.select([PersonRoles.person_id, PersonRoles.group_id, PersonRoles.role_type], PersonRoles.role_status=='approved').order_by(PersonRoles.group_id)
            members = membersql.execute()
            # Group the approved roles by group id.
            for member in members:
                try:
                    memberships[member[1]].append({'person_id': member[0], 'role_type': member[2]})
                except KeyError:
                    memberships[member[1]]=[{'person_id': member[0], 'role_type': member[2]}]
        else:
            memberships = []
    # Filter out groups the requesting user may not see.
    for group in results:
        if can_view_group(person, group):
            groups.append(group)
    if not len(groups):
        turbogears.flash(_("No Groups found matching '%s'") % search)
    return dict(groups=groups, search=search, memberships=memberships)
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupApply())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.apply')
def application_screen(self, groupname, targetname=None):
    '''Show the group's application rules before letting the user apply.'''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    # Default to self-application when no target is given.
    if not targetname:
        targetname = username
        target = person
    else:
        target = People.by_username(targetname)
    group = Groups.by_name(groupname)
    # Only self-applications to groups that actually define apply_rules get
    # this screen; everything else goes straight to apply().
    if username != targetname or group.apply_rules is None or len(group.apply_rules) < 1:
        turbogears.redirect('/group/apply/%s/%s' % (group.name, target.username))
    if group in target.memberships:
        # NOTE(review): this flash is not wrapped in _() unlike the other
        # user-facing messages in this controller -- likely untranslated.
        turbogears.flash('You are already a member of %s!' % group.name)
        turbogears.redirect('/group/view/%s' % group.name)
    if not can_apply_group(person, group, target):
        turbogears.flash(_('%(user)s can not apply to %(group)s.') % \
            {'user': target.username, 'group': group.name })
        turbogears.redirect('/group/view/%s' % group.name)
        return dict()
    else:
        return dict(group=group)
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupApply())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.view', allow_json=True)
def apply(self, groupname, targetname=None):
    '''Apply to a group

    Applies *targetname* (defaulting to the logged-in user) to *groupname*
    and notifies both the group sponsors and the applicant by e-mail.
    '''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    if not targetname:
        target = person
    else:
        target = People.by_username(targetname)
    group = Groups.by_name(groupname)
    if not can_apply_group(person, group, target):
        turbogears.flash(_('%(user)s can not apply to %(group)s.') % \
            {'user': target.username, 'group': group.name })
        turbogears.redirect('/group/view/%s' % group.name)
        return dict()
    else:
        try:
            target.apply(group, person)
        except fas.ApplyError, e:
            turbogears.flash(_('%(user)s could not apply to %(group)s: %(error)s') % \
                {'user': target.username, 'group': group.name, 'error': e})
            turbogears.redirect('/group/view/%s' % group.name)
        else:
            # TODO: Localize for each recipient.  This will require
            # some database calls so we'll also need to check whether it
            # makes things too slow.
            # Basic outline is:
            # for person_role in group.approved_roles:
            #     if person_role.role_type in ('administrator', 'sponsor'):
            #         sponsors_addr = p.member.email
            #         locale = p.member.locale or 'C'
            #         ## Do all the rest of the stuff to construct the
            #         ## email message -- the _(locale=locale) will
            #         ## translate the strings for that recipient
            #         send_mail(sponsors_addr, join_subject, join_text)
            # ## And if we still want to send this message to the user,
            # ## have to set locale = target.locale or 'C' and construct
            # ## the email one additional time and send to target.email
            locale = 'en'
            sponsor_url = config.get('base_url_filter.base_url') + \
                tg_url('/group/view/%s' % groupname)
            # Sponsors are reached via the per-group sponsors alias.
            sponsors_addr = '%(group)s-sponsors@%(host)s' % \
                {'group': group.name, 'host': config.get('email_host')}
            sponsor_subject = _('Fedora \'%(group)s\' sponsor needed for %(user)s',
                locale=locale) % {'user': target.username,
                'group': group.name}
            sponsors_text = _('''
Fedora user %(user)s <%(email)s> has requested
membership for %(applicant)s in the %(group)s group and needs a sponsor.
Please go to %(url)s to take action.
''', locale=locale) % { 'user': person.username,
                'applicant': target.username,
                'email': person.email,
                'url': sponsor_url,
                'group': group.name }
            join_subject = _('Application to the \'%(group)s\' group',
                locale=locale) % {'user': target.username,
                'group': group.name}
            join_text = _('''
Thank you for applying for the %(group)s group.
%(joinmsg)s
''', locale=locale) % { 'user': person.username,
                'joinmsg': group.joinmsg,
                'group': group.name }
            send_mail(sponsors_addr, sponsor_subject, sponsors_text)
            send_mail(target.email, join_subject, join_text)
            Log(author_id=target.id, description='%s applied %s to %s' %
                (person.username, target.username, group.name))
            turbogears.flash(_('%(user)s has applied to %(group)s!') % \
                {'user': target.username, 'group': group.name})
            turbogears.redirect('/group/view/%s' % group.name)
        return dict()
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupSponsor())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.view')
def sponsor(self, groupname, targetname):
    '''Sponsor user

    The acting user (person) sponsors *targetname* into *groupname* and the
    target is notified by e-mail.
    '''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    target = People.by_username(targetname)
    group = Groups.by_name(groupname)
    if not can_sponsor_user(person, group):
        turbogears.flash(_("You cannot sponsor '%s'") % target.username)
        turbogears.redirect('/group/view/%s' % group.name)
        return dict()
    else:
        try:
            target.sponsor(group, person)
        except fas.SponsorError, e:
            turbogears.flash(_("%(user)s could not be sponsored in %(group)s: %(error)s") % \
                {'user': target.username, 'group': group.name, 'error': e})
            turbogears.redirect('/group/view/%s' % group.name)
        else:
            sponsor_subject = _('Your Fedora \'%s\' membership has been sponsored') % group.name
            sponsor_text = _('''
%(user)s <%(email)s> has sponsored you for membership in the %(group)s
group of the Fedora account system. If applicable, this change should
propagate into the e-mail aliases and git repository within an hour.
''') % {'group': group.name, 'user': person.username, 'email': person.email}
            send_mail(target.email, sponsor_subject, sponsor_text)
            Log(author_id=target.id, description='%s sponsored %s into %s' %
                (person.username, target.username, group.name))
            turbogears.flash(_("'%s' has been sponsored!") % target.username)
            turbogears.redirect('/group/view/%s' % group.name)
    return dict()
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupRemove())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.view')
def remove(self, groupname, targetname):
'''Remove user from group'''
# TODO: Add confirmation?
username = turbogears.identity.current.user_name
person = People.by_username(username)
target = People.by_username(targetname)
group = Groups.by_name(groupname)
if not can_remove_user(person, group, target):
turbogears.flash(_("You cannot remove '%(user)s' from '%(group)s'.") % \
{'user': target.username, 'group': group.name})
turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
return dict()
else:
try:
target.remove(group, target)
except fas.RemoveError, e:
turbogears.flash(_("%(user)s could not be removed from %(group)s: %(error)s") % \
{'user': target.username, 'group': group.name, 'error': e})
turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
else:
removal_subject = _('Your Fedora \'%s\' membership has been removed') % group.name
removal_text = _('''
%(user)s <%(email)s> has removed you from the '%(group)s'
group of the Fedora Accounts System This change is effective
immediately for new operations, and should propagate into the e-mail
aliases within an hour.
''') % {'group': group.name, 'user': person.username, 'email': person.email}
send_mail(target.email, removal_subject, removal_text)
Log(author_id=target.id, description='%s removed %s from %s' %
(person.username, target.username, group.name))
turbogears.flash(_('%(name)s has been removed from %(group)s') % \
{'name': target.username, 'group': group.name})
turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
return dict()
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupUpgrade())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.view')
def upgrade(self, groupname, targetname):
    '''Upgrade user in group'''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    target = People.by_username(targetname)
    group = Groups.by_name(groupname)
    if not can_upgrade_user(person, group):
        turbogears.flash(_("You cannot upgrade '%s'") % target.username)
        turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
        return dict()
    else:
        try:
            target.upgrade(group, person)
        except fas.UpgradeError, e:
            turbogears.flash(_('%(name)s could not be upgraded in %(group)s: %(error)s') % \
                {'name': target.username, 'group': group.name, 'error': e})
            turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
        else:
            upgrade_subject = _('Your Fedora \'%s\' membership has been upgraded') % group.name
            # Should we make person.upgrade return this?
            # Re-read the role to learn the post-upgrade role_type.
            role = PersonRoles.query.filter_by(group=group, member=target).one()
            status = role.role_type
            upgrade_text = _('''
%(user)s <%(email)s> has upgraded you to %(status)s status in the
'%(group)s' group of the Fedora Accounts System This change is
effective immediately for new operations, and should propagate
into the e-mail aliases within an hour.
''') % {'group': group.name, 'user': person.username, 'email': person.email, 'status': status}
            send_mail(target.email, upgrade_subject, upgrade_text)
            Log(author_id=target.id, description='%s upgraded %s to %s in %s' %
                (person.username, target.username, status, group.name))
            turbogears.flash(_('%s has been upgraded!') % target.username)
            turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
        return dict()
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=GroupDowngrade())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.view')
def downgrade(self, groupname, targetname):
    '''Downgrade user in group (docstring said "Upgrade" -- same flow, inverse direction).'''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    target = People.by_username(targetname)
    group = Groups.by_name(groupname)
    if not can_downgrade_user(person, group):
        turbogears.flash(_("You cannot downgrade '%s'") % target.username)
        turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
        return dict()
    else:
        try:
            target.downgrade(group, person)
        except fas.DowngradeError, e:
            turbogears.flash(_('%(name)s could not be downgraded in %(group)s: %(error)s') % \
                {'name': target.username, 'group': group.name, 'error': e})
            turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
        else:
            downgrade_subject = _('Your Fedora \'%s\' membership has been downgraded') % group.name
            # Re-read the role to learn the post-downgrade role_type.
            role = PersonRoles.query.filter_by(group=group, member=target).one()
            status = role.role_type
            downgrade_text = _('''
%(user)s <%(email)s> has downgraded you to %(status)s status in the
'%(group)s' group of the Fedora Accounts System This change is
effective immediately for new operations, and should propagate
into the e-mail aliases within an hour.
''') % {'group': group.name, 'user': person.username, 'email': person.email, 'status': status}
            send_mail(target.email, downgrade_subject, downgrade_text)
            Log(author_id=target.id, description='%s downgraded %s to %s in %s' %
                (person.username, target.username, status, group.name))
            turbogears.flash(_('%s has been downgraded!') % target.username)
            turbogears.redirect(cherrypy.request.headerMap.get("Referer", "/"))
        return dict()
@identity.require(turbogears.identity.not_anonymous())
@expose(template="genshi-text:fas.templates.group.dump", format="text",
        content_type='text/plain; charset=utf-8')
@expose(allow_json=True)
def dump(self, groupname=None, role_type=None):
    '''Dump basic account data, either for all people or for one group.

    Each row is [username, email, human_name, role_type, sponsored-count];
    e-mail is blanked for privacy-flagged accounts unless the requester is
    an admin or the account owner.
    '''
    if not groupname:
        # All people, with how many applicants each has sponsored
        # (outer-joined so people who sponsored nobody still appear).
        stmt = select([People.privacy, People.username, People.email,
            People.human_name, "'user'", 's.sponsored'],
            from_obj=PeopleTable.outerjoin(select([PersonRoles.sponsor_id,
                func.count(PersonRoles.sponsor_id).label('sponsored')]
                ).group_by(PersonRoles.sponsor_id
                ).correlate().alias('s')
            )).order_by(People.username)
    else:
        # Approved members of one group, again with per-sponsor counts
        # restricted to that group.
        stmt = select([People.privacy, People.username, People.email,
            People.human_name, PersonRoles.role_type, 's.sponsored'],
            from_obj=GroupsTable.join(PersonRolesTable).join(PeopleTable,
                onclause=PeopleTable.c.id==PersonRolesTable.c.person_id
            ).outerjoin(select([PersonRoles.sponsor_id,
                func.count(PersonRoles.sponsor_id).label('sponsored')]
                ).where(and_(
                PersonRoles.group_id==Groups.id,
                Groups.name==groupname)).group_by(
                PersonRoles.sponsor_id).correlate().alias('s')
            )).where(and_(Groups.name==groupname,
                PersonRoles.role_status=='approved')
            ).order_by(People.username)
    people = []
    if identity.in_any_group(config.get('admingroup', 'accounts'),
            config.get('systemgroup', 'fas-system')):
        user = 'admin'
    elif identity.current.anonymous:
        # NOTE(review): the not_anonymous() decorator above should make this
        # branch unreachable -- confirm; if reachable, `username` below is
        # unbound for anonymous requests hitting a privacy-flagged row.
        user = 'anonymous'
    else:
        user = 'public'
        username = identity.current.user_name
    for row in stmt.execute():
        # Drop the leading privacy column from the emitted record.
        person = list(row[1:])
        if not row['sponsored']:
            person[-1] = 0
        if row['privacy'] and user != 'admin' \
                and username != row['username']:
            # filter private data
            person[2] = u''
        people.append(person)
    return dict(people=people)
@identity.require(identity.not_anonymous())
@validate(validators=GroupInvite())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.invite')
def invite(self, groupname):
    '''Display the invite-by-email form for *groupname*.'''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    group = Groups.by_name(groupname)
    # Strip privacy-protected fields before handing the record to the view.
    person = person.filter_private()
    return dict(person=person, group=group)
@identity.require(identity.not_anonymous())
@validate(validators=GroupSendInvite())
@error_handler(error) # pylint: disable-msg=E0602
@expose(template='fas.templates.group.invite')
def sendinvite(self, groupname, target):
    '''E-mail a join-Fedora invitation to *target* on behalf of the user.

    Only approved members of *groupname* may send invitations.
    '''
    username = turbogears.identity.current.user_name
    person = People.by_username(username)
    group = Groups.by_name(groupname)
    if is_approved(person, group):
        invite_subject = _('Come join The Fedora Project!')
        invite_text = _('''
%(user)s <%(email)s> has invited you to join the Fedora
Project!  We are a community of users and developers who produce a
complete operating system from entirely free and open source software
(FOSS).  %(user)s thinks that you have knowledge and skills
that make you a great fit for the Fedora community, and that you might
be interested in contributing.
How could you team up with the Fedora community to use and develop your
skills?  Check out http://fedoraproject.org/join-fedora for some ideas.
Our community is more than just software developers -- we also have a
place for you whether you're an artist, a web site builder, a writer, or
a people person.  You'll grow and learn as you work on a team with other
very smart and talented people.
Fedora and FOSS are changing the world -- come be a part of it!''') % \
            {'user': person.username, 'email': person.email}
        send_mail(target, invite_subject, invite_text)
        turbogears.flash(_('Message sent to: %s') % target)
        turbogears.redirect('/group/view/%s' % group.name)
    else:
        turbogears.flash(_("You are not in the '%s' group.") % group.name)
    person = person.filter_private()
    return dict(target=target, person=person, group=group)
|
gpl-2.0
|
quchunguang/test
|
testpy3/pyqt5_widget_splitter.py
|
1
|
2031
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtWidgets import (QWidget, QHBoxLayout, QVBoxLayout, QLabel,
QComboBox, QFrame, QSplitter, QApplication)
from PyQt5.QtCore import Qt
class Example(QWidget):
    """Demo window: nested QSplitters plus a combo box that drives a label.

    Widget tree::

        Example
          hbox (QHBoxLayout)
            vertical_split (QSplitter)
              horizontal_split (QSplitter)
                top_left_frame   <- holds the label + combo box
                top_right_frame
              bottom_frame
    """

    def __init__(self):
        super().__init__()
        self.initUI()

    def initUI(self):
        """Build the widget tree and show the window."""
        # --- splitters -----------------------------------------------------
        outer_layout = QHBoxLayout(self)

        top_left_frame = QFrame(self)
        top_left_frame.setFrameShape(QFrame.StyledPanel)

        top_right_frame = QFrame(self)
        top_right_frame.setFrameShape(QFrame.StyledPanel)

        bottom_frame = QFrame(self)
        bottom_frame.setFrameShape(QFrame.StyledPanel)

        horizontal_split = QSplitter(Qt.Horizontal)
        horizontal_split.addWidget(top_left_frame)
        horizontal_split.addWidget(top_right_frame)

        vertical_split = QSplitter(Qt.Vertical)
        vertical_split.addWidget(horizontal_split)
        vertical_split.addWidget(bottom_frame)

        outer_layout.addWidget(vertical_split)
        self.setLayout(outer_layout)

        # --- combo box + label --------------------------------------------
        self.lbl = QLabel("Ubuntu", self)

        distro_combo = QComboBox(self)
        for distro in ("Ubuntu", "Mandriva", "Fedora", "Arch", "Gentoo"):
            distro_combo.addItem(distro)
        distro_combo.activated[str].connect(self.onActivated)

        # Put the label and combo into the top-left frame.
        left_layout = QVBoxLayout(top_left_frame)
        left_layout.addWidget(self.lbl)
        left_layout.addWidget(distro_combo)

        self.setGeometry(300, 300, 300, 200)
        self.setWindowTitle('QSplitter')
        self.show()

    def onActivated(self, text):
        """Mirror the selected combo entry in the label."""
        self.lbl.setText(text)
        self.lbl.adjustSize()
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # Keep a reference to the window so it is not garbage-collected.
    ex = Example()
    sys.exit(app.exec_())
|
mit
|
vipul-sharma20/oh-mainline
|
vendor/packages/Django/django/conf/locale/lv/formats.py
|
107
|
1578
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Latvian (lv) locale formats for Django.
from __future__ import unicode_literals

# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Backslash-escaped letters (\g\a\d\a) are literal text ("gada" = "of year").
DATE_FORMAT = r'Y. \g\a\d\a j. F'
TIME_FORMAT = 'H:i:s'
DATETIME_FORMAT = r'Y. \g\a\d\a j. F, H:i:s'
YEAR_MONTH_FORMAT = r'Y. \g. F'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = r'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday (Django convention: 0 = Sunday)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
    '%H.%M.%S', # '14.30.59'
    '%H.%M', # '14.30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M', # '25.10.2006 14:30'
    '%d.%m.%Y', # '25.10.2006'
    '%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M', # '25.10.06 14:30'
    '%d.%m.%y %H.%M.%S', # '25.10.06 14.30.59'
    '%d.%m.%y %H.%M', # '25.10.06 14.30'
    '%d.%m.%y', # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '  # Non-breaking space
NUMBER_GROUPING = 3
|
agpl-3.0
|
lbjay/cds-invenio
|
modules/bibindex/lib/bibindex_engine_stemmer_tests.py
|
4
|
2712
|
# -*- coding: utf-8 -*-
##
## This file is part of CDS Invenio.
## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007, 2008 CERN.
##
## CDS Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## CDS Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with CDS Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the indexing engine."""
__revision__ = "$Id$"
import unittest
from invenio import bibindex_engine_stemmer
from invenio.testutils import make_test_suite, run_test_suite
class TestStemmer(unittest.TestCase):
    """Test the bibindex stemmer wrapper."""

    def test_stemmer_none(self):
        """bibindex engine - no stemmer"""
        # With no language code the word must pass through untouched.
        self.assertEqual("information",
                         bibindex_engine_stemmer.stem("information", None))

    def test_stemmer_english(self):
        """bibindex engine - English stemmer"""
        # Pairs of [input word, expected English stem].  Raw strings are used
        # where a backslash occurs, so no invalid escape sequence (\O) is
        # emitted; the string values are unchanged.
        english_test_cases = [['information', 'inform'],
                              ['experiment', 'experi'],
                              ['experiments', 'experi'],
                              ['experimented', 'experi'],
                              ['experimenting', 'experi'],
                              ['experimental', 'experiment'],
                              ['experimentally', 'experiment'],
                              ['experimentation', 'experiment'],
                              ['experimentalism', 'experiment'],
                              ['experimenter', 'experiment'],
                              ['experimentalise', 'experimentalis'],
                              ['experimentalist', 'experimentalist'],
                              ['experimentalists', 'experimentalist'],
                              ['GeV', 'GeV'],
                              [r'$\Omega$', r'$\Omega$'],
                              ['e^-', 'e^-'],
                              ['C#', 'C#'],
                              ['C++', 'C++']]
        for test_word, expected_result in english_test_cases:
            self.assertEqual(expected_result,
                             bibindex_engine_stemmer.stem(test_word, "en"))
# Collect all test cases defined above into a single suite.
TEST_SUITE = make_test_suite(TestStemmer,)
if __name__ == "__main__":
    # Allow running this test module directly from the command line.
    run_test_suite(TEST_SUITE)
|
gpl-2.0
|
cobalys/django
|
django/contrib/localflavor/si/forms.py
|
8
|
4672
|
"""
Slovenian specific form helpers.
"""
from __future__ import absolute_import, unicode_literals
import datetime
import re
from django.contrib.localflavor.si.si_postalcodes import SI_POSTALCODES_CHOICES
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import CharField, Select, ChoiceField
from django.utils.translation import ugettext_lazy as _
class SIEMSOField(CharField):
    """A form field for validating the Slovenian personal identification
    number (EMSO).

    Additionally stores gender, nationality and birthday to the ``self.info``
    dictionary after a successful ``clean()``.
    """
    default_error_messages = {
        'invalid': _('This field should contain exactly 13 digits.'),
        'date': _('The first 7 digits of the EMSO must represent a valid past date.'),
        'checksum': _('The EMSO is not valid.'),
    }
    # Raw string so the \d escapes reach the regex engine unmangled.
    emso_regex = re.compile(r'^(\d{2})(\d{2})(\d{3})(\d{2})(\d{3})(\d)$')

    def clean(self, value):
        super(SIEMSOField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        value = value.strip()

        m = self.emso_regex.match(value)
        if m is None:
            # Use self.error_messages (not default_error_messages) so
            # per-instance message overrides are honoured.
            raise ValidationError(self.error_messages['invalid'])

        # Validate the EMSO checksum: weights 7..2 applied twice across the
        # first 12 digits; the 13th digit is the control digit.
        s = 0
        int_values = [int(i) for i in value]
        # list() keeps this working on Python 3, where range() is lazy and
        # cannot be multiplied.
        for a, b in zip(int_values, list(range(7, 1, -1)) * 2):
            s += a * b
        chk = s % 11
        if chk == 0:
            K = 0
        else:
            K = 11 - chk

        if K == 10 or int_values[-1] != K:
            raise ValidationError(self.error_messages['checksum'])

        # Extract extra info encoded in the identification number.
        day, month, year, nationality, gender, chksum = [int(i) for i in m.groups()]
        # The year is stored as three digits; values below 890 belong to the
        # 2000s, the rest to the 1000s (e.g. 995 -> 1995).
        if year < 890:
            year += 2000
        else:
            year += 1000

        # Validate the birthday: it must be a real calendar date in the past.
        try:
            birthday = datetime.date(year, month, day)
        except ValueError:
            raise ValidationError(self.error_messages['date'])
        if datetime.date.today() < birthday:
            raise ValidationError(self.error_messages['date'])

        self.info = {
            'gender': gender < 500 and 'male' or 'female',
            'birthdate': birthday,
            'nationality': nationality,
        }

        return value
class SITaxNumberField(CharField):
    """Slovenian tax number field.

    Valid input is SIXXXXXXXX or XXXXXXXX where X is a number; the optional
    SI prefix is stripped from the cleaned value.
    """
    default_error_messages = {
        'invalid': _('Enter a valid tax number in form SIXXXXXXXX'),
    }
    # Raw string so the \d escape reaches the regex engine unmangled.
    sitax_regex = re.compile(r'^(?:SI)?([1-9]\d{7})$')

    def clean(self, value):
        super(SITaxNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        value = value.strip()

        m = self.sitax_regex.match(value)
        if m is None:
            # self.error_messages honours per-instance message overrides.
            raise ValidationError(self.error_messages['invalid'])
        value = m.groups()[0]

        # Validate the checksum: weights 8..2 over the first 7 digits, the
        # 8th digit is the control digit (11 - sum mod 11, with 11 -> 0).
        s = 0
        int_values = [int(i) for i in value]
        # list() keeps this working on Python 3, where range() is lazy.
        for a, b in zip(int_values, list(range(8, 1, -1))):
            s += a * b
        chk = 11 - (s % 11)
        if chk == 10:
            chk = 0

        if int_values[-1] != chk:
            raise ValidationError(self.error_messages['invalid'])

        return value
class SIPostalCodeField(ChoiceField):
    """A choice field offering every Slovenian postal code."""
    def __init__(self, *args, **kwargs):
        # Supply the postal-code choices unless the caller provided a set.
        if 'choices' not in kwargs:
            kwargs['choices'] = SI_POSTALCODES_CHOICES
        super(SIPostalCodeField, self).__init__(*args, **kwargs)
class SIPostalCodeSelect(Select):
    """A Select widget whose options are the Slovenian postal codes."""
    def __init__(self, attrs=None):
        super(SIPostalCodeSelect, self).__init__(
            attrs, choices=SI_POSTALCODES_CHOICES)
class SIPhoneNumberField(CharField):
    """Slovenian phone number field.

    Phone number must contain at least the local area code; the country
    code may be present.  The cleaned value is the national number without
    any prefix.

    Examples:

    * +38640XXXXXX
    * 0038640XXXXXX
    * 040XXXXXX
    * 01XXXXXX
    * 0590XXXXX
    """
    default_error_messages = {
        'invalid': _('Enter phone number in form +386XXXXXXXX or 0XXXXXXXX.'),
    }
    # Raw string so the \+ and \d escapes reach the regex engine unmangled.
    phone_regex = re.compile(r'^(?:(?:00|\+)386|0)(\d{7,8})$')

    def clean(self, value):
        super(SIPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''
        # Tolerate common separators before matching.
        value = value.replace(' ', '').replace('-', '').replace('/', '')

        m = self.phone_regex.match(value)
        if m is None:
            # self.error_messages honours per-instance message overrides.
            raise ValidationError(self.error_messages['invalid'])

        return m.groups()[0]
|
bsd-3-clause
|
hergin/DelTa
|
mt/ptcal/pytcore/rules/srule.py
|
1
|
5367
|
'''*****************************************************************************
AToMPM - A Tool for Multi-Paradigm Modelling
Copyright (c) 2011 Eugene Syriani
This file is part of AToMPM.
AToMPM is free software: you can redistribute it and/or modify it under the
terms of the GNU Lesser General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
AToMPM is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License along
with AToMPM. If not, see <http://www.gnu.org/licenses/>.
*****************************************************************************'''
from ..util.infinity import INFINITY
from arule import ARule
from ..tcore.resolver import Resolver
class SRule(ARule):
    '''
    Applies the transformation as long as matches can be found.
    '''
    def __init__(self, LHS, RHS, max_iterations=INFINITY,sendAndApplyDeltaFunc=None):
        '''
        Applies the transformation as long as matches can be found.
        @param LHS: The pre-condition pattern (LHS + NACs).
        @param RHS: The post-condition pattern (RHS).
        @param max_iterations: The maximum number of times to apply the transformation.
        @param sendAndApplyDeltaFunc: Optional delta callback forwarded to ARule.
        '''
        super(SRule, self).__init__(LHS, RHS,sendAndApplyDeltaFunc)
        # self.I (iterator) is created by ARule; cap its iteration count here.
        self.I.max_iterations = max_iterations
    def packet_in(self, packet):
        # Repeatedly match (self.M), pick a match (self.I) and rewrite
        # (self.W) until no match remains or max_iterations is reached.
        self.exception = None
        self.is_success = False
        # Match
        packet = self.M.packet_in(packet)
        if not self.M.is_success:
            self.exception = self.M.exception
            return packet
        # Choose the first match
        packet = self.I.packet_in(packet)
        if not self.I.is_success:
            self.exception = self.I.exception
            return packet
        while True:
            # Rewrite
            packet = self.W.packet_in(packet)
            if not self.W.is_success:
                self.exception = self.W.exception
                return packet
            # Rule has been applied once, so it's a success anyway
            self.is_success = True
            if self.I.iterations == self.I.max_iterations:
                return packet
            # Re-Match: the rewrite may have created or destroyed matches.
            packet = self.M.packet_in(packet)
            if not self.M.is_success:
                self.exception = self.M.exception
                return packet
            # Choose another match
            packet = self.I.next_in(packet)
            # No more iterations are left
            if not self.I.is_success:
                if self.I.exception:
                    self.exception = self.I.exception
                # Exhausted matches is the normal termination path.
                return packet
class SRule_r(SRule):
    '''
    Applies the transformation as long as matches can be found, resolving
    conflicts between matches after each rewrite.
    '''
    def __init__(self, LHS, RHS, max_iterations=INFINITY, external_matches_only=False, custom_resolution=lambda packet: False, sendAndApplyDeltaFunc=None):
        '''
        Applies the transformation as long as matches can be found.
        @param LHS: The pre-condition pattern (LHS + NACs).
        @param RHS: The post-condition pattern (RHS).
        @param max_iterations: The maximum number of times to apply the transformation.
        @param external_matches_only: Resolve conflicts ignoring the matches found in this SRule.
        @param custom_resolution: Override the default resolution function.
        @param sendAndApplyDeltaFunc: Optional delta callback forwarded to SRule.
        '''
        # Forward sendAndApplyDeltaFunc to SRule; previously it was silently
        # dropped, making delta propagation impossible for SRule_r.  Passing
        # nothing keeps the old behaviour (default None).
        super(SRule_r, self).__init__(LHS, RHS, max_iterations,
                                      sendAndApplyDeltaFunc)
        self.R = Resolver(external_matches_only=external_matches_only,
                          custom_resolution=custom_resolution)
    def packet_in(self, packet):
        # Same match/iterate/rewrite loop as SRule, with a conflict
        # resolution step (self.R) after each rewrite.
        self.exception = None
        self.is_success = False
        # Match
        packet = self.M.packet_in(packet)
        if not self.M.is_success:
            self.exception = self.M.exception
            return packet
        # Choose the first match
        packet = self.I.packet_in(packet)
        if not self.I.is_success:
            self.exception = self.I.exception
            return packet
        while True:
            # Rewrite
            packet = self.W.packet_in(packet)
            if not self.W.is_success:
                self.exception = self.W.exception
                return packet
            # Resolve any conflicts if necessary
            packet = self.R.packet_in(packet)
            if not self.R.is_success:
                self.exception = self.R.exception
                return packet
            # Rule has been applied once, so it's a success anyway
            self.is_success = True
            if self.I.iterations == self.I.max_iterations:
                return packet
            # Re-Match
            packet = self.M.packet_in(packet)
            if not self.M.is_success:
                self.exception = self.M.exception
                return packet
            # Choose another match
            packet = self.I.next_in(packet)
            # No more iterations are left
            if not self.I.is_success:
                if self.I.exception:
                    self.exception = self.I.exception
                return packet
|
gpl-3.0
|
sandeepgupta2k4/tensorflow
|
tensorflow/python/training/device_setter.py
|
32
|
8296
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Device function for replicated training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.core.framework import node_def_pb2
from tensorflow.python.framework import device as pydev
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
class _RoundRobinStrategy(object):
"""Returns the next ps task index for placement in round-robin order.
This class is not to be used directly by users. See instead
`replica_device_setter()` below.
"""
def __init__(self, num_tasks):
"""Create a new `_RoundRobinStrategy`.
Args:
num_tasks: Number of ps tasks to cycle among.
"""
self._num_tasks = num_tasks
self._next_task = 0
def __call__(self, unused_op):
"""Choose a ps task index for the given `Operation`.
Args:
unused_op: An `Operation` to be placed on ps.
Returns:
The next ps task index to use for the `Operation`. Returns the next
index, in the range `[offset, offset + num_tasks)`.
"""
task = self._next_task
self._next_task = (self._next_task + 1) % self._num_tasks
return task
class _ReplicaDeviceChooser(object):
  """Chooses devices for Ops in a replicated training setup.

  This class is not to be used directly by users.  See instead
  `replica_device_setter()` below.
  """

  def __init__(self, ps_tasks, ps_device, worker_device, merge_devices, ps_ops,
               ps_strategy):
    """Create a new `_ReplicaDeviceChooser`.

    Args:
      ps_tasks: Number of tasks in the `ps` job.
      ps_device: String.  Name of the `ps` job.
      worker_device: String.  Name of the `worker` job.
      merge_devices: Boolean.  Set to True to allow merging of device specs.
      ps_ops: List of strings representing `Operation` types that need to be
        placed on `ps` devices.
      ps_strategy: A callable invoked for every ps `Operation` (i.e. matched
        by `ps_ops`), that takes the `Operation` and returns the ps task
        index to use.
    """
    self._ps_tasks = ps_tasks
    self._ps_device = ps_device
    self._worker_device = worker_device
    self._merge_devices = merge_devices
    self._ps_ops = ps_ops
    self._ps_strategy = ps_strategy

  def device_function(self, op):
    """Choose a device for `op`.

    Args:
      op: an `Operation`.

    Returns:
      The device string to use for the `Operation`.
    """
    # Only bail out early when merging is disabled and the op already has a
    # device; otherwise merging below is safe (a no-op for an empty device).
    if not self._merge_devices and op.device:
      return op.device

    current_device = pydev.DeviceSpec.from_string(op.device or "")
    node_def = op if isinstance(op, node_def_pb2.NodeDef) else op.node_def

    if self._ps_tasks and self._ps_device and node_def.op in self._ps_ops:
      # Place on the ps job.  The task number is only assigned (via
      # ps_strategy) when the ps job field won't be overridden by a job
      # already present in current_device.
      ps_spec = pydev.DeviceSpec.from_string(self._ps_device)
      if ps_spec.job and (not current_device.job
                          or current_device.job == ps_spec.job):
        ps_spec.task = self._ps_strategy(op)
      ps_spec.merge_from(current_device)
      return ps_spec.to_string()

    # Everything else goes to the worker job.
    worker_spec = pydev.DeviceSpec.from_string(self._worker_device or "")
    worker_spec.merge_from(current_device)
    return worker_spec.to_string()
def replica_device_setter(ps_tasks=0, ps_device="/job:ps",
                          worker_device="/job:worker", merge_devices=True,
                          cluster=None, ps_ops=None, ps_strategy=None):
  """Return a `device function` to use when building a Graph for replicas.

  Device functions are used in `with tf.device(device_function):` statements
  to automatically assign devices to `Operation` objects as they are
  constructed.  Device constraints are added from the inner-most context
  first, working outwards; merging only fills in fields (job, task, cpu/gpu)
  that an inner context left unset.

  If `cluster` is `None` and `ps_tasks` is 0, the returned value is a no-op
  (`None`).  Otherwise, when `cluster` is given, `ps_tasks` is derived from
  it.  By default only Variable ops are placed on ps tasks, round-robin over
  all ps tasks; supply `ps_strategy` (e.g.
  `tf.contrib.training.GreedyLoadBalancingStrategy`) for smarter placement.

  For example,

  ```python
  # Two ps jobs on hosts ps0/ps1 and three worker jobs.
  cluster_spec = {
      "ps": ["ps0:2222", "ps1:2222"],
      "worker": ["worker0:2222", "worker1:2222", "worker2:2222"]}
  with tf.device(tf.train.replica_device_setter(cluster=cluster_spec)):
    v1 = tf.Variable(...)  # assigned to /job:ps/task:0
    v2 = tf.Variable(...)  # assigned to /job:ps/task:1
    v3 = tf.Variable(...)  # assigned to /job:ps/task:0
  ```

  Args:
    ps_tasks: Number of tasks in the `ps` job.  Ignored if `cluster` is
      provided.
    ps_device: String.  Device of the `ps` job.  If empty no `ps` job is
      used.  Defaults to `ps`.
    worker_device: String.  Device of the `worker` job.  If empty no
      `worker` job is used.
    merge_devices: `Boolean`.  If `True`, merges device specifications
      rather than overriding them; otherwise only sets a device when the
      constraint is completely unset.
    cluster: `ClusterDef` proto or `ClusterSpec`.
    ps_ops: List of strings representing `Operation` types that need to be
      placed on `ps` devices.  If `None`, defaults to `["Variable"]`.
    ps_strategy: A callable invoked for every ps `Operation` (i.e. matched
      by `ps_ops`) returning the ps task index to use.  If `None`, defaults
      to a round-robin strategy across all `ps` devices.

  Returns:
    A function to pass to `tf.device()`, or `None` when there is nothing to
    place on ps.

  Raises:
    TypeError: if `cluster` is not a dictionary or `ClusterDef` protocol
      buffer, or if `ps_strategy` is provided but not a callable.
  """
  if cluster is not None:
    # Derive ps_tasks from the cluster description.
    if isinstance(cluster, server_lib.ClusterSpec):
      cluster_spec = cluster.as_dict()
    else:
      cluster_spec = server_lib.ClusterSpec(cluster).as_dict()
    # Get ps_job_name from ps_device by stripping "/job:".
    ps_job_name = pydev.DeviceSpec.from_string(ps_device).job
    if cluster_spec.get(ps_job_name) is None:
      return None
    ps_tasks = len(cluster_spec[ps_job_name])

  if ps_tasks == 0:
    return None

  if ps_ops is None:
    # TODO(sherrym): Variables in the LOCAL_VARIABLES collection should not be
    # placed in the parameter server.
    ps_ops = ["Variable", "VariableV2", "VarHandleOp"]

  if not merge_devices:
    logging.warning(
        "DEPRECATION: It is recommended to set merge_devices=true in "
        "replica_device_setter")
  if ps_strategy is None:
    ps_strategy = _RoundRobinStrategy(ps_tasks)
  if not six.callable(ps_strategy):
    raise TypeError("ps_strategy must be callable")

  chooser = _ReplicaDeviceChooser(ps_tasks, ps_device, worker_device,
                                  merge_devices, ps_ops, ps_strategy)
  return chooser.device_function
|
apache-2.0
|
zengenti/ansible
|
lib/ansible/modules/cloud/univention/udm_dns_zone.py
|
21
|
7623
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Ansible metadata: maturity ('preview') and support level of this module.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'version': '1.0'}
DOCUMENTATION = '''
---
module: udm_dns_zone
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage dns zones on a univention corporate server
description:
- "This module allows to manage dns zones on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the dns zone is present or not.
type:
required: true
choices: [ forward_zone, reverse_zone ]
description:
- Define if the zone is a forward or reverse DNS zone.
zone:
required: true
description:
- DNS zone name, e.g. C(example.com).
nameserver:
required: false
description:
- List of appropriate name servers. Required if C(state=present).
interfaces:
required: false
description:
- List of interface IP addresses, on which the server should
response this zone. Required if C(state=present).
refresh:
required: false
default: 3600
description:
- Interval before the zone should be refreshed.
retry:
required: false
default: 1800
description:
- Interval that should elapse before a failed refresh should be retried.
expire:
required: false
default: 604800
description:
- Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
ttl:
required: false
default: 600
description:
- Minimum TTL field that should be exported with any RR from this zone.
contact:
required: false
default: ''
description:
- Contact person in the SOA record.
mx:
required: false
default: []
description:
- List of MX servers. (Must declared as A or AAAA records).
'''
EXAMPLES = '''
# Create a DNS zone on a UCS
- udm_dns_zone:
zone: example.com
type: forward_zone
nameserver:
- ucs.example.com
interfaces:
- 192.0.2.1
'''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def convert_time(time):
    """Convert a time in seconds into the biggest unit.

    Returns a ``(value, unit)`` tuple of strings/names, e.g.
    ``convert_time(7200) == ('2', 'hours')``.
    """
    if time == 0:
        return ('0', 'seconds')
    # Largest unit first; integer-divide by the first one that fits.
    for span, label in ((86400, 'days'),
                        (3600, 'hours'),
                        (60, 'minutes'),
                        (1, 'seconds')):
        if time >= span:
            return ('{}'.format(time // span), label)
def main():
    """Create, modify or remove a DNS zone on a Univention Corporate Server.

    Looks up the existing LDAP object for the zone, then creates/edits it
    (state=present) or removes it (state=absent), honouring check mode, and
    exits through ``module.exit_json``/``module.fail_json``.
    """
    module = AnsibleModule(
        argument_spec=dict(
            type=dict(required=True,
                      type='str'),
            zone=dict(required=True,
                      aliases=['name'],
                      type='str'),
            nameserver=dict(default=[],
                            type='list'),
            interfaces=dict(default=[],
                            type='list'),
            refresh=dict(default=3600,
                         type='int'),
            retry=dict(default=1800,
                       type='int'),
            expire=dict(default=604800,
                        type='int'),
            ttl=dict(default=600,
                     type='int'),
            contact=dict(default='',
                         type='str'),
            mx=dict(default=[],
                    type='list'),
            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
        supports_check_mode=True,
        required_if=([
            ('state', 'present', ['nameserver', 'interfaces'])
        ])
    )
    # 'type' is the Ansible option name; alias it locally so the builtin
    # type() is not shadowed.
    zone_type = module.params['type']
    zone = module.params['zone']
    nameserver = module.params['nameserver']
    interfaces = module.params['interfaces']
    refresh = module.params['refresh']
    retry = module.params['retry']
    expire = module.params['expire']
    ttl = module.params['ttl']
    contact = module.params['contact']
    mx = module.params['mx']
    state = module.params['state']
    changed = False
    # BUGFIX: diff must be bound before exit_json() below -- previously it
    # was only assigned on the state=present path, so state=absent raised
    # NameError instead of exiting cleanly.
    diff = None
    obj = list(ldap_search(
        '(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
        attr=['dNSZone']
    ))
    exists = bool(len(obj))
    container = 'cn=dns,{}'.format(base_dn())
    dn = 'zoneName={},{}'.format(zone, container)
    if contact == '':
        # SOA contact defaults to root@<zone>.
        contact = 'root@{}.'.format(zone)
    if state == 'present':
        try:
            if not exists:
                obj = umc_module_for_add('dns/{}'.format(zone_type), container)
            else:
                obj = umc_module_for_edit('dns/{}'.format(zone_type), dn)
            obj['zone'] = zone
            obj['nameserver'] = nameserver
            obj['a'] = interfaces
            # The UMC API expects (value, unit) tuples for the SOA timers.
            obj['refresh'] = convert_time(refresh)
            obj['retry'] = convert_time(retry)
            obj['expire'] = convert_time(expire)
            obj['ttl'] = convert_time(ttl)
            obj['contact'] = contact
            obj['mx'] = mx
            diff = obj.diff()
            if exists:
                # Only report a change when some attribute actually differs.
                for k in obj.keys():
                    if obj.hasChanged(k):
                        changed = True
            else:
                changed = True
            if not module.check_mode:
                if not exists:
                    obj.create()
                elif changed:
                    obj.modify()
        except Exception as e:
            module.fail_json(
                msg='Creating/editing dns zone {} failed: {}'.format(zone, e)
            )
    if state == 'absent' and exists:
        try:
            obj = umc_module_for_edit('dns/{}'.format(zone_type), dn)
            if not module.check_mode:
                obj.remove()
            changed = True
        except Exception as e:
            module.fail_json(
                msg='Removing dns zone {} failed: {}'.format(zone, e)
            )
    module.exit_json(
        changed=changed,
        diff=diff,
        zone=zone
    )
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
ping/youtube-dl
|
youtube_dl/extractor/canvas.py
|
9
|
12184
|
from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from .gigya import GigyaBaseIE
from ..compat import compat_HTTPError
from ..utils import (
ExtractorError,
strip_or_none,
float_or_none,
int_or_none,
merge_dicts,
parse_iso8601,
)
class CanvasIE(InfoExtractor):
    # Talks directly to the VRT Mediazone REST API, which serves assets for
    # the canvas, een, ketnet and vrtvideo sites.
    _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrtvideo)/assets/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
        'md5': '90139b746a0a9bd7bb631283f6e2a64e',
        'info_dict': {
            'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
            'ext': 'flv',
            'title': 'Nachtwacht: De Greystook',
            'description': 'md5:1db3f5dc4c7109c821261e7512975be7',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 1468.03,
        },
        'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
    }, {
        'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        # Both the site id and the asset id are encoded in the API URL.
        mobj = re.match(self._VALID_URL, url)
        site_id, video_id = mobj.group('site_id'), mobj.group('id')
        data = self._download_json(
            'https://mediazone.vrt.be/api/v1/%s/assets/%s'
            % (site_id, video_id), video_id)
        title = data['title']
        description = data.get('description')
        formats = []
        # Dispatch on the streaming protocol advertised by the API for each
        # target URL; unknown types fall through as direct downloads.
        for target in data['targetUrls']:
            format_url, format_type = target.get('url'), target.get('type')
            if not format_url or not format_type:
                continue
            if format_type == 'HLS':
                formats.extend(self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id=format_type, fatal=False))
            elif format_type == 'HDS':
                formats.extend(self._extract_f4m_formats(
                    format_url, video_id, f4m_id=format_type, fatal=False))
            elif format_type == 'MPEG_DASH':
                formats.extend(self._extract_mpd_formats(
                    format_url, video_id, mpd_id=format_type, fatal=False))
            elif format_type == 'HSS':
                formats.extend(self._extract_ism_formats(
                    format_url, video_id, ism_id='mss', fatal=False))
            else:
                formats.append({
                    'format_id': format_type,
                    'url': format_url,
                })
        self._sort_formats(formats)
        subtitles = {}
        subtitle_urls = data.get('subtitleUrls')
        if isinstance(subtitle_urls, list):
            for subtitle in subtitle_urls:
                subtitle_url = subtitle.get('url')
                if subtitle_url and subtitle.get('type') == 'CLOSED':
                    # 'CLOSED' targets are registered as 'nl' (Dutch) captions.
                    subtitles.setdefault('nl', []).append({'url': subtitle_url})
        return {
            'id': video_id,
            'display_id': video_id,
            'title': title,
            'description': description,
            'formats': formats,
            # duration comes back in milliseconds; scale to seconds.
            'duration': float_or_none(data.get('duration'), 1000),
            'thumbnail': data.get('posterImageUrl'),
            'subtitles': subtitles,
        }
class CanvasEenIE(InfoExtractor):
IE_DESC = 'canvas.be and een.be'
_VALID_URL = r'https?://(?:www\.)?(?P<site_id>canvas|een)\.be/(?:[^/]+/)*(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week',
'md5': 'ed66976748d12350b118455979cca293',
'info_dict': {
'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
'display_id': 'de-afspraak-veilt-voor-de-warmste-week',
'ext': 'flv',
'title': 'De afspraak veilt voor de Warmste Week',
'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 49.02,
},
'expected_warnings': ['is not a supported codec'],
}, {
# with subtitles
'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167',
'info_dict': {
'id': 'mz-ast-5240ff21-2d30-4101-bba6-92b5ec67c625',
'display_id': 'pieter-0167',
'ext': 'mp4',
'title': 'Pieter 0167',
'description': 'md5:943cd30f48a5d29ba02c3a104dc4ec4e',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 2553.08,
'subtitles': {
'nl': [{
'ext': 'vtt',
}],
},
},
'params': {
'skip_download': True,
},
'skip': 'Pagina niet gevonden',
}, {
'url': 'https://www.een.be/sorry-voor-alles/herbekijk-sorry-voor-alles',
'info_dict': {
'id': 'mz-ast-11a587f8-b921-4266-82e2-0bce3e80d07f',
'display_id': 'herbekijk-sorry-voor-alles',
'ext': 'mp4',
'title': 'Herbekijk Sorry voor alles',
'description': 'md5:8bb2805df8164e5eb95d6a7a29dc0dd3',
'thumbnail': r're:^https?://.*\.jpg$',
'duration': 3788.06,
},
'params': {
'skip_download': True,
},
'skip': 'Episode no longer available',
}, {
'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
site_id, display_id = mobj.group('site_id'), mobj.group('id')
webpage = self._download_webpage(url, display_id)
title = strip_or_none(self._search_regex(
r'<h1[^>]+class="video__body__header__title"[^>]*>(.+?)</h1>',
webpage, 'title', default=None) or self._og_search_title(
webpage, default=None))
video_id = self._html_search_regex(
r'data-video=(["\'])(?P<id>(?:(?!\1).)+)\1', webpage, 'video id',
group='id')
return {
'_type': 'url_transparent',
'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id),
'ie_key': CanvasIE.ie_key(),
'id': video_id,
'display_id': display_id,
'title': title,
'description': self._og_search_description(webpage),
}
class VrtNUIE(GigyaBaseIE):
    """Extractor for VRT NU (vrt.be/vrtnu).

    Content requires a logged-in session: credentials are exchanged via
    Gigya, then session cookies are fetched from token.vrt.be.
    """
    IE_DESC = 'VrtNU.be'
    _VALID_URL = r'https?://(?:www\.)?vrt\.be/(?P<site_id>vrtnu)/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.vrt.be/vrtnu/a-z/postbus-x/1/postbus-x-s1a1/',
        'info_dict': {
            'id': 'pbs-pub-2e2d8c27-df26-45c9-9dc6-90c78153044d$vid-90c932b1-e21d-4fb8-99b1-db7b49cf74de',
            'ext': 'flv',
            'title': 'De zwarte weduwe',
            'description': 'md5:d90c21dced7db869a85db89a623998d4',
            'duration': 1457.04,
            'thumbnail': r're:^https?://.*\.jpg$',
            'season': '1',
            'season_number': 1,
            'episode_number': 1,
        },
        'skip': 'This video is only available for registered users'
    }]
    _NETRC_MACHINE = 'vrtnu'
    # Gigya CDC API key used for the credential exchange.
    _APIKEY = '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy'
    _CONTEXT_ID = 'R3595707040'

    def _real_initialize(self):
        self._login()

    def _login(self):
        """Log in via Gigya and obtain session cookies from token.vrt.be.

        No-op when no credentials are configured.  The token request is
        retried up to 3 times on HTTP 401 because it intermittently fails.
        """
        username, password = self._get_login_info()
        if username is None:
            return

        auth_data = {
            'APIKey': self._APIKEY,
            'targetEnv': 'jssdk',
            'loginID': username,
            'password': password,
            'authMode': 'cookie',
        }

        auth_info = self._gigya_login(auth_data)

        # Sometimes authentication fails for no good reason, retry
        login_attempt = 1
        while login_attempt <= 3:
            try:
                # When requesting a token, no actual token is returned, but the
                # necessary cookies are set.
                self._request_webpage(
                    'https://token.vrt.be',
                    None, note='Requesting a token', errnote='Could not get a token',
                    headers={
                        'Content-Type': 'application/json',
                        'Referer': 'https://www.vrt.be/vrtnu/',
                    },
                    data=json.dumps({
                        'uid': auth_info['UID'],
                        'uidsig': auth_info['UIDSignature'],
                        'ts': auth_info['signatureTimestamp'],
                        'email': auth_info['profile']['email'],
                    }).encode('utf-8'))
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    login_attempt += 1
                    self.report_warning('Authentication failed')
                    self._sleep(1, None, msg_template='Waiting for %(timeout)s seconds before trying again')
                else:
                    # Bare raise (not `raise e`) preserves the original
                    # traceback of the unexpected error.
                    raise
            else:
                break

    def _real_extract(self, url):
        """Extract metadata from a vrtnu page and delegate playback to CanvasIE."""
        display_id = self._match_id(url)

        webpage, urlh = self._download_webpage_handle(url, display_id)

        info = self._search_json_ld(webpage, display_id, default={})

        # title is optional here since it may be extracted by extractor
        # that is delegated from here
        title = strip_or_none(self._html_search_regex(
            r'(?ms)<h1 class="content__heading">(.+?)</h1>',
            webpage, 'title', default=None))

        description = self._html_search_regex(
            r'(?ms)<div class="content__description">(.+?)</div>',
            webpage, 'description', default=None)

        # Two page layouts exist: an active tab or a <select> option.
        # (?x) mode: literal spaces in the pattern are escaped, layout
        # whitespace is insignificant.
        season = self._html_search_regex(
            [r'''(?xms)<div\ class="tabs__tab\ tabs__tab--active">\s*
                <span>seizoen\ (.+?)</span>\s*
            </div>''',
             r'<option value="seizoen (\d{1,3})" data-href="[^"]+?" selected>'],
            webpage, 'season', default=None)

        season_number = int_or_none(season)

        episode_number = int_or_none(self._html_search_regex(
            r'''(?xms)<div\ class="content__episode">\s*
                <abbr\ title="aflevering">afl</abbr>\s*<span>(\d+)</span>
            </div>''',
            webpage, 'episode_number', default=None))

        release_date = parse_iso8601(self._html_search_regex(
            r'(?ms)<div class="content__broadcastdate">\s*<time\ datetime="(.+?)"',
            webpage, 'release_date', default=None))

        # If there's a ? or a # in the URL, remove them and everything after
        clean_url = urlh.geturl().split('?')[0].split('#')[0].strip('/')
        securevideo_url = clean_url + '.mssecurevideo.json'

        try:
            video = self._download_json(securevideo_url, display_id)
        except ExtractorError as e:
            # 401 here means the session is missing/expired, not a fatal error.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                self.raise_login_required()
            raise

        # We are dealing with a '../<show>.relevant' URL
        redirect_url = video.get('url')
        if redirect_url:
            return self.url_result(self._proto_relative_url(redirect_url, 'https:'))

        # There is only one entry, but with an unknown key, so just get
        # the first one
        video_id = list(video.values())[0].get('videoid')

        return merge_dicts(info, {
            '_type': 'url_transparent',
            'url': 'https://mediazone.vrt.be/api/v1/vrtvideo/assets/%s' % video_id,
            'ie_key': CanvasIE.ie_key(),
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'season': season,
            'season_number': season_number,
            'episode_number': episode_number,
            'release_date': release_date,
        })
|
unlicense
|
Mathijsz/razdroid-kernel
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events with no dedicated handler below;
# autodict (from Core) transparently creates missing nested entries.
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
	# Called once by perf after the last event; report any event types
	# that were seen but had no dedicated handler.
	print_unhandled()
# Typed handler: perf calls this for each irq:softirq_entry event,
# passing the common fields plus the event-specific `vec` argument.
def irq__softirq_entry(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	vec):
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		# symbol_str maps the raw vector number to its symbolic name;
		# the trailing comma suppresses print's own newline (Python 2).
		print "vec=%s\n" % \
		(symbol_str("irq__softirq_entry", "vec", vec)),
# Typed handler: perf calls this for each kmem:kmalloc event; exercises
# flag_str() by decoding the gfp_flags bitmask symbolically.
def kmem__kmalloc(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	call_site, ptr, bytes_req, bytes_alloc,
	gfp_flags):
		print_header(event_name, common_cpu, common_secs, common_nsecs,
			common_pid, common_comm)
		print_uncommon(context)
		# Trailing comma suppresses print's own newline (Python 2).
		print "call_site=%u, ptr=%u, bytes_req=%u, " \
		"bytes_alloc=%u, gfp_flags=%s\n" % \
		(call_site, ptr, bytes_req, bytes_alloc,
		flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
	# Count events that have no dedicated handler.  On the first sighting
	# of an event name, unhandled[event_name] is a fresh empty autodict,
	# so `+= 1` raises TypeError and we seed the counter at 1 instead.
	try:
		unhandled[event_name] += 1
	except TypeError:
		unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
	# Common per-event prefix; the trailing comma suppresses print's
	# newline (Python 2) so the caller can append event-specific fields
	# on the same output line.
	print "%-20s %5u %05u.%09u %8u %-20s " % \
	(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
	# These "common" trace fields are fetched back from perf via the
	# opaque context object (common_pc, common_flags, common_lock_depth
	# are provided by perf_trace_context) rather than passed as args.
	print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
		% (common_pc(context), trace_flag_str(common_flags(context)), \
		common_lock_depth(context))
def print_unhandled():
	# Dump the table of event types counted by trace_unhandled();
	# silent when every event had a dedicated handler.
	keys = unhandled.keys()
	if not keys:
		return
	# Trailing commas suppress print's newline (Python 2); the format
	# strings supply their own "\n".
	print "\nunhandled events:\n\n",
	print "%-40s %10s\n" % ("event", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
		"-----------"),
	for event_name in keys:
		print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
staciecampbell/allsun
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py
|
1843
|
1786
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
  """Checks that NinjaWriter derives platform-appropriate binary names."""

  def test_BinaryNamesWindows(self):
    # These cannot run on non-Windows as they require a VS installation to
    # correctly handle variable expansion.
    if not sys.platform.startswith('win'):
      return
    win_writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
        'build.ninja', 'win')
    target = { 'target_name': 'wee' }
    exe_name = win_writer.ComputeOutputFileName(target, 'executable')
    self.assertTrue(exe_name.endswith('.exe'))
    dll_name = win_writer.ComputeOutputFileName(target, 'shared_library')
    self.assertTrue(dll_name.endswith('.dll'))
    lib_name = win_writer.ComputeOutputFileName(target, 'static_library')
    self.assertTrue(lib_name.endswith('.lib'))

  def test_BinaryNamesLinux(self):
    linux_writer = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
        'build.ninja', 'linux')
    target = { 'target_name': 'wee' }
    # Executables carry no extension on Linux.
    exe_name = linux_writer.ComputeOutputFileName(target, 'executable')
    self.assertTrue('.' not in exe_name)
    # Libraries get the conventional lib prefix plus .so/.a suffix.
    so_name = linux_writer.ComputeOutputFileName(target, 'shared_library')
    self.assertTrue(so_name.startswith('lib'))
    self.assertTrue(so_name.endswith('.so'))
    a_name = linux_writer.ComputeOutputFileName(target, 'static_library')
    self.assertTrue(a_name.startswith('lib'))
    self.assertTrue(a_name.endswith('.a'))
# Allow running this test file directly from the command line.
if __name__ == '__main__':
  unittest.main()
|
gpl-2.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.