repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
determinedcheetahs/cheetah_juniper | hadoop/src/examples/org/apache/hadoop/examples/terasort/job_history_summary.py | 323 | 3444 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
# Matches space-separated name="value" attribute pairs of a job-history line.
pat = re.compile('(?P<name>[^=]+)="(?P<value>[^"]*)" *')
# Matches name:value counter entries separated by commas.
counterPat = re.compile('(?P<name>[^:]+):(?P<value>[^,]*),?')

def parse(tail):
    """Parse the attribute portion of a job-history line into a dict.

    tail is a string of name="value" pairs; returns {name: value}.
    Unmatched text is silently skipped (same as the original findall loop).
    """
    # The compiled pattern's findall yields (name, value) tuples, which
    # dict() consumes directly -- no manual accumulation loop needed.
    return dict(pat.findall(tail))
# Per-task timing tables, keyed by TASKID; values are epoch seconds
# (the history file reports milliseconds, hence the /1000).
mapStartTime = {}
mapEndTime = {}
reduceStartTime = {}
reduceShuffleTime = {}
reduceSortTime = {}
reduceEndTime = {}
# Bytes written to HDFS by each reduce task.
reduceBytes = {}

for line in sys.stdin:
    # Each history line is "<EventType> <name>="<value>" ...".
    words = line.split(" ", 1)
    event = words[0]
    attrs = parse(words[1])
    if event == 'MapAttempt':
        # "key in dict" replaces the deprecated dict.has_key()
        # (removed in Python 3; "in" works identically on Python 2).
        if "START_TIME" in attrs:
            mapStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000
        elif "FINISH_TIME" in attrs:
            mapEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"]) / 1000
    elif event == 'ReduceAttempt':
        if "START_TIME" in attrs:
            reduceStartTime[attrs["TASKID"]] = int(attrs["START_TIME"]) / 1000
        elif "FINISH_TIME" in attrs:
            # A finished reduce attempt reports shuffle/sort/finish times.
            reduceShuffleTime[attrs["TASKID"]] = int(attrs["SHUFFLE_FINISHED"]) / 1000
            reduceSortTime[attrs["TASKID"]] = int(attrs["SORT_FINISHED"]) / 1000
            reduceEndTime[attrs["TASKID"]] = int(attrs["FINISH_TIME"]) / 1000
    elif event == 'Task':
        if attrs["TASK_TYPE"] == "REDUCE" and "COUNTERS" in attrs:
            for n, v in counterPat.findall(attrs["COUNTERS"]):
                if n == "File Systems.HDFS bytes written":
                    reduceBytes[attrs["TASKID"]] = int(v)
runningMaps = {}
shufflingReduces = {}
sortingReduces = {}
runningReduces = {}
startTime = min(reduce(min, mapStartTime.values()),
reduce(min, reduceStartTime.values()))
endTime = max(reduce(max, mapEndTime.values()),
reduce(max, reduceEndTime.values()))
reduces = reduceBytes.keys()
reduces.sort()
print "Name reduce-output-bytes shuffle-finish reduce-finish"
for r in reduces:
print r, reduceBytes[r], reduceShuffleTime[r] - startTime,
print reduceEndTime[r] - startTime
print
for t in range(startTime, endTime):
runningMaps[t] = 0
shufflingReduces[t] = 0
sortingReduces[t] = 0
runningReduces[t] = 0
for map in mapStartTime.keys():
for t in range(mapStartTime[map], mapEndTime[map]):
runningMaps[t] += 1
for reduce in reduceStartTime.keys():
for t in range(reduceStartTime[reduce], reduceShuffleTime[reduce]):
shufflingReduces[t] += 1
for t in range(reduceShuffleTime[reduce], reduceSortTime[reduce]):
sortingReduces[t] += 1
for t in range(reduceSortTime[reduce], reduceEndTime[reduce]):
runningReduces[t] += 1
print "time maps shuffle merge reduce"
for t in range(startTime, endTime):
print t - startTime, runningMaps[t], shufflingReduces[t], sortingReduces[t],
print runningReduces[t]
| apache-2.0 |
reingart/gui2py | gui/menu.py | 14 | 13472 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"gui2py's Menu Model (encapsulates wx.MenuBar, wx.Menu and wx.MenuItems)"
__author__ = "Mariano Reingart (reingart@gmail.com)"
__copyright__ = "Copyright (C) 2013- Mariano Reingart"
__license__ = "LGPL 3.0"
# Initial implementation was inspired on PythonCard's menu module, altought
# it was almost completely discarded and re-written from scratch to make it
# simpler and cleaner, following the general gui2py component object model
# Note: some wx stubs ("dummy window") where necessary for this simplification
import wx
from .event import FormEvent
from .component import Component, Spec, StyleSpec, EventSpec, InitSpec, DimensionSpec
from . import images
from . import registry
DEBUG = False
class wx_DummyWindow:
    "Class to emulate (and normalize) menus in wx with the gui object model"
    # if using custom-draw menus (agw.FlatMenu) this would not be necessary
    # (so wx_Menu* could be replaced someday..)
    # note that wx ignores dimension and almost all events on menus
    # Font and Background/Textcolour seem to work only on MSW

    def __init__(self, parent, *args, **kwargs):
        # only the parent link is kept; menus have no real window state
        self.parent = parent

    def GetParent(self):
        return self.parent

    def Reparent(self, new_parent):
        self.parent = new_parent

    def GetSize(self):
        # menus have no meaningful size; report "unspecified" to wx callers
        return [-1, -1]

    GetSizeTuple = GetClientSize = GetSize
    GetPositionTuple = GetSizeTuple

    def GetCharWidth(self):
        return 0

    def GetCharHeight(self):
        return 0

    def Dummy(self, *args, **kwargs):
        # shared no-op used to stub out window methods that menus lack
        pass

    Show = SetSize = Refresh = Move = SetToolTip = Dummy
    SetClientSize = Dummy
    IsShown = lambda self: True

    def Bind(self, evt, handler, id=None):
        # this should reach top level window:
        if evt == wx.EVT_SIZE:
            # size events are meaningless for menus; swallow them
            pass
        else:
            if DEBUG: print "binding MENU", self.__class__.__name__, id, handler
            self.parent.Bind(evt, handler, id=id or self.GetId())

    def Unbind(self, evt, id=None):
        if DEBUG: print "unbinding MENU", self.Text, self.GetId()
        self.parent.Unbind(evt, id=id or self.GetId())
class wx_MenuItem(wx_DummyWindow, wx.MenuItem):
    "wx.MenuItem wrapper that appends itself to its parent menu on creation"

    def __init__(self, parent, *args, **kwargs):
        wx_DummyWindow.__init__(self, parent, *args, **kwargs)
        wx.MenuItem.__init__(self, parentMenu=parent,
                             id=kwargs['id'],
                             text=kwargs['label'],
                             kind=kwargs['style'],
                             #subMenu=None,
                             )
        if self.GetKind() == wx.ITEM_SEPARATOR:
            self.parent.AppendSeparator()     # do not use AppendItem on MSW
        #elif self.GetKind() == wx.ITEM_CHECK:
        #    self.parent.AppendCheckItem(wx.NewId(), self.GetText())
        else:
            # in phoenix (2.9.5), kwargs is helpString, so set it here:
            self.SetHelp(kwargs['help'])
            self.parent.AppendItem(self)

    def Enable(self, value):
        # avoid assertion in Enable: invalid menu item
        if not self.GetKind() == wx.ITEM_SEPARATOR:
            wx.MenuItem.Enable(self, value)

    def Destroy(self):
        # detach from the parent menu before destroying the wx object
        self.parent.RemoveItem(self)
        wx.MenuItem.Destroy(self)

    def Check(self, value):
        # avoid assertion in Check(): invalid menu item
        if self.GetKind() == wx.ITEM_CHECK:
            wx.MenuItem.Check(self, value)

    # normalize colour accessors to the window-style method names:
    GetForegroundColour = wx.MenuItem.GetTextColour
    SetForegroundColour = wx.MenuItem.SetTextColour
class MenuItem(Component):
    "A MenuItem represents one selectable item in a Menu"

    _wx_class = wx_MenuItem
    _registry = registry.MENU

    label = InitSpec(lambda self: self.wx_obj.GetText(),
                     lambda self, label: self.wx_obj.SetText(label),
                     optional=False, default='MenuItem', type="string",
                     doc="text to show as caption")
    help = InitSpec(lambda self: self.wx_obj.GetHelp(),
                    lambda self, label: self.wx_obj.SetHelp(label),
                    optional=True, default='', type="string",
                    doc="text to show as help in the status bar?")
    onclick = EventSpec('click', binding=wx.EVT_MENU, kind=FormEvent)

    def rebuild(self, **kwargs):
        # avoid recreating the object (not supported yet!)
        Component.rebuild(self, False, **kwargs)
class MenuItemCheckable(MenuItem):
    "A MenuItem with a toggleable checked on/off state"

    _wx_class = wx_MenuItem
    _registry = registry.MENU
    _style = wx.ITEM_CHECK

    checked = Spec(lambda self: self.wx_obj.IsChecked(),
                   lambda self, value: self.wx_obj.Check(value),
                   default=False, type="boolean")


class MenuItemSeparator(MenuItem):
    # separators have no label/state; wx_MenuItem special-cases this style
    _style = wx.ITEM_SEPARATOR
class wx_Menu(wx_DummyWindow, wx.Menu):
    "wx.Menu wrapper that attaches itself to a menubar or parent menu"

    def __init__(self, parent, *args, **kwargs):
        wx_DummyWindow.__init__(self, parent, *args, **kwargs)
        # if this is a popup menu, call constructor with:
        # kwargs.get("label"), kwargs.get("style")
        wx.Menu.__init__(self)
        if isinstance(parent, wx.MenuBar):
            self.parent.Append(self, kwargs.get("label"))
        else:
            self.parent.AppendSubMenu(submenu=self,
                                      text=kwargs.get("label"))
        # remember the id assigned by the parent (plain wx.Menu has none):
        id = self.parent.GetLastId()
        self.GetId = lambda: id

    def Destroy(self):
        if isinstance(self.parent, wx.MenuBar):
            self.parent.RemoveItem(self)
        else:
            self.parent.Remove(self.GetId())
        try:
            wx.Menu.Destroy(self)
        except TypeError:
            # we were removed! ignore "got _wxPyDeadObject instance instead"
            pass

    # unsupported methods:
    GetBackgroundColour = SetBackgroundColour = wx_DummyWindow.Dummy
    SetFont = wx_DummyWindow.Dummy
    GetFont = lambda self: wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
    GetForegroundColour = lambda self: 'black'
    SetForegroundColour = wx_DummyWindow.Dummy

    def Enable(self, value):
        "enable or disable all menu items"
        for i in range(self.GetMenuItemCount()):
            it = self.FindItemByPosition(i)
            it.Enable(value)

    def IsEnabled(self, *args, **kwargs):
        "check if all menu items are enabled"
        for i in range(self.GetMenuItemCount()):
            it = self.FindItemByPosition(i)
            if not it.IsEnabled():
                return False
        return True

    def SetItemLabel(self, menu, label):
        # (leftover debug print of the menu id removed; it polluted stdout)
        self.SetLabel(menu.GetId(), label)

    def GetItemLabel(self, menu):
        try:
            return self.GetLabel(menu.GetId())
        except Exception:
            # was a bare "except: import pdb; pdb.set_trace()" debugger
            # left-over; a missing/destroyed item now just reports no label
            return None

    def GetLastId(self):
        return list(self.GetMenuItems())[-1].GetId()
class Menu(Component):
    "A Menu contains 0..n MenuItem objects."

    _wx_class = wx_Menu
    _registry = registry.MENU

    def _set_label(self, value):
        # note that wx.Menu.SetTitle() does not work on gtk for menubars
        #self.wx_obj.SetTitle(value)  # do not use SetTitle (in msw is shown)
        # delegate to the parent so menubar vs submenu is handled uniformly:
        self.wx_obj.parent.SetItemLabel(self.wx_obj, value)

    def _get_label(self):
        # note that wx.Menu.GetTitle() does not work on windows for menubars
        return self.wx_obj.parent.GetItemLabel(self.wx_obj)

    def find(self, item_id=None):
        "Recursively find a menu item by its id (useful for event handlers)"
        for it in self:
            if it.id == item_id:
                return it
            elif isinstance(it, Menu):
                found = it.find(item_id)
                if found:
                    return found

    def rebuild(self, **kwargs):
        # avoid recreating the object (not supported yet!)
        Component.rebuild(self, False, **kwargs)

    label = InitSpec(_get_label, _set_label,
                     optional=False, default='Menu', type="string",
                     doc="text to show as caption")
class wx_MenuBar(wx_DummyWindow, wx.MenuBar):
    "wx.MenuBar wrapper exposing the normalized dummy-window interface"

    def __init__(self, parent, *args, **kwargs):
        # it should receive (self, parent, id, pos, size, style, name)
        # but it doesnt!
        # TypeError: new_MenuBar() takes at most 1 argument (7 given)
        wx_DummyWindow.__init__(self, parent, *args, **kwargs)
        wx.MenuBar.__init__(self)

    # unsupported methods:
    GetBackgroundColour = SetBackgroundColour = wx_DummyWindow.Dummy
    SetFont = wx_DummyWindow.Dummy
    GetFont = lambda self: wx.SystemSettings.GetFont(wx.SYS_DEFAULT_GUI_FONT)
    GetForegroundColour = lambda self: 'black'
    SetForegroundColour = wx_DummyWindow.Dummy

    def Enable(self, value):
        "enable or disable all top menus"
        for i in range(self.GetMenuCount()):
            self.EnableTop(i, value)

    def IsEnabled(self, *args, **kwargs):
        "check if all top menus are enabled"
        for i in range(self.GetMenuCount()):
            if not self.IsEnabledTop(i):
                return False
        return True

    def RemoveItem(self, menu):
        "Helper method to remove a menu avoiding using its position"
        menus = self.GetMenus()     # get the list of (menu, title)
        menus = [submenu for submenu in menus if submenu[0] != menu]
        self.SetMenus(menus)

    def SetItemLabel(self, menu, label):
        # look up the menu's position, then relabel it by position
        menus = self.GetMenus()     # get the list of (menu, title)
        pos = [submenu[0] for submenu in menus].index(menu)
        self.SetMenuLabel(pos, label)

    def GetItemLabel(self, menu):
        # returns None if the menu is not found (implicit fall-through)
        menus = self.GetMenus()     # get the list of (menu, title)
        for submenu, title in menus:
            if submenu == menu:
                return title

    def GetLastId(self):
        # top-level menus get no usable wx id; -1 means "whole menubar"
        return -1  #self.GetMenus()[-1][0].GetId()
class MenuBar(Component):
    "Window menu bar that holds the top-level Menu objects"

    _wx_class = wx_MenuBar
    _image = images.menubar
    _registry = registry.CONTROLS

    def __init__(self, *args, **kwargs):
        Component.__init__(self, *args, **kwargs)
        # NOTE(review): the guard checks hasattr "_designer" but then reads
        # self.designer -- presumably an attribute/property pair; confirm.
        if hasattr(self, "_designer") and self.designer:
            # create a basic menu
            id = wx.NewId()
            m = Menu(self, label='Menu', name="menu_%s" % id, id=id)
            id = wx.NewId()
            mi = MenuItem(m, label='MenuItem', name='menu_item_%s' % id, id=id)
            mi.designer = self.designer
        self._parent.menubar = self  # add the menubar to the window

    def set_parent(self, new_parent, init=False):
        Component.set_parent(self, new_parent, init)
        # if new_parent is rebuild, reparent (even to None) to avoid segv:
        if not init:
            wx_obj = new_parent and new_parent.wx_obj
            self.wx_obj.Reparent(wx_obj)

    def find(self, item_id=None):
        "Recursively find a menu item by its id (useful for event handlers)"
        for it in self:
            found = it.find(item_id)
            if found:
                return found
# update the metadata used by the designer's "add" context menu: these are
# the child component types each container accepts
MenuBar._meta.valid_children = [Menu, ]
Menu._meta.valid_children = [MenuItem, MenuItemCheckable, MenuItemSeparator, Menu]
# Unit Test
if __name__ == '__main__':
    # Manual smoke test: build a window with a two-menu menubar plus a
    # nested submenu, and wire a couple of click handlers.
    import sys, os
    # disable ubuntu unity menubar
    os.environ['UBUNTU_MENUPROXY'] = '0'
    app = wx.App(redirect=False)
    from gui.windows import Window
    w = Window(title="hello world", name="frmTest", tool_window=False,
               resizable=True, visible=False, pos=(180, 0))

    mb = MenuBar(w, name="menubar")
    m1 = Menu(mb, label='File', name="mnu_file")
    mi11 = MenuItem(m1, label='Open', name='menu_file_open')
    mi12 = MenuItem(m1, label='Save', name='menu_file_save', enabled=False)
    mi13 = MenuItem(m1, label='Quit', name='menu_file_quit')

    # nested submenu:
    m11 = Menu(m1, label='Recent files', name="mnu_recent_file")
    mi111 = MenuItem(m11, label='file1', name='menu_recent_file1')
    mi112 = MenuItem(m11, label='file2', name='menu_recent_file2')
    mi113 = MenuItem(m11, label='file3', name='menu_recent_file3')

    m2 = Menu(mb, label='Edit', name="mnu_edit")
    mi21 = MenuItem(m2, label='Copy', name='menu_edit_copy')
    mi22 = MenuItem(m2, label='Cut', name='menu_edit_cut')
    mi23 = MenuItem(m2, label='Paste', name='menu_edit_paste')
    m2.enabled = False  # disable a whole menu

    def disable_all(event):
        mb.enabled = False  # disable the menubar

    def enable_edit(event):
        # toggle the Edit menu and the related File items:
        m2.enabled = not m2.enabled
        mi11.label = "Close" if m2.enabled else "Open"
        mi12.enabled = not mi12.enabled

    mi11.onclick = enable_edit
    mi13.onclick = disable_all

    from gui.tools.inspector import InspectorTool
    InspectorTool().show(w)
    w.show()
    app.MainLoop()
| lgpl-3.0 |
gojira/tensorflow | tensorflow/contrib/boosted_trees/examples/boston_combined.py | 41 | 5885 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Regression on Boston housing data using DNNBoostedTreeCombinedRegressor.
Example Usage:
python tensorflow/contrib/boosted_trees/examples/boston_combined.py \
--batch_size=404 --output_dir="/tmp/boston" \
--dnn_hidden_units="8,4" --dnn_steps_to_train=1000 \
--tree_depth=4 --tree_learning_rate=0.1 \
--num_trees=100 --tree_l2=0.001 --num_eval_steps=1 \
--vmodule=training_ops=1
When training is done, mean squared error on eval data is reported.
Point tensorboard to the directory for the run to see how the training
progresses:
tensorboard --logdir=/tmp/boston
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import tensorflow as tf
from tensorflow.contrib.boosted_trees.estimator_batch.dnn_tree_combined_estimator import DNNBoostedTreeCombinedRegressor
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import learn_runner
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
_BOSTON_NUM_FEATURES = 13


def _get_estimator(output_dir, feature_cols):
  """Build a DNNBoostedTreeCombinedRegressor configured from FLAGS.

  Args:
    output_dir: directory used for checkpoints and summaries.
    feature_cols: feature columns fed to the DNN part of the model.

  Returns:
    A configured DNNBoostedTreeCombinedRegressor estimator.
  """
  # All tree-learner hyperparameters come straight from the command line.
  tree_config = learner_pb2.LearnerConfig()
  tree_config.learning_rate_tuner.fixed.learning_rate = (
      FLAGS.tree_learning_rate)
  tree_config.regularization.l1 = 0.0
  tree_config.regularization.l2 = FLAGS.tree_l2
  tree_config.constraints.max_tree_depth = FLAGS.tree_depth

  hidden_units = [int(units) for units in FLAGS.dnn_hidden_units.split(",")]
  return DNNBoostedTreeCombinedRegressor(
      dnn_hidden_units=hidden_units,
      dnn_feature_columns=feature_cols,
      tree_learner_config=tree_config,
      num_trees=FLAGS.num_trees,
      # This should be the number of examples. For large datasets it can be
      # larger than the batch_size.
      tree_examples_per_layer=FLAGS.batch_size,
      model_dir=output_dir,
      config=tf.contrib.learn.RunConfig(save_summary_steps=1),
      dnn_input_layer_to_tree=True,
      dnn_steps_to_train=FLAGS.dnn_steps_to_train)
def _make_experiment_fn(output_dir):
  """Creates experiment for DNNBoostedTreeCombinedRegressor."""
  # Boston housing ships with tf.keras; no external download logic needed.
  (x_train, y_train), (x_test, y_test) = (
      tf.keras.datasets.boston_housing.load_data())

  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": x_train},
      y=y_train,
      batch_size=FLAGS.batch_size,
      num_epochs=None,
      shuffle=True)
  eval_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={"x": x_test}, y=y_test, num_epochs=1, shuffle=False)

  feature_columns = [
      feature_column.real_valued_column("x", dimension=_BOSTON_NUM_FEATURES)
  ]
  # Export strategy: serve from tf.Example protos parsed per feature spec.
  serving_input_fn = input_fn_utils.build_parsing_serving_input_fn(
      tf.contrib.layers.create_feature_spec_for_parsing(feature_columns))

  return tf.contrib.learn.Experiment(
      estimator=_get_estimator(output_dir, feature_columns),
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      train_steps=None,
      eval_steps=FLAGS.num_eval_steps,
      eval_metrics=None,
      export_strategies=[
          saved_model_export_utils.make_export_strategy(serving_input_fn)
      ])
def main(unused_argv):
  # Delegate the whole train-and-evaluate loop to learn_runner; the
  # experiment itself is built from FLAGS inside _make_experiment_fn.
  learn_runner.run(
      experiment_fn=_make_experiment_fn,
      output_dir=FLAGS.output_dir,
      schedule="train_and_evaluate")
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  parser = argparse.ArgumentParser()
  # Define the list of flags that users can change.
  parser.add_argument(
      "--batch_size",
      type=int,
      default=1000,
      help="The batch size for reading data.")
  parser.add_argument(
      "--output_dir",
      type=str,
      required=True,
      help="Choose the dir for the output.")
  parser.add_argument(
      "--num_eval_steps",
      type=int,
      default=1,
      help="The number of steps to run evaluation for.")
  # Flags for configuring DNNBoostedTreeCombinedRegressor.
  parser.add_argument(
      "--dnn_hidden_units",
      type=str,
      default="8,4",
      help="Hidden layers for DNN.")
  parser.add_argument(
      "--dnn_steps_to_train",
      type=int,
      default=1000,
      help="Number of steps to train DNN.")
  parser.add_argument(
      "--tree_depth", type=int, default=4, help="Maximum depth of trees.")
  parser.add_argument(
      "--tree_l2", type=float, default=1.0, help="l2 regularization per batch.")
  parser.add_argument(
      "--tree_learning_rate",
      type=float,
      default=0.1,
      help=("Learning rate (shrinkage weight) with which each "
            "new tree is added."))
  parser.add_argument(
      "--num_trees",
      type=int,
      default=None,
      required=True,
      help="Number of trees to grow before stopping.")
  # Unknown flags (e.g. --vmodule) are forwarded to tf.app.run unchanged.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
mrbox/django | tests/field_deconstruction/tests.py | 189 | 18358 | from __future__ import unicode_literals
from django.apps import apps
from django.db import models
from django.test import SimpleTestCase, override_settings
from django.test.utils import isolate_lru_cache
from django.utils import six
class FieldDeconstructionTests(SimpleTestCase):
"""
Tests the deconstruct() method on all core fields.
"""
    def test_name(self):
        """
        Tests the outputting of the correct name if assigned one.
        """
        # First try using a "normal" field
        field = models.CharField(max_length=65)
        name, path, args, kwargs = field.deconstruct()
        self.assertIsNone(name)
        field.set_attributes_from_name("is_awesome_test")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(name, "is_awesome_test")
        self.assertIsInstance(name, six.text_type)
        # Now try with a ForeignKey
        field = models.ForeignKey("some_fake.ModelName", models.CASCADE)
        name, path, args, kwargs = field.deconstruct()
        self.assertIsNone(name)
        field.set_attributes_from_name("author")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(name, "author")

    def test_auto_field(self):
        # primary_key must survive the deconstruct() round trip.
        field = models.AutoField(primary_key=True)
        field.set_attributes_from_name("id")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.AutoField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"primary_key": True})

    def test_big_integer_field(self):
        field = models.BigIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BigIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_boolean_field(self):
        # Defaults are omitted from kwargs; explicit default=True is kept.
        field = models.BooleanField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.BooleanField(default=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.BooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"default": True})

    def test_char_field(self):
        field = models.CharField(max_length=65)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 65})
        field = models.CharField(max_length=65, null=True, blank=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 65, "null": True, "blank": True})

    def test_char_field_choices(self):
        # choices tuples are deconstructed into a list of tuples.
        field = models.CharField(max_length=1, choices=(("A", "One"), ("B", "Two")))
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CharField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"choices": [("A", "One"), ("B", "Two")], "max_length": 1})

    def test_csi_field(self):
        field = models.CommaSeparatedIntegerField(max_length=100)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.CommaSeparatedIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 100})

    def test_date_field(self):
        field = models.DateField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.DateField(auto_now=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now": True})

    def test_datetime_field(self):
        field = models.DateTimeField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.DateTimeField(auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now_add": True})
        # Bug #21785
        field = models.DateTimeField(auto_now=True, auto_now_add=True)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DateTimeField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"auto_now_add": True, "auto_now": True})

    def test_decimal_field(self):
        field = models.DecimalField(max_digits=5, decimal_places=2)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DecimalField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 2})

    def test_decimal_field_0_decimal_places(self):
        """
        A DecimalField with decimal_places=0 should work (#22272).
        """
        field = models.DecimalField(max_digits=5, decimal_places=0)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.DecimalField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_digits": 5, "decimal_places": 0})

    def test_email_field(self):
        # EmailField always records its max_length (default 254).
        field = models.EmailField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.EmailField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 254})
        field = models.EmailField(max_length=255)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.EmailField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"max_length": 255})

    def test_file_field(self):
        field = models.FileField(upload_to="foo/bar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FileField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/bar"})
        # Test max_length
        field = models.FileField(upload_to="foo/bar", max_length=200)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FileField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/bar", "max_length": 200})
def test_file_path_field(self):
field = models.FilePathField(match=".*\.txt$")
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"match": ".*\.txt$"})
field = models.FilePathField(recursive=True, allow_folders=True, max_length=123)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.FilePathField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"recursive": True, "allow_folders": True, "max_length": 123})
    def test_float_field(self):
        field = models.FloatField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.FloatField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_foreign_key(self):
        # Test basic pointing
        from django.contrib.auth.models import Permission
        field = models.ForeignKey("auth.Permission", models.CASCADE)
        field.remote_field.model = Permission
        field.remote_field.field_name = "id"
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
        self.assertFalse(hasattr(kwargs['to'], "setting_name"))
        # Test swap detection for swappable model
        field = models.ForeignKey("auth.User", models.CASCADE)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.CASCADE})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
        # Test nonexistent (for now) model
        field = models.ForeignKey("something.Else", models.CASCADE)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "something.Else", "on_delete": models.CASCADE})
        # Test on_delete
        field = models.ForeignKey("auth.User", models.SET_NULL)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User", "on_delete": models.SET_NULL})
        # Test to_field preservation
        field = models.ForeignKey("auth.Permission", models.CASCADE, to_field="foobar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "to_field": "foobar", "on_delete": models.CASCADE})
        # Test related_name preservation
        field = models.ForeignKey("auth.Permission", models.CASCADE, related_name="foobar")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "foobar", "on_delete": models.CASCADE})

    @override_settings(AUTH_USER_MODEL="auth.Permission")
    def test_foreign_key_swapped(self):
        with isolate_lru_cache(apps.get_swappable_settings_name):
            # It doesn't matter that we swapped out user for permission;
            # there's no validation. We just want to check the setting stuff works.
            field = models.ForeignKey("auth.Permission", models.CASCADE)
            name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ForeignKey")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "on_delete": models.CASCADE})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")

    def test_image_field(self):
        field = models.ImageField(upload_to="foo/barness", width_field="width", height_field="height")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ImageField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"upload_to": "foo/barness", "width_field": "width", "height_field": "height"})

    def test_integer_field(self):
        field = models.IntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.IntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_ip_address_field(self):
        field = models.IPAddressField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.IPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_generic_ip_address_field(self):
        field = models.GenericIPAddressField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.GenericIPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.GenericIPAddressField(protocol="IPv6")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.GenericIPAddressField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"protocol": "IPv6"})
    def test_many_to_many_field(self):
        # Test normal
        field = models.ManyToManyField("auth.Permission")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertFalse(hasattr(kwargs['to'], "setting_name"))
        # Test swappable
        field = models.ManyToManyField("auth.User")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.User"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")
        # Test through
        field = models.ManyToManyField("auth.Permission", through="auth.Group")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "through": "auth.Group"})
        # Test custom db_table
        field = models.ManyToManyField("auth.Permission", db_table="custom_table")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "db_table": "custom_table"})
        # Test related_name
        field = models.ManyToManyField("auth.Permission", related_name="custom_table")
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission", "related_name": "custom_table"})

    @override_settings(AUTH_USER_MODEL="auth.Permission")
    def test_many_to_many_field_swapped(self):
        with isolate_lru_cache(apps.get_swappable_settings_name):
            # It doesn't matter that we swapped out user for permission;
            # there's no validation. We just want to check the setting stuff works.
            field = models.ManyToManyField("auth.Permission")
            name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.ManyToManyField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"to": "auth.Permission"})
        self.assertEqual(kwargs['to'].setting_name, "AUTH_USER_MODEL")

    def test_null_boolean_field(self):
        field = models.NullBooleanField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.NullBooleanField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_positive_integer_field(self):
        field = models.PositiveIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.PositiveIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_positive_small_integer_field(self):
        field = models.PositiveSmallIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.PositiveSmallIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})

    def test_slug_field(self):
        # db_index and max_length only appear when they differ from defaults.
        field = models.SlugField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SlugField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
        field = models.SlugField(db_index=False, max_length=231)
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SlugField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {"db_index": False, "max_length": 231})

    def test_small_integer_field(self):
        field = models.SmallIntegerField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(path, "django.db.models.SmallIntegerField")
        self.assertEqual(args, [])
        self.assertEqual(kwargs, {})
def test_text_field(self):
field = models.TextField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TextField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
def test_time_field(self):
field = models.TimeField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.TimeField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.TimeField(auto_now=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now': True})
field = models.TimeField(auto_now_add=True)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(args, [])
self.assertEqual(kwargs, {'auto_now_add': True})
def test_url_field(self):
field = models.URLField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
field = models.URLField(max_length=231)
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.URLField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {"max_length": 231})
def test_binary_field(self):
field = models.BinaryField()
name, path, args, kwargs = field.deconstruct()
self.assertEqual(path, "django.db.models.BinaryField")
self.assertEqual(args, [])
self.assertEqual(kwargs, {})
| bsd-3-clause |
eniac/faas | factor/linalg.py | 1 | 5126 | import sys
import os
import re
import math
import utils
import logging
logger = logging.getLogger('Linalg')
cmd_logger = logging.getLogger('cmd')
# Exact configuration here will depend on the instance/hardware type.
def run_rankfile(linalg_params):
    """Generate an OpenMPI rankfile pinning each MPI rank to physical cores.

    Ranks are spread round-robin over the machines in
    ``linalg_params['machines']`` and, within a machine, over its physical
    sockets.  Each rank is given a contiguous core range on one socket when
    it fits; when a rank's share would straddle a socket boundary it gets a
    comma-separated list of per-socket core ranges instead.

    Required keys in ``linalg_params``: ``machines`` (list of hostnames),
    ``mpi_rows``/``mpi_cols`` (MPI grid dimensions), ``phys_socks_per_machine``,
    ``phys_core_per_sock`` and ``rankfile`` (output path).
    """
    # getLogger returns the same singleton the module-level 'Linalg' logger
    # refers to, which keeps this function self-contained.
    logger = logging.getLogger('Linalg')
    logger.info("--- Generating rankfile ---")
    machines = linalg_params['machines']
    num_of_mpi = linalg_params['mpi_rows'] * linalg_params['mpi_cols']
    num_of_mach = len(machines)
    num_of_sock = linalg_params['phys_socks_per_machine']
    num_of_cores_per_sock = linalg_params['phys_core_per_sock']

    jobs_assigned_to_mach = 0
    with open(linalg_params['rankfile'], 'wt', encoding='utf-8') as rfile:
        for mach_no in range(0, num_of_mach):
            # The first (num_of_mpi % num_of_mach) machines take one extra job.
            if mach_no < num_of_mpi % num_of_mach:
                num_of_jobs = num_of_mpi // num_of_mach + 1
            else:
                num_of_jobs = num_of_mpi // num_of_mach

            cores_unassigned = num_of_cores_per_sock * num_of_sock
            # Per-socket count of cores already handed out on this machine.
            socket_counter = {sock: 0 for sock in range(0, num_of_sock)}

            for job_id in range(0, num_of_jobs):
                rank_no = jobs_assigned_to_mach + job_id
                sock_no = job_id % num_of_sock
                start_core = socket_counter[sock_no]
                # BUG FIX: the original computed
                # int(math.ceil(cores_unassigned // (num_of_jobs - job_id)));
                # ceil() of an already-floored integer division is a no-op, so
                # the intended round-up never happened and earlier ranks were
                # assigned too few cores.  True division restores the ceiling.
                cores_to_use = int(math.ceil(cores_unassigned / (num_of_jobs - job_id)))
                end_core = socket_counter[sock_no] + cores_to_use - 1

                # Case for socket splitting: the share does not fit on the
                # preferred socket, so take cores from the least-loaded
                # sockets until the share is satisfied.
                if end_core >= num_of_cores_per_sock:
                    core_needed = cores_to_use
                    slot_str = ""
                    while core_needed > 0:
                        sock = min(socket_counter, key=socket_counter.get)
                        core_use = (num_of_cores_per_sock - socket_counter[sock]
                                    if core_needed >= num_of_cores_per_sock - socket_counter[sock]
                                    else core_needed)
                        core_needed -= core_use
                        start_core = socket_counter[sock]
                        end_core = socket_counter[sock] + core_use - 1
                        slot_str += ("{sock}:{start}-{end},"
                                     .format(sock=sock, start=socket_counter[sock], end=end_core))
                        socket_counter[sock] += core_use
                    slot_str = slot_str[0:-1]  # drop the trailing comma
                    rfile.write("rank {n}={mach} slot={slot}\n"
                                .format(n=rank_no, mach=machines[mach_no], slot=slot_str))
                    cores_unassigned -= cores_to_use
                    continue

                rfile.write("rank {n}={mach} slot={sock}:{start}-{end}\n"
                            .format(n=rank_no, mach=machines[mach_no],
                                    sock=sock_no, start=start_core, end=end_core))
                socket_counter[sock_no] += cores_to_use
                cores_unassigned -= cores_to_use
            jobs_assigned_to_mach += num_of_jobs
    logger.info("--- End of generating rankfile ---")
def run_linalg(linalg_params):
    """Run MSieve's MPI linear-algebra stage (``msieve -nc2``).

    Builds the mpirun/msieve command line from ``linalg_params``, logs it,
    executes it, and terminates the process if msieve reports a non-zero
    exit status.
    """
    logger.info("--- Beginning MSieve linear algebra ---")
    grid_rows = linalg_params['mpi_rows']
    grid_cols = linalg_params['mpi_cols']
    nc2_args = "mpi_nrows={rows} mpi_ncols={cols} target_density={td}".format(
        rows=grid_rows, cols=grid_cols, td=linalg_params['target_density'])
    pieces = [
        "mpirun -np " + str(grid_rows * grid_cols),
        "-H " + ",".join(linalg_params['machines']),
        "-rf " + linalg_params['rankfile'],
        os.path.join(linalg_params['msievedir'], 'msieve'),
        "-nf " + linalg_params['fb_path'],
        "-nc2 \"" + nc2_args + "\"",
        "-v -t " + str(linalg_params['threads']),
        "-l " + linalg_params['log_path'],
        "-s " + linalg_params['dat_path'],
        str(linalg_params['N']),
    ]
    linalg_cmd = " ".join(pieces)
    cmd_logger.info(linalg_cmd)
    stdout, stderr, ret = utils.run_command(linalg_cmd, include_stdout=True,
                                            include_stderr=True,
                                            include_returncode=True,
                                            logger=logger)
    if ret != 0:
        logger.error("Received error code " + str(ret) +
                     " from Msieve linear algebra. Exiting...")
        sys.exit(1)
    logger.info("--- End of MSieve linear algebra ---")
def run(parameters):
    """Read the linear-algebra configuration, then generate the rankfile and
    run the msieve linear-algebra stage."""
    param_paths = ['tasks', 'msieve', 'linalg']
    param_types = {
        "N": int,
        "msievedir": str,
        "mpi": str,
        "hosts": str,
        "target_density": int,
        "phys_socks_per_machine": int,
        "phys_core_per_sock": int,
        "threads_per_core": int,
        "threads": int,
        "rankfile": str,
        "fb_path": str,
        "log_path": str,
        "dat_path": str,
    }
    linalg_params = parameters.myparams(param_types, param_paths)

    # Derive the host list and the MPI grid ("RxC") from the raw strings.
    linalg_params['machines'] = [host.strip()
                                 for host in linalg_params['hosts'].split(',')
                                 if len(host) > 0]
    rows, cols = linalg_params['mpi'].split("x")
    linalg_params['mpi_rows'] = int(rows)
    linalg_params['mpi_cols'] = int(cols)

    # Create a rankfile based on current mpi configuration
    run_rankfile(linalg_params)

    # Run linear algebra
    run_linalg(linalg_params)
| lgpl-3.0 |
lordmuffin/aws-cfn-plex | functions/credstash/setuptools/command/upload_docs.py | 110 | 7275 | # -*- coding: utf-8 -*-
"""upload_docs
Implements a Distutils 'upload_docs' subcommand (upload documentation to
PyPI's pythonhosted.org).
"""
from base64 import standard_b64encode
from distutils import log
from distutils.errors import DistutilsOptionError
import os
import socket
import zipfile
import tempfile
import shutil
import itertools
import functools
from setuptools.extern import six
from setuptools.extern.six.moves import http_client, urllib
from pkg_resources import iter_entry_points
from .upload import upload
def _encode(s):
    """Encode *s* as UTF-8; on Python 3, tolerate lone surrogates."""
    if six.PY3:
        return s.encode('utf-8', 'surrogateescape')
    return s.encode('utf-8', 'strict')
class upload_docs(upload):
    """Distutils command: zip the built documentation and upload it to the
    (legacy) pythonhosted.org documentation host."""

    # override the default repository as upload_docs isn't
    # supported by Warehouse (and won't be).
    DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/'

    description = 'Upload documentation to PyPI'

    user_options = [
        ('repository=', 'r',
         "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY),
        ('show-response', None,
         'display full response text from server'),
        ('upload-dir=', None, 'directory to upload'),
    ]
    boolean_options = upload.boolean_options

    def has_sphinx(self):
        """Return True when no upload dir was given and a 'build_sphinx'
        command entry point is registered; otherwise return None."""
        if self.upload_dir is None:
            for ep in iter_entry_points('distutils.commands', 'build_sphinx'):
                return True

    # Build the Sphinx docs first when available (see has_sphinx).
    sub_commands = [('build_sphinx', has_sphinx)]

    def initialize_options(self):
        """Set default values for the options this command supports."""
        upload.initialize_options(self)
        self.upload_dir = None
        self.target_dir = None

    def finalize_options(self):
        """Resolve the directory to upload: an explicit --upload-dir, the
        Sphinx build output, or <build_base>/docs as a fallback."""
        log.warn("Upload_docs command is deprecated. Use RTD instead.")
        upload.finalize_options(self)
        if self.upload_dir is None:
            if self.has_sphinx():
                build_sphinx = self.get_finalized_command('build_sphinx')
                self.target_dir = build_sphinx.builder_target_dir
            else:
                build = self.get_finalized_command('build')
                self.target_dir = os.path.join(build.build_base, 'docs')
        else:
            self.ensure_dirname('upload_dir')
            self.target_dir = self.upload_dir
        self.announce('Using upload directory %s' % self.target_dir)

    def create_zipfile(self, filename):
        """Zip the contents of self.target_dir into *filename*, storing
        member paths relative to target_dir.

        :raises DistutilsOptionError: if the upload directory is empty.
        """
        zip_file = zipfile.ZipFile(filename, "w")
        try:
            self.mkpath(self.target_dir)  # just in case
            for root, dirs, files in os.walk(self.target_dir):
                if root == self.target_dir and not files:
                    raise DistutilsOptionError(
                        "no files found in upload directory '%s'"
                        % self.target_dir)
                for name in files:
                    full = os.path.join(root, name)
                    relative = root[len(self.target_dir):].lstrip(os.path.sep)
                    dest = os.path.join(relative, name)
                    zip_file.write(full, dest)
        finally:
            zip_file.close()

    def run(self):
        """Build the docs via sub-commands, zip them into a temporary file
        and upload the archive; the temp directory is always removed."""
        # Run sub commands
        for cmd_name in self.get_sub_commands():
            self.run_command(cmd_name)

        tmp_dir = tempfile.mkdtemp()
        name = self.distribution.metadata.get_name()
        zip_file = os.path.join(tmp_dir, "%s.zip" % name)
        try:
            self.create_zipfile(zip_file)
            self.upload_file(zip_file)
        finally:
            shutil.rmtree(tmp_dir)

    @staticmethod
    def _build_part(item, sep_boundary):
        """Yield the byte chunks of one multipart/form-data part for the
        (name, value-or-list-of-values) pair *item*."""
        key, values = item
        title = '\nContent-Disposition: form-data; name="%s"' % key
        # handle multiple entries for the same name
        if not isinstance(values, list):
            values = [values]
        for value in values:
            if isinstance(value, tuple):
                title += '; filename="%s"' % value[0]
                value = value[1]
            else:
                value = _encode(value)
            yield sep_boundary
            yield _encode(title)
            yield b"\n\n"
            yield value
            if value and value[-1:] == b'\r':
                yield b'\n'  # write an extra newline (lurve Macs)

    @classmethod
    def _build_multipart(cls, data):
        """
        Build up the MIME payload for the POST data.

        :return: a ``(body_bytes, content_type)`` tuple.
        """
        # BUG FIX: the boundary must be kept as *text* here.  The original
        # defined it as a bytes literal and interpolated it into the
        # Content-Type header with %s, which on Python 3 produced
        # "boundary=b'----...'" — a boundary the server cannot match.
        boundary = '--------------GHSKFJDLGDS7543FJKLFHRE75642756743254'
        sep_boundary = b'\n--' + boundary.encode('ascii')
        end_boundary = sep_boundary + b'--'
        end_items = end_boundary, b"\n",
        builder = functools.partial(
            cls._build_part,
            sep_boundary=sep_boundary,
        )
        part_groups = map(builder, data.items())
        parts = itertools.chain.from_iterable(part_groups)
        body_items = itertools.chain(parts, end_items)
        content_type = 'multipart/form-data; boundary=%s' % boundary
        return b''.join(body_items), content_type

    def upload_file(self, filename):
        """POST the docs zip at *filename* to self.repository using HTTP
        basic authentication, and announce the server's response."""
        with open(filename, 'rb') as f:
            content = f.read()
        meta = self.distribution.metadata
        data = {
            ':action': 'doc_upload',
            'name': meta.get_name(),
            'content': (os.path.basename(filename), content),
        }
        # set up the authentication
        credentials = _encode(self.username + ':' + self.password)
        credentials = standard_b64encode(credentials)
        if six.PY3:
            credentials = credentials.decode('ascii')
        auth = "Basic " + credentials

        body, ct = self._build_multipart(data)

        self.announce("Submitting documentation to %s" % (self.repository),
                      log.INFO)

        # build the Request
        # We can't use urllib2 since we need to send the Basic
        # auth right with the first request
        schema, netloc, url, params, query, fragments = \
            urllib.parse.urlparse(self.repository)
        assert not params and not query and not fragments
        if schema == 'http':
            conn = http_client.HTTPConnection(netloc)
        elif schema == 'https':
            conn = http_client.HTTPSConnection(netloc)
        else:
            raise AssertionError("unsupported schema " + schema)

        data = ''
        try:
            conn.connect()
            conn.putrequest("POST", url)
            content_type = ct
            conn.putheader('Content-type', content_type)
            conn.putheader('Content-length', str(len(body)))
            conn.putheader('Authorization', auth)
            conn.endheaders()
            conn.send(body)
        except socket.error as e:
            self.announce(str(e), log.ERROR)
            return

        r = conn.getresponse()
        if r.status == 200:
            self.announce('Server response (%s): %s' % (r.status, r.reason),
                          log.INFO)
        elif r.status == 301:
            location = r.getheader('Location')
            if location is None:
                location = 'https://pythonhosted.org/%s/' % meta.get_name()
            self.announce('Upload successful. Visit %s' % location,
                          log.INFO)
        else:
            self.announce('Upload failed (%s): %s' % (r.status, r.reason),
                          log.ERROR)
        if self.show_response:
            print('-' * 75, r.read(), '-' * 75)
| mit |
ojengwa/talk | venv/lib/python2.7/site-packages/pip/_vendor/distlib/database.py | 203 | 49199 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""PEP 376 implementation."""
from __future__ import unicode_literals
import base64
import codecs
import contextlib
import hashlib
import logging
import os
import posixpath
import sys
import zipimport
from . import DistlibException, resources
from .compat import StringIO
from .version import get_scheme, UnsupportedVersionError
from .metadata import Metadata, METADATA_FILENAME
from .util import (parse_requirement, cached_property, parse_name_and_version,
read_exports, write_exports, CSVReader, CSVWriter)
__all__ = ['Distribution', 'BaseInstalledDistribution',
'InstalledDistribution', 'EggInfoDistribution',
'DistributionPath']
logger = logging.getLogger(__name__)
EXPORTS_FILENAME = 'pydist-exports.json'
COMMANDS_FILENAME = 'pydist-commands.json'
DIST_FILES = ('INSTALLER', METADATA_FILENAME, 'RECORD', 'REQUESTED',
'RESOURCES', EXPORTS_FILENAME, 'SHARED')
DISTINFO_EXT = '.dist-info'
class _Cache(object):
"""
A simple cache mapping names and .dist-info paths to distributions
"""
def __init__(self):
"""
Initialise an instance. There is normally one for each DistributionPath.
"""
self.name = {}
self.path = {}
self.generated = False
def clear(self):
"""
Clear the cache, setting it to its initial state.
"""
self.name.clear()
self.path.clear()
self.generated = False
def add(self, dist):
"""
Add a distribution to the cache.
:param dist: The distribution to add.
"""
if dist.path not in self.path:
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
class DistributionPath(object):
    """
    Represents a set of distributions installed on a path (typically sys.path).
    """
    def __init__(self, path=None, include_egg=False):
        """
        Create an instance from a path, optionally including legacy (distutils/
        setuptools/distribute) distributions.
        :param path: The path to use, as a list of directories. If not specified,
                     sys.path is used.
        :param include_egg: If True, this instance will look for and return legacy
                            distributions as well as those based on PEP 376.
        """
        if path is None:
            path = sys.path
        self.path = path
        self._include_dist = True
        self._include_egg = include_egg

        self._cache = _Cache()
        self._cache_egg = _Cache()
        self._cache_enabled = True
        self._scheme = get_scheme('default')

    def _get_cache_enabled(self):
        return self._cache_enabled

    def _set_cache_enabled(self, value):
        self._cache_enabled = value

    cache_enabled = property(_get_cache_enabled, _set_cache_enabled)

    def clear_cache(self):
        """
        Clears the internal cache.
        """
        self._cache.clear()
        self._cache_egg.clear()

    def _yield_distributions(self):
        """
        Yield .dist-info and/or .egg(-info) distributions.
        """
        # We need to check if we've seen some resources already, because on
        # some Linux systems (e.g. some Debian/Ubuntu variants) there are
        # symlinks which alias other files in the environment.
        seen = set()
        for path in self.path:
            finder = resources.finder_for_path(path)
            if finder is None:
                continue
            r = finder.find('')
            if not r or not r.is_container:
                continue
            rset = sorted(r.resources)
            for entry in rset:
                r = finder.find(entry)
                if not r or r.path in seen:
                    continue
                if self._include_dist and entry.endswith(DISTINFO_EXT):
                    metadata_path = posixpath.join(entry, METADATA_FILENAME)
                    pydist = finder.find(metadata_path)
                    if not pydist:
                        continue
                    metadata = Metadata(fileobj=pydist.as_stream(),
                                        scheme='legacy')
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield new_dist_class(r.path, metadata=metadata,
                                         env=self)
                elif self._include_egg and entry.endswith(('.egg-info',
                                                           '.egg')):
                    logger.debug('Found %s', r.path)
                    seen.add(r.path)
                    yield old_dist_class(r.path, self)

    def _generate_cache(self):
        """
        Scan the path for distributions and populate the cache with
        those that are found.
        """
        gen_dist = not self._cache.generated
        gen_egg = self._include_egg and not self._cache_egg.generated
        if gen_dist or gen_egg:
            for dist in self._yield_distributions():
                if isinstance(dist, InstalledDistribution):
                    self._cache.add(dist)
                else:
                    self._cache_egg.add(dist)

            if gen_dist:
                self._cache.generated = True
            if gen_egg:
                self._cache_egg.generated = True

    @classmethod
    def distinfo_dirname(cls, name, version):
        """
        The *name* and *version* parameters are converted into their
        filename-escaped form, i.e. any ``'-'`` characters are replaced
        with ``'_'`` other than the one in ``'dist-info'`` and the one
        separating the name from the version number.

        :parameter name: is converted to a standard distribution name by replacing
                         any runs of non- alphanumeric characters with a single
                         ``'-'``.
        :type name: string
        :parameter version: is converted to a standard version string. Spaces
                            become dots, and all other non-alphanumeric characters
                            (except dots) become dashes, with runs of multiple
                            dashes condensed to a single dash.
        :type version: string
        :returns: directory name
        :rtype: string"""
        name = name.replace('-', '_')
        return '-'.join([name, version]) + DISTINFO_EXT

    def get_distributions(self):
        """
        Provides an iterator that looks for distributions and returns
        :class:`InstalledDistribution` or
        :class:`EggInfoDistribution` instances for each one of them.

        :rtype: iterator of :class:`InstalledDistribution` and
                :class:`EggInfoDistribution` instances
        """
        if not self._cache_enabled:
            for dist in self._yield_distributions():
                yield dist
        else:
            self._generate_cache()

            for dist in self._cache.path.values():
                yield dist

            if self._include_egg:
                for dist in self._cache_egg.path.values():
                    yield dist

    def get_distribution(self, name):
        """
        Looks for a named distribution on the path.

        This function only returns the first result found, as no more than one
        value is expected. If nothing is found, ``None`` is returned.

        :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution`
                or ``None``
        """
        result = None
        name = name.lower()
        if not self._cache_enabled:
            for dist in self._yield_distributions():
                if dist.key == name:
                    result = dist
                    break
        else:
            self._generate_cache()

            if name in self._cache.name:
                result = self._cache.name[name][0]
            elif self._include_egg and name in self._cache_egg.name:
                result = self._cache_egg.name[name][0]
        return result

    def provides_distribution(self, name, version=None):
        """
        Iterates over all distributions to find which distributions provide *name*.
        If a *version* is provided, it will be used to filter the results.

        Yields every distribution whose ``provides`` metadata includes *name*
        (and matches *version* when one is given).

        :parameter version: a version specifier that indicates the version
                            required, conforming to the format in ``PEP-345``
        :type name: string
        :type version: string
        """
        matcher = None
        # Idiom fix: was "if not version is None".
        if version is not None:
            try:
                matcher = self._scheme.matcher('%s (%s)' % (name, version))
            except ValueError:
                raise DistlibException('invalid name or version: %r, %r' %
                                       (name, version))

        for dist in self.get_distributions():
            provided = dist.provides

            for p in provided:
                p_name, p_ver = parse_name_and_version(p)
                if matcher is None:
                    if p_name == name:
                        yield dist
                        break
                else:
                    if p_name == name and matcher.match(p_ver):
                        yield dist
                        break

    def get_file_path(self, name, relative_path):
        """
        Return the path to a resource file.
        """
        dist = self.get_distribution(name)
        if dist is None:
            raise LookupError('no distribution named %r found' % name)
        return dist.get_resource_path(relative_path)

    def get_exported_entries(self, category, name=None):
        """
        Return all of the exported entries in a particular category.

        :param category: The category to search for entries.
        :param name: If specified, only entries with that name are returned.
        """
        for dist in self.get_distributions():
            r = dist.exports
            if category in r:
                d = r[category]
                if name is not None:
                    if name in d:
                        yield d[name]
                else:
                    for v in d.values():
                        yield v
class Distribution(object):
    """
    A base class for distributions, whether installed or from indexes.
    Either way, it must have some metadata, so that's all that's needed
    for construction.
    """

    build_time_dependency = False
    """
    Set to True if it's known to be only a build-time dependency (i.e.
    not needed after installation).
    """

    requested = False
    """A boolean that indicates whether the ``REQUESTED`` metadata file is
    present (in other words, whether the package was installed by user
    request or it was installed as a dependency)."""

    def __init__(self, metadata):
        """
        Initialise an instance.
        :param metadata: The instance of :class:`Metadata` describing this
        distribution.
        """
        self.metadata = metadata
        self.name = metadata.name
        self.key = self.name.lower()    # for case-insensitive comparisons
        self.version = metadata.version
        self.locator = None
        self.digest = None
        self.extras = None      # additional features requested
        self.context = None     # environment marker overrides
        self.download_urls = set()
        self.digests = {}

    @property
    def source_url(self):
        """
        The source archive download URL for this distribution.
        """
        return self.metadata.source_url

    download_url = source_url   # Backward compatibility

    @property
    def name_and_version(self):
        """
        A utility property which displays the name and version in parentheses.
        """
        return '%s (%s)' % (self.name, self.version)

    @property
    def provides(self):
        """
        A set of distribution names and versions provided by this distribution.
        :return: A set of "name (version)" strings.
        """
        plist = self.metadata.provides
        # Every distribution implicitly provides itself; add the entry if
        # the metadata did not already list it.
        s = '%s (%s)' % (self.name, self.version)
        if s not in plist:
            plist.append(s)
        return plist

    def _get_requirements(self, req_attr):
        # Resolve the named requirements list from the metadata, applying
        # any requested extras and environment-marker context.
        reqts = getattr(self.metadata, req_attr)
        return set(self.metadata.get_requirements(reqts, extras=self.extras,
                                                  env=self.context))

    @property
    def run_requires(self):
        # Requirements needed at run time.
        return self._get_requirements('run_requires')

    @property
    def meta_requires(self):
        # Requirements of the metadata itself.
        return self._get_requirements('meta_requires')

    @property
    def build_requires(self):
        # Requirements needed to build the distribution.
        return self._get_requirements('build_requires')

    @property
    def test_requires(self):
        # Requirements needed to run the distribution's tests.
        return self._get_requirements('test_requires')

    @property
    def dev_requires(self):
        # Requirements needed for development of the distribution.
        return self._get_requirements('dev_requires')

    def matches_requirement(self, req):
        """
        Say if this instance matches (fulfills) a requirement.
        :param req: The requirement to match.
        :rtype req: str
        :return: True if it matches, else False.
        """
        # Requirement may contain extras - parse to lose those
        # from what's passed to the matcher
        r = parse_requirement(req)
        scheme = get_scheme(self.metadata.scheme)
        try:
            matcher = scheme.matcher(r.requirement)
        except UnsupportedVersionError:
            # XXX compat-mode if cannot read the version
            logger.warning('could not read version %r - using name only',
                           req)
            name = req.split()[0]
            matcher = scheme.matcher(name)

        name = matcher.key   # case-insensitive
        result = False
        # The requirement is fulfilled if any "name (version)" entry this
        # distribution provides has a matching name and version.
        for p in self.provides:
            p_name, p_ver = parse_name_and_version(p)
            if p_name != name:
                continue
            try:
                result = matcher.match(p_ver)
                break
            except UnsupportedVersionError:
                pass
        return result

    def __repr__(self):
        """
        Return a textual representation of this instance,
        """
        if self.source_url:
            suffix = ' [%s]' % self.source_url
        else:
            suffix = ''
        return '<Distribution %s (%s)%s>' % (self.name, self.version, suffix)

    def __eq__(self, other):
        """
        See if this distribution is the same as another.
        :param other: The distribution to compare with. To be equal to one
                      another. distributions must have the same type, name,
                      version and source_url.
        :return: True if it is the same, else False.
        """
        if type(other) is not type(self):
            result = False
        else:
            result = (self.name == other.name and
                      self.version == other.version and
                      self.source_url == other.source_url)
        return result

    def __hash__(self):
        """
        Compute hash in a way which matches the equality test.
        """
        # Combines exactly the fields used by __eq__ (name, version,
        # source_url), so equal instances hash equally.
        return hash(self.name) + hash(self.version) + hash(self.source_url)
class BaseInstalledDistribution(Distribution):
    """
    This is the base class for installed distributions (whether PEP 376 or
    legacy).
    """

    # Name of the hashlib algorithm used when writing RECORD digests;
    # subclasses may override (e.g. InstalledDistribution uses 'sha256').
    hasher = None

    def __init__(self, metadata, path, env=None):
        """
        Initialise an instance.
        :param metadata: An instance of :class:`Metadata` which describes the
                         distribution. This will normally have been initialised
                         from a metadata file in the ``path``.
        :param path:     The path of the ``.dist-info`` or ``.egg-info``
                         directory for the distribution.
        :param env:      This is normally the :class:`DistributionPath`
                         instance where this distribution was found.
        """
        super(BaseInstalledDistribution, self).__init__(metadata)
        self.path = path
        self.dist_path = env

    def get_hash(self, data, hasher=None):
        """
        Get the hash of some data, using a particular hash algorithm, if
        specified.

        :param data: The data to be hashed.
        :type data: bytes
        :param hasher: The name of a hash implementation, supported by hashlib,
                       or ``None``. Examples of valid values are ``'sha1'``,
                       ``'sha224'``, ``'sha384'``, '``sha256'``, ``'md5'`` and
                       ``'sha512'``. If no hasher is specified, the ``hasher``
                       attribute of the :class:`InstalledDistribution` instance
                       is used. If the hasher is determined to be ``None``, MD5
                       is used as the hashing algorithm.
        :returns: The hash of the data. If a hasher was explicitly specified,
                  the returned hash will be prefixed with the specified hasher
                  followed by '='.
        :rtype: str
        """
        if hasher is None:
            hasher = self.hasher
        if hasher is None:
            hasher = hashlib.md5
            prefix = ''
        else:
            # BUG FIX: the prefix must name the algorithm actually used for
            # this digest.  The original built it from ``self.hasher``, which
            # is wrong whenever an explicit ``hasher`` argument differs from
            # the instance default (e.g. verifying a RECORD entry written
            # with another algorithm, or when self.hasher is None).
            prefix = '%s=' % hasher
            hasher = getattr(hashlib, hasher)
        digest = hasher(data).digest()
        digest = base64.urlsafe_b64encode(digest).rstrip(b'=').decode('ascii')
        return '%s%s' % (prefix, digest)
class InstalledDistribution(BaseInstalledDistribution):
"""
Created with the *path* of the ``.dist-info`` directory provided to the
constructor. It reads the metadata contained in ``pydist.json`` when it is
    instantiated, or uses a passed-in Metadata instance (useful for when
dry-run mode is being used).
"""
hasher = 'sha256'
def __init__(self, path, metadata=None, env=None):
self.finder = finder = resources.finder_for_path(path)
if finder is None:
import pdb; pdb.set_trace ()
if env and env._cache_enabled and path in env._cache.path:
metadata = env._cache.path[path].metadata
elif metadata is None:
r = finder.find(METADATA_FILENAME)
# Temporary - for legacy support
if r is None:
r = finder.find('METADATA')
if r is None:
raise ValueError('no %s found in %s' % (METADATA_FILENAME,
path))
with contextlib.closing(r.as_stream()) as stream:
metadata = Metadata(fileobj=stream, scheme='legacy')
super(InstalledDistribution, self).__init__(metadata, path, env)
if env and env._cache_enabled:
env._cache.add(self)
try:
r = finder.find('REQUESTED')
except AttributeError:
import pdb; pdb.set_trace ()
self.requested = r is not None
def __repr__(self):
return '<InstalledDistribution %r %s at %r>' % (
self.name, self.version, self.path)
def __str__(self):
return "%s %s" % (self.name, self.version)
def _get_records(self):
"""
Get the list of installed files for the distribution
:return: A list of tuples of path, hash and size. Note that hash and
size might be ``None`` for some entries. The path is exactly
as stored in the file (which is as in PEP 376).
"""
results = []
r = self.get_distinfo_resource('RECORD')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as record_reader:
# Base location is parent dir of .dist-info dir
#base_location = os.path.dirname(self.path)
#base_location = os.path.abspath(base_location)
for row in record_reader:
missing = [None for i in range(len(row), 3)]
path, checksum, size = row + missing
#if not os.path.isabs(path):
# path = path.replace('/', os.sep)
# path = os.path.join(base_location, path)
results.append((path, checksum, size))
return results
@cached_property
def exports(self):
"""
Return the information exported by this distribution.
:return: A dictionary of exports, mapping an export category to a dict
of :class:`ExportEntry` instances describing the individual
export entries, and keyed by name.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
result = self.read_exports()
return result
def read_exports(self):
"""
Read exports data from a file in .ini format.
:return: A dictionary of exports, mapping an export category to a list
of :class:`ExportEntry` instances describing the individual
export entries.
"""
result = {}
r = self.get_distinfo_resource(EXPORTS_FILENAME)
if r:
with contextlib.closing(r.as_stream()) as stream:
result = read_exports(stream)
return result
def write_exports(self, exports):
"""
Write a dictionary of exports to a file in .ini format.
:param exports: A dictionary of exports, mapping an export category to
a list of :class:`ExportEntry` instances describing the
individual export entries.
"""
rf = self.get_distinfo_file(EXPORTS_FILENAME)
with open(rf, 'w') as f:
write_exports(exports, f)
def get_resource_path(self, relative_path):
"""
NOTE: This API may change in the future.
Return the absolute path to a resource file with the given relative
path.
:param relative_path: The path, relative to .dist-info, of the resource
of interest.
:return: The absolute path where the resource is to be found.
"""
r = self.get_distinfo_resource('RESOURCES')
with contextlib.closing(r.as_stream()) as stream:
with CSVReader(stream=stream) as resources_reader:
for relative, destination in resources_reader:
if relative == relative_path:
return destination
raise KeyError('no resource file with relative path %r '
'is installed' % relative_path)
def list_installed_files(self):
"""
Iterates over the ``RECORD`` entries and returns a tuple
``(path, hash, size)`` for each line.
:returns: iterator of (path, hash, size)
"""
for result in self._get_records():
yield result
    def write_installed_files(self, paths, prefix, dry_run=False):
        """
        Writes the ``RECORD`` file, using the ``paths`` iterable passed in. Any
        existing ``RECORD`` file is silently overwritten.

        prefix is used to determine when to write absolute paths.

        :param paths: iterable of installed file paths to record.
        :param prefix: installation prefix; paths under it (when the base is
                       also under it) are stored relative to the base.
        :param dry_run: when True, only log what would be written.
        :return: the RECORD path written (possibly relativised), or ``None``
                 on a dry run.
        """
        # Trailing separators make startswith() comparisons unambiguous
        # (so '/usr/libx' does not match prefix '/usr/lib').
        prefix = os.path.join(prefix, '')
        base = os.path.dirname(self.path)
        base_under_prefix = base.startswith(prefix)
        base = os.path.join(base, '')
        record_path = self.get_distinfo_file('RECORD')
        logger.info('creating %s', record_path)
        if dry_run:
            return None
        with CSVWriter(record_path) as writer:
            for path in paths:
                if os.path.isdir(path) or path.endswith(('.pyc', '.pyo')):
                    # do not put size and hash, as in PEP-376
                    hash_value = size = ''
                else:
                    size = '%d' % os.path.getsize(path)
                    with open(path, 'rb') as fp:
                        hash_value = self.get_hash(fp.read())
                # Store paths under the base (or under the prefix, when the
                # base itself lives under the prefix) relative to the base.
                if path.startswith(base) or (base_under_prefix and
                                             path.startswith(prefix)):
                    path = os.path.relpath(path, base)
                writer.writerow((path, hash_value, size))

            # add the RECORD file itself
            if record_path.startswith(base):
                record_path = os.path.relpath(record_path, base)
            writer.writerow((record_path, '', ''))
        return record_path
    def check_installed_files(self):
        """
        Checks that the hashes and sizes of the files in ``RECORD`` are
        matched by the files themselves. Returns a (possibly empty) list of
        mismatches. Each entry in the mismatch list will be a tuple consisting
        of the path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.
        """
        mismatches = []
        base = os.path.dirname(self.path)
        record_path = self.get_distinfo_file('RECORD')
        for path, hash_value, size in self.list_installed_files():
            if not os.path.isabs(path):
                path = os.path.join(base, path)
            # RECORD itself carries no hash/size, so skip it
            if path == record_path:
                continue
            if not os.path.exists(path):
                mismatches.append((path, 'exists', True, False))
            elif os.path.isfile(path):
                actual_size = str(os.path.getsize(path))
                if size and actual_size != size:
                    mismatches.append((path, 'size', size, actual_size))
                elif hash_value:
                    # hash entries may be prefixed 'hasher=digest'; pick the
                    # hasher out so get_hash() can use the same algorithm
                    if '=' in hash_value:
                        hasher = hash_value.split('=', 1)[0]
                    else:
                        hasher = None
                    with open(path, 'rb') as f:
                        actual_hash = self.get_hash(f.read(), hasher)
                    if actual_hash != hash_value:
                        mismatches.append((path, 'hash', hash_value, actual_hash))
        return mismatches
    @cached_property
    def shared_locations(self):
        """
        A dictionary of shared locations whose keys are in the set 'prefix',
        'purelib', 'platlib', 'scripts', 'headers', 'data' and 'namespace'.
        The corresponding value is the absolute path of that category for
        this distribution, and takes into account any paths selected by the
        user at installation time (e.g. via command-line arguments). In the
        case of the 'namespace' key, this would be a list of absolute paths
        for the roots of namespace packages in this distribution.

        The first time this property is accessed, the relevant information is
        read from the SHARED file in the .dist-info directory.
        """
        result = {}
        shared_path = os.path.join(self.path, 'SHARED')
        if os.path.isfile(shared_path):
            with codecs.open(shared_path, 'r', encoding='utf-8') as f:
                lines = f.read().splitlines()
            for line in lines:
                # each line is 'key=value'
                # NOTE(review): a blank line here would raise ValueError on
                # unpacking -- assumes SHARED never contains empty lines
                key, value = line.split('=', 1)
                if key == 'namespace':
                    # 'namespace' may occur multiple times; collect into a list
                    result.setdefault(key, []).append(value)
                else:
                    result[key] = value
        return result
def write_shared_locations(self, paths, dry_run=False):
"""
Write shared location information to the SHARED file in .dist-info.
:param paths: A dictionary as described in the documentation for
:meth:`shared_locations`.
:param dry_run: If True, the action is logged but no file is actually
written.
:return: The path of the file written to.
"""
shared_path = os.path.join(self.path, 'SHARED')
logger.info('creating %s', shared_path)
if dry_run:
return None
lines = []
for key in ('prefix', 'lib', 'headers', 'scripts', 'data'):
path = paths[key]
if os.path.isdir(paths[key]):
lines.append('%s=%s' % (key, path))
for ns in paths.get('namespace', ()):
lines.append('namespace=%s' % ns)
with codecs.open(shared_path, 'w', encoding='utf-8') as f:
f.write('\n'.join(lines))
return shared_path
def get_distinfo_resource(self, path):
if path not in DIST_FILES:
raise DistlibException('invalid path for a dist-info file: '
'%r at %r' % (path, self.path))
finder = resources.finder_for_path(self.path)
if finder is None:
raise DistlibException('Unable to get a finder for %s' % self.path)
return finder.find(path)
    def get_distinfo_file(self, path):
        """
        Returns a path located under the ``.dist-info`` directory. Returns a
        string representing the path.

        :parameter path: a ``'/'``-separated path relative to the
                         ``.dist-info`` directory or an absolute path;
                         If *path* is an absolute path and doesn't start
                         with the ``.dist-info`` directory path,
                         a :class:`DistlibException` is raised
        :type path: str
        :rtype: str
        """
        # Check if it is an absolute path  # XXX use relpath, add tests
        if path.find(os.sep) >= 0:
            # it's an absolute path?
            # keep only the last two components: <dist-info dirname>/<file>
            distinfo_dirname, path = path.split(os.sep)[-2:]
            if distinfo_dirname != self.path.split(os.sep)[-1]:
                raise DistlibException(
                    'dist-info file %r does not belong to the %r %s '
                    'distribution' % (path, self.name, self.version))

        # The file must be relative
        if path not in DIST_FILES:
            raise DistlibException('invalid path for a dist-info file: '
                                   '%r at %r' % (path, self.path))

        return os.path.join(self.path, path)
def list_distinfo_files(self):
"""
Iterates over the ``RECORD`` entries and returns paths for each line if
the path is pointing to a file located in the ``.dist-info`` directory
or one of its subdirectories.
:returns: iterator of paths
"""
base = os.path.dirname(self.path)
for path, checksum, size in self._get_records():
# XXX add separator or use real relpath algo
if not os.path.isabs(path):
path = os.path.join(base, path)
if path.startswith(self.path):
yield path
    def __eq__(self, other):
        # Two installed distributions are equal when they live at the same
        # .dist-info path.
        return (isinstance(other, InstalledDistribution) and
                self.path == other.path)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    # Defining __eq__ would otherwise set __hash__ to None; keep identity
    # hashing so instances stay usable as dict keys / set members.
    __hash__ = object.__hash__
class EggInfoDistribution(BaseInstalledDistribution):
    """Created with the *path* of the ``.egg-info`` directory or file provided
    to the constructor. It reads the metadata contained in the file itself, or
    if the given path happens to be a directory, the metadata is read from the
    file ``PKG-INFO`` under that directory."""
    # There is no reliable way to tell whether an egg-info dist was
    # explicitly requested, so assume it was.
    requested = True  # as we have no way of knowing, assume it was
    # egg-info distributions carry no SHARED file, hence no shared locations
    shared_locations = {}

    def __init__(self, path, env=None):
        # Small helper so name/key/version are set identically on both the
        # cached and non-cached code paths below.
        def set_name_and_version(s, n, v):
            s.name = n
            s.key = n.lower()  # for case-insensitive comparisons
            s.version = v
        self.path = path
        self.dist_path = env
        if env and env._cache_enabled and path in env._cache_egg.path:
            # reuse the metadata parsed on a previous construction
            metadata = env._cache_egg.path[path].metadata
            set_name_and_version(self, metadata.name, metadata.version)
        else:
            metadata = self._get_metadata(path)
            # Need to be set before caching
            set_name_and_version(self, metadata.name, metadata.version)
            if env and env._cache_enabled:
                env._cache_egg.add(self)
        super(EggInfoDistribution, self).__init__(metadata, path, env)

    def _get_metadata(self, path):
        """Read metadata (and requirements) for *path*, which must end with
        ``.egg`` (zip file or directory) or ``.egg-info`` (file or
        directory)."""
        requires = None

        def parse_requires_data(data):
            """Create a list of dependencies from a requires.txt file.

            *data*: the contents of a setuptools-produced requires.txt file.
            """
            reqs = []
            lines = data.splitlines()
            for line in lines:
                line = line.strip()
                # a '[' opens an extras section; everything after it is ignored
                if line.startswith('['):
                    logger.warning('Unexpected line: quitting requirement scan: %r',
                                   line)
                    break
                r = parse_requirement(line)
                if not r:
                    logger.warning('Not recognised as a requirement: %r', line)
                    continue
                if r.extras:
                    logger.warning('extra requirements in requires.txt are '
                                   'not supported')
                if not r.constraints:
                    reqs.append(r.name)
                else:
                    # render constraints as 'name (op ver, op ver)'
                    cons = ', '.join('%s%s' % c for c in r.constraints)
                    reqs.append('%s (%s)' % (r.name, cons))
            return reqs

        def parse_requires_path(req_path):
            """Create a list of dependencies from a requires.txt file.

            *req_path*: the path to a setuptools-produced requires.txt file.
            Returns an empty list when the file is missing/unreadable.
            """
            reqs = []
            try:
                with codecs.open(req_path, 'r', 'utf-8') as fp:
                    reqs = parse_requires_data(fp.read())
            except IOError:
                pass
            return reqs

        if path.endswith('.egg'):
            if os.path.isdir(path):
                meta_path = os.path.join(path, 'EGG-INFO', 'PKG-INFO')
                metadata = Metadata(path=meta_path, scheme='legacy')
                req_path = os.path.join(path, 'EGG-INFO', 'requires.txt')
                requires = parse_requires_path(req_path)
            else:
                # a zipped egg: read the metadata out of the archive
                # FIXME handle the case where zipfile is not available
                zipf = zipimport.zipimporter(path)
                fileobj = StringIO(
                    zipf.get_data('EGG-INFO/PKG-INFO').decode('utf8'))
                metadata = Metadata(fileobj=fileobj, scheme='legacy')
                try:
                    data = zipf.get_data('EGG-INFO/requires.txt')
                    requires = parse_requires_data(data.decode('utf-8'))
                except IOError:
                    requires = None
        elif path.endswith('.egg-info'):
            if os.path.isdir(path):
                req_path = os.path.join(path, 'requires.txt')
                requires = parse_requires_path(req_path)
                # for a directory, the actual metadata lives in PKG-INFO
                path = os.path.join(path, 'PKG-INFO')
            metadata = Metadata(path=path, scheme='legacy')
        else:
            raise DistlibException('path must end with .egg-info or .egg, '
                                   'got %r' % path)

        if requires:
            metadata.add_requirements(requires)
        return metadata

    def __repr__(self):
        return '<EggInfoDistribution %r %s at %r>' % (
            self.name, self.version, self.path)

    def __str__(self):
        return "%s %s" % (self.name, self.version)

    def check_installed_files(self):
        """
        Checks that the hashes and sizes of the files in ``RECORD`` are
        matched by the files themselves. Returns a (possibly empty) list of
        mismatches. Each entry in the mismatch list will be a tuple consisting
        of the path, 'exists', 'size' or 'hash' according to what didn't match
        (existence is checked first, then size, then hash), the expected
        value and the actual value.
        """
        mismatches = []
        record_path = os.path.join(self.path, 'installed-files.txt')
        if os.path.exists(record_path):
            for path, _, _ in self.list_installed_files():
                if path == record_path:
                    continue
                # only existence is checked here; size/hash mismatches are
                # detected (by failing) in list_installed_files itself
                if not os.path.exists(path):
                    mismatches.append((path, 'exists', True, False))
        return mismatches

    def list_installed_files(self):
        """
        Iterates over the ``installed-files.txt`` entries and returns a tuple
        ``(path, hash, size)`` for each line.

        :returns: a list of (path, hash, size)
        """

        def _md5(path):
            # read the whole file and hash it; no 'with' to stay py2-friendly
            f = open(path, 'rb')
            try:
                content = f.read()
            finally:
                f.close()
            return hashlib.md5(content).hexdigest()

        def _size(path):
            return os.stat(path).st_size

        record_path = os.path.join(self.path, 'installed-files.txt')
        result = []
        if os.path.exists(record_path):
            with codecs.open(record_path, 'r', encoding='utf-8') as f:
                for line in f:
                    line = line.strip()
                    p = os.path.normpath(os.path.join(self.path, line))
                    # "./" is present as a marker between installed files
                    # and installation metadata files
                    if not os.path.exists(p):
                        logger.warning('Non-existent file: %s', p)
                        if p.endswith(('.pyc', '.pyo')):
                            continue
                        #otherwise fall through and fail
                    if not os.path.isdir(p):
                        result.append((p, _md5(p), _size(p)))
            result.append((record_path, None, None))
        return result

    def list_distinfo_files(self, absolute=False):
        """
        Iterates over the ``installed-files.txt`` entries and returns paths for
        each line if the path is pointing to a file located in the
        ``.egg-info`` directory or one of its subdirectories.

        :parameter absolute: If *absolute* is ``True``, each returned path is
                          transformed into a local absolute path. Otherwise the
                          raw value from ``installed-files.txt`` is returned.
        :type absolute: boolean
        :returns: iterator of paths
        """
        record_path = os.path.join(self.path, 'installed-files.txt')
        # entries before the './' marker are installed files; only those
        # after it are metadata files under .egg-info
        skip = True
        with codecs.open(record_path, 'r', encoding='utf-8') as f:
            for line in f:
                line = line.strip()
                if line == './':
                    skip = False
                    continue
                if not skip:
                    p = os.path.normpath(os.path.join(self.path, line))
                    if p.startswith(self.path):
                        if absolute:
                            yield p
                        else:
                            yield line

    def __eq__(self, other):
        # equality is based purely on the .egg-info path
        return (isinstance(other, EggInfoDistribution) and
                self.path == other.path)

    # See http://docs.python.org/reference/datamodel#object.__hash__
    # keep identity hashing despite defining __eq__
    __hash__ = object.__hash__
# Backward-compatibility aliases: "new"-style distributions use .dist-info
# (InstalledDistribution), "old"-style ones use .egg-info.
new_dist_class = InstalledDistribution
old_dist_class = EggInfoDistribution
class DependencyGraph(object):
    """
    Represents a dependency graph between distributions.

    The dependency relationships are stored in an ``adjacency_list`` that maps
    distributions to a list of ``(other, label)`` tuples where  ``other``
    is a distribution and the edge is labeled with ``label`` (i.e. the version
    specifier, if such was provided). Also, for more efficient traversal, for
    every distribution ``x``, a list of predecessors is kept in
    ``reverse_list[x]``. An edge from distribution ``a`` to
    distribution ``b`` means that ``a`` depends on ``b``. If any missing
    dependencies are found, they are stored in ``missing``, which is a
    dictionary that maps distributions to a list of requirements that were not
    provided by any other distributions.
    """

    def __init__(self):
        self.adjacency_list = {}
        self.reverse_list = {}
        self.missing = {}

    def add_distribution(self, distribution):
        """Add the *distribution* to the graph.

        :type distribution: :class:`distutils2.database.InstalledDistribution`
                            or :class:`distutils2.database.EggInfoDistribution`
        """
        self.adjacency_list[distribution] = []
        self.reverse_list[distribution] = []
        #self.missing[distribution] = []

    def add_edge(self, x, y, label=None):
        """Add an edge from distribution *x* to distribution *y* with the given
        *label*.

        :type x: :class:`distutils2.database.InstalledDistribution` or
                 :class:`distutils2.database.EggInfoDistribution`
        :type y: :class:`distutils2.database.InstalledDistribution` or
                 :class:`distutils2.database.EggInfoDistribution`
        :type label: ``str`` or ``None``
        """
        self.adjacency_list[x].append((y, label))
        # multiple edges are allowed, so be careful
        if x not in self.reverse_list[y]:
            self.reverse_list[y].append(x)

    def add_missing(self, distribution, requirement):
        """
        Add a missing *requirement* for the given *distribution*.

        :type distribution: :class:`distutils2.database.InstalledDistribution`
                            or :class:`distutils2.database.EggInfoDistribution`
        :type requirement: ``str``
        """
        logger.debug('%s missing %r', distribution, requirement)
        self.missing.setdefault(distribution, []).append(requirement)

    def _repr_dist(self, dist):
        # short human-readable form: 'name version'
        return '%s %s' % (dist.name, dist.version)

    def repr_node(self, dist, level=1):
        """Prints only a subgraph"""
        output = [self._repr_dist(dist)]
        for other, label in self.adjacency_list[dist]:
            # NOTE: 'dist' is reused here as a local string; the original
            # parameter is no longer needed past this point
            dist = self._repr_dist(other)
            if label is not None:
                dist = '%s [%s]' % (dist, label)
            output.append('    ' * level + str(dist))
            # recurse into the child and indent its subtree one level deeper
            suboutput = self.repr_node(other, level + 1)
            subs = suboutput.split('\n')
            output.extend(subs[1:])
        return '\n'.join(output)

    def to_dot(self, f, skip_disconnected=True):
        """Writes a DOT output for the graph to the provided file *f*.

        If *skip_disconnected* is set to ``True``, then all distributions
        that are not dependent on any other distribution are skipped.

        :type f: has to support ``file``-like operations
        :type skip_disconnected: ``bool``
        """
        disconnected = []

        f.write("digraph dependencies {\n")
        for dist, adjs in self.adjacency_list.items():
            # collect dists with no outgoing edges (only when they will
            # actually be rendered, i.e. skip_disconnected is False)
            if len(adjs) == 0 and not skip_disconnected:
                disconnected.append(dist)
            for other, label in adjs:
                if not label is None:
                    f.write('"%s" -> "%s" [label="%s"]\n' %
                            (dist.name, other.name, label))
                else:
                    f.write('"%s" -> "%s"\n' % (dist.name, other.name))
        if not skip_disconnected and len(disconnected) > 0:
            f.write('subgraph disconnected {\n')
            f.write('label = "Disconnected"\n')
            f.write('bgcolor = red\n')
            for dist in disconnected:
                f.write('"%s"' % dist.name)
                f.write('\n')
            f.write('}\n')
        f.write('}\n')

    def topological_sort(self):
        """
        Perform a topological sort of the graph.

        :return: A tuple, the first element of which is a topologically sorted
                 list of distributions, and the second element of which is a
                 list of distributions that cannot be sorted because they have
                 circular dependencies and so form a cycle.
        """
        result = []
        # Make a shallow copy of the adjacency list
        alist = {}
        for k, v in self.adjacency_list.items():
            alist[k] = v[:]
        while True:
            # See what we can remove in this run
            to_remove = []
            for k, v in list(alist.items())[:]:
                if not v:
                    to_remove.append(k)
                    del alist[k]
            if not to_remove:
                # What's left in alist (if anything) is a cycle.
                break
            # Remove from the adjacency list of others
            for k, v in alist.items():
                alist[k] = [(d, r) for d, r in v if d not in to_remove]
            logger.debug('Moving to result: %s',
                         ['%s (%s)' % (d.name, d.version) for d in to_remove])
            result.extend(to_remove)
        return result, list(alist.keys())

    def __repr__(self):
        """Representation of the graph"""
        output = []
        for dist, adjs in self.adjacency_list.items():
            output.append(self.repr_node(dist))
        return '\n'.join(output)
def make_graph(dists, scheme='default'):
    """Makes a dependency graph from the given distributions.

    :parameter dists: a list of distributions
    :type dists: list of :class:`distutils2.database.InstalledDistribution` and
                 :class:`distutils2.database.EggInfoDistribution` instances
    :parameter scheme: name of the version scheme used to match requirements
                       against provided versions
    :rtype: a :class:`DependencyGraph` instance
    """
    scheme = get_scheme(scheme)
    graph = DependencyGraph()
    provided = {}  # maps names to lists of (version, dist) tuples

    # first, build the graph and find out what's provided
    for dist in dists:
        graph.add_distribution(dist)

        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            provided.setdefault(name, []).append((version, dist))

    # now make the edges
    for dist in dists:
        # all requirement categories participate in the dependency graph
        requires = (dist.run_requires | dist.meta_requires |
                    dist.build_requires | dist.dev_requires)
        for req in requires:
            try:
                matcher = scheme.matcher(req)
            except UnsupportedVersionError:
                # XXX compat-mode if cannot read the version
                logger.warning('could not read version %r - using name only',
                               req)
                name = req.split()[0]
                matcher = scheme.matcher(name)

            name = matcher.key   # case-insensitive

            matched = False
            if name in provided:
                for version, provider in provided[name]:
                    try:
                        match = matcher.match(version)
                    except UnsupportedVersionError:
                        match = False

                    if match:
                        # first provider satisfying the requirement wins
                        graph.add_edge(dist, provider, req)
                        matched = True
                        break
            if not matched:
                graph.add_missing(dist, req)
    return graph
def get_dependent_dists(dists, dist):
    """Recursively generate a list of distributions from *dists* that are
    dependent on *dist*.

    :param dists: a list of distributions
    :param dist: a distribution, member of *dists* for which we are interested
    :return: the distributions in *dists* that (transitively) depend on
             *dist*; *dist* itself is not included.
    :raises DistlibException: if *dist* is not a member of *dists*.
    """
    if dist not in dists:
        raise DistlibException('given distribution %r is not a member '
                               'of the list' % dist.name)
    graph = make_graph(dists)

    dep = [dist]  # dependent distributions; seeded to prevent infinite loops
    # Copy the node list: previously this aliased graph.reverse_list[dist],
    # so the pop() below silently mutated the graph while traversing it.
    todo = list(graph.reverse_list[dist])  # list of nodes we should inspect
    while todo:
        d = todo.pop()
        if d in dep:
            # already visited via another path; skip to avoid duplicates
            continue
        dep.append(d)
        for succ in graph.reverse_list[d]:
            if succ not in dep:
                todo.append(succ)

    dep.pop(0)  # remove dist from dep, was there to prevent infinite loops
    return dep
def get_required_dists(dists, dist):
    """Recursively generate a list of distributions from *dists* that are
    required by *dist*.

    :param dists: a list of distributions
    :param dist: a distribution, member of *dists* for which we are interested
    :return: the distributions in *dists* that *dist* (transitively) requires.
    :raises DistlibException: if *dist* is not a member of *dists*.
    """
    if dist not in dists:
        raise DistlibException('given distribution %r is not a member '
                               'of the list' % dist.name)
    graph = make_graph(dists)

    req = []  # required distributions
    # Copy the edge list: previously this aliased graph.adjacency_list[dist],
    # so the pop() below silently mutated the graph while traversing it.
    todo = list(graph.adjacency_list[dist])  # list of nodes we should inspect
    while todo:
        d = todo.pop()[0]
        if d in req:
            # Already collected via another path. This membership test also
            # terminates traversal of circular dependencies; the previous
            # code compared (dist, label) tuples against a list of dists,
            # which was always False and could loop forever on cycles.
            continue
        req.append(d)
        todo.extend(e for e in graph.adjacency_list[d] if e[0] not in req)
    return req
def make_dist(name, version, **kwargs):
    """
    A convenience method for making a dist given just a name and version.

    :param name: the distribution name.
    :param version: the distribution version.
    :param kwargs: passed through to :class:`Metadata`, except ``summary``,
                   which is popped and used as the metadata summary.
    :return: a :class:`Distribution` wrapping the constructed metadata.
    """
    summary = kwargs.pop('summary', 'Placeholder for summary')
    md = Metadata(**kwargs)
    md.name = name
    md.version = version
    # fallback used when an explicit empty/None summary was passed in
    # (fixed typo: was 'Plaeholder for summary')
    md.summary = summary or 'Placeholder for summary'
    return Distribution(md)
| mit |
roadmapper/ansible | lib/ansible/modules/cloud/azure/azure_rm_virtualmachinescaleset_info.py | 20 | 15325 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Sertac Ozercan <seozerca@microsoft.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: schema version, module maturity and
# the channel through which the module is supported.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_virtualmachinescaleset_info
version_added: "2.9"
short_description: Get Virtual Machine Scale Set facts
description:
- Get facts for a virtual machine scale set.
- Note that this module was called M(azure_rm_virtualmachine_scaleset_facts) before Ansible 2.8. The usage did not change.
options:
name:
description:
- Limit results to a specific virtual machine scale set.
resource_group:
description:
- The resource group to search for the desired virtual machine scale set.
tags:
description:
- List of tags to be matched.
format:
description:
- Format of the data returned.
- If C(raw) is selected information will be returned in raw format from Azure Python SDK.
- If C(curated) is selected the structure will be identical to input parameters of M(azure_rm_virtualmachinescaleset) module.
- In Ansible 2.5 and lower facts are always returned in raw format.
- Please note that this option will be deprecated in 2.10 when curated format will become the only supported format.
default: 'raw'
choices:
- 'curated'
- 'raw'
version_added: "2.6"
extends_documentation_fragment:
- azure
author:
- Sertac Ozercan (@sozercan)
'''
EXAMPLES = '''
- name: Get facts for a virtual machine scale set
azure_rm_virtualmachinescaleset_info:
resource_group: myResourceGroup
name: testvmss001
format: curated
- name: Get facts for all virtual networks
azure_rm_virtualmachinescaleset_info:
resource_group: myResourceGroup
- name: Get facts by tags
azure_rm_virtualmachinescaleset_info:
resource_group: myResourceGroup
tags:
- testing
'''
RETURN = '''
vmss:
description:
- List of virtual machine scale sets.
returned: always
type: complex
contains:
id:
description:
- Resource ID.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.Compute/scalesets/myscaleset
admin_username:
description:
- Admin username used to access the host after it is created.
returned: always
type: str
sample: adminuser
capacity:
description:
- Capacity of VMSS.
returned: always
type: int
sample: 2
data_disks:
description:
- List of attached data disks.
returned: always
type: complex
contains:
caching:
description:
- Type of data disk caching.
returned: always
type: str
sample: ReadOnly
disk_size_gb:
description:
- The initial disk size in GB for blank data disks.
returned: always
type: int
sample: 64
lun:
description:
- The logical unit number for data disk.
returned: always
type: int
sample: 0
managed_disk_type:
description:
- Managed data disk type.
returned: always
type: str
sample: Standard_LRS
image:
description:
- Image specification.
returned: always
type: complex
contains:
offer:
description:
- The offer of the platform image or marketplace image used to create the virtual machine.
returned: always
type: str
sample: RHEL
publisher:
description:
- Publisher name.
returned: always
type: str
sample: RedHat
sku:
description:
- SKU name.
returned: always
type: str
sample: 7-RAW
version:
description:
- Image version.
returned: always
type: str
sample: 7.5.2018050901
load_balancer:
description:
- Load balancer name.
returned: always
type: str
sample: testlb
location:
description:
- Resource location.
type: str
returned: always
sample: japaneast
managed_disk_type:
description:
- Managed data disk type.
type: str
returned: always
sample: Standard_LRS
name:
description:
- Resource name.
returned: always
type: str
sample: myvmss
os_disk_caching:
description:
- Type of OS disk caching.
type: str
returned: always
sample: ReadOnly
os_type:
description:
- Base type of operating system.
type: str
returned: always
sample: Linux
overprovision:
description:
- Specifies whether the Virtual Machine Scale Set should be overprovisioned.
type: bool
sample: true
resource_group:
description:
- Resource group.
type: str
returned: always
sample: myResourceGroup
ssh_password_enabled:
description:
- Is SSH password authentication enabled. Valid only for Linux.
type: bool
returned: always
sample: true
subnet_name:
description:
- Subnet name.
type: str
returned: always
sample: testsubnet
tier:
description:
- SKU Tier.
type: str
returned: always
sample: Basic
upgrade_policy:
description:
- Upgrade policy.
type: str
returned: always
sample: Manual
virtual_network_name:
description:
- Associated virtual network name.
type: str
returned: always
sample: testvn
vm_size:
description:
- Virtual machine size.
type: str
returned: always
sample: Standard_D4
tags:
description:
- Tags assigned to the resource. Dictionary of string:string pairs.
returned: always
type: dict
sample: { "tag1": "abc" }
''' # NOQA
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
import re
try:
from msrestazure.azure_exceptions import CloudError
except Exception:
# handled in azure_rm_common
pass
# Azure SDK model class name used when serializing API results, and the
# module paths searched for enum definitions during serialization.
AZURE_OBJECT_CLASS = 'VirtualMachineScaleSet'

AZURE_ENUM_MODULES = ['azure.mgmt.compute.models']
class AzureRMVirtualMachineScaleSetInfo(AzureRMModuleBase):
    """Utility class to get virtual machine scale set facts"""

    def __init__(self):
        # module argument spec: optional name/resource_group/tags filters and
        # the output format selector
        self.module_args = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list'),
            format=dict(
                type='str',
                choices=['curated',
                         'raw'],
                default='raw'
            )
        )

        self.results = dict(
            changed=False,
        )

        self.name = None
        self.resource_group = None
        self.format = None
        self.tags = None

        super(AzureRMVirtualMachineScaleSetInfo, self).__init__(
            derived_arg_spec=self.module_args,
            supports_tags=False,
            facts_module=True
        )

    def exec_module(self, **kwargs):
        """Gather the requested facts and return the results dict.

        Fetches either one scale set (when ``name`` is given) or all scale
        sets in the resource group, optionally reshaping the raw API output
        into the 'curated' structure.
        """
        # the module was renamed from *_facts to *_info; keep the old name
        # working but emit a deprecation warning
        is_old_facts = self.module._name == 'azure_rm_virtualmachinescaleset_facts'
        if is_old_facts:
            self.module.deprecate("The 'azure_rm_virtualmachinescaleset_facts' module has been renamed to 'azure_rm_virtualmachinescaleset_info'",
                                  version='2.13')

        # copy each recognised module argument onto self
        for key in self.module_args:
            setattr(self, key, kwargs[key])

        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            result = self.get_item()
        else:
            result = self.list_items()

        if self.format == 'curated':
            # reshape each raw API dict into the flat structure that mirrors
            # the azure_rm_virtualmachinescaleset module's input parameters
            for index in range(len(result)):
                vmss = result[index]
                subnet_name = None
                load_balancer_name = None
                virtual_network_name = None
                ssh_password_enabled = False

                # each of these lookups can fail for scale sets that lack
                # the corresponding configuration, hence the broad excepts
                try:
                    subnet_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
                                     ['properties']['ipConfigurations'][0]['properties']['subnet']['id'])
                    subnet_name = re.sub('.*subnets\\/', '', subnet_id)
                except Exception:
                    self.log('Could not extract subnet name')

                try:
                    backend_address_pool_id = (vmss['properties']['virtualMachineProfile']['networkProfile']['networkInterfaceConfigurations'][0]
                                                   ['properties']['ipConfigurations'][0]['properties']['loadBalancerBackendAddressPools'][0]['id'])
                    load_balancer_name = re.sub('\\/backendAddressPools.*', '', re.sub('.*loadBalancers\\/', '', backend_address_pool_id))
                    virtual_network_name = re.sub('.*virtualNetworks\\/', '', re.sub('\\/subnets.*', '', subnet_id))
                except Exception:
                    self.log('Could not extract load balancer / virtual network name')

                try:
                    ssh_password_enabled = (not vmss['properties']['virtualMachineProfile']['osProfile']
                                                    ['linuxConfiguration']['disablePasswordAuthentication'])
                except Exception:
                    self.log('Could not extract SSH password enabled')

                # flatten each data disk into the curated key names
                data_disks = vmss['properties']['virtualMachineProfile']['storageProfile'].get('dataDisks', [])

                for disk_index in range(len(data_disks)):
                    old_disk = data_disks[disk_index]
                    new_disk = {
                        'lun': old_disk['lun'],
                        'disk_size_gb': old_disk['diskSizeGB'],
                        'managed_disk_type': old_disk['managedDisk']['storageAccountType'],
                        'caching': old_disk['caching']
                    }
                    data_disks[disk_index] = new_disk

                updated = {
                    'id': vmss['id'],
                    'resource_group': self.resource_group,
                    'name': vmss['name'],
                    'state': 'present',
                    'location': vmss['location'],
                    'vm_size': vmss['sku']['name'],
                    'capacity': vmss['sku']['capacity'],
                    'tier': vmss['sku']['tier'],
                    'upgrade_policy': vmss['properties']['upgradePolicy']['mode'],
                    'admin_username': vmss['properties']['virtualMachineProfile']['osProfile']['adminUsername'],
                    'admin_password': vmss['properties']['virtualMachineProfile']['osProfile'].get('adminPassword'),
                    'ssh_password_enabled': ssh_password_enabled,
                    'image': vmss['properties']['virtualMachineProfile']['storageProfile']['imageReference'],
                    'os_disk_caching': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['caching'],
                    'os_type': 'Linux' if (vmss['properties']['virtualMachineProfile']['osProfile'].get('linuxConfiguration') is not None) else 'Windows',
                    'overprovision': vmss['properties']['overprovision'],
                    'managed_disk_type': vmss['properties']['virtualMachineProfile']['storageProfile']['osDisk']['managedDisk']['storageAccountType'],
                    'data_disks': data_disks,
                    'virtual_network_name': virtual_network_name,
                    'subnet_name': subnet_name,
                    'load_balancer': load_balancer_name,
                    'tags': vmss.get('tags')
                }

                result[index] = updated

        if is_old_facts:
            self.results['ansible_facts'] = {
                'azure_vmss': result
            }
            if self.format == 'curated':
                # proper result format we want to support in the future
                # dropping 'ansible_facts' and shorter name 'vmss'
                self.results['vmss'] = result
        else:
            self.results['vmss'] = result

        return self.results

    def get_item(self):
        """Get a single virtual machine scale set"""
        self.log('Get properties for {0}'.format(self.name))

        item = None
        results = []

        try:
            item = self.compute_client.virtual_machine_scale_sets.get(self.resource_group, self.name)
        except CloudError:
            # not found: return an empty list rather than failing
            pass

        if item and self.has_tags(item.tags, self.tags):
            results = [self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES)]

        return results

    def list_items(self):
        """Get all virtual machine scale sets"""
        self.log('List all virtual machine scale sets')

        try:
            response = self.compute_client.virtual_machine_scale_sets.list(self.resource_group)
        except CloudError as exc:
            self.fail('Failed to list all items - {0}'.format(str(exc)))

        results = []
        for item in response:
            # apply the optional tags filter before serializing
            if self.has_tags(item.tags, self.tags):
                results.append(self.serialize_obj(item, AZURE_OBJECT_CLASS, enum_modules=AZURE_ENUM_MODULES))

        return results
def main():
    """Main module execution code path"""
    # instantiating the class runs the module: argument parsing and fact
    # collection happen in the base class constructor
    AzureRMVirtualMachineScaleSetInfo()


if __name__ == '__main__':
    main()
| gpl-3.0 |
eirmag/weboob | modules/arte/browser.py | 2 | 2140 | # -*- coding: utf-8 -*-
# Copyright(C) 2010-2011 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.browser import BaseBrowser
from weboob.tools.browser.decorators import id2url
from .pages import IndexPage, VideoPage
from .video import ArteVideo
__all__ = ['ArteBrowser']
class ArteBrowser(BaseBrowser):
    """Browser for videos.arte.tv supporting search, latest videos and
    per-video lookup in a chosen language and quality."""
    DOMAIN = u'videos.arte.tv'
    ENCODING = None
    # URL patterns mapped to their page handler classes
    PAGES = {r'http://videos.arte.tv/\w+/videos/toutesLesVideos.*': IndexPage,
             r'http://videos.arte.tv/\w+/do_search/videos/.*': IndexPage,
             r'http://videos.arte.tv/\w+/videos/(?P<id>.+)\.html': VideoPage
            }

    # per-language path component of the search endpoint
    SEARCH_LANG = {'fr': 'recherche', 'de': 'suche', 'en': 'search'}

    def __init__(self, lang, quality, *args, **kwargs):
        # lang: one of the SEARCH_LANG keys; quality: preferred video quality
        BaseBrowser.__init__(self, *args, **kwargs)
        self.lang = lang
        self.quality = quality

    @id2url(ArteVideo.id2url)
    def get_video(self, url, video=None):
        # @id2url lets callers pass either a video id or a full URL
        self.location(url)
        return self.page.get_video(video, self.lang, self.quality)

    def home(self):
        self.location('http://videos.arte.tv/fr/videos/toutesLesVideos')

    def search_videos(self, pattern):
        # query goes in the 'q' parameter of the language-specific search URL
        self.location(self.buildurl('/%s/do_search/videos/%s' % (self.lang, self.SEARCH_LANG[self.lang]), q=pattern.encode('utf-8')))
        assert self.is_on_page(IndexPage)
        return self.page.iter_videos()

    def latest_videos(self):
        self.home()
        assert self.is_on_page(IndexPage)
        return self.page.iter_videos()
| agpl-3.0 |
tlatzko/spmcluster | .tox/2.6-nocov/lib/python2.6/site-packages/pip/vcs/bazaar.py | 280 | 4427 | from __future__ import absolute_import
import logging
import os
import tempfile
import re
# TODO: Get this into six.moves.urllib.parse
try:
from urllib import parse as urllib_parse
except ImportError:
import urlparse as urllib_parse
from pip.utils import rmtree, display_path
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
logger = logging.getLogger(__name__)
class Bazaar(VersionControl):
    # registration metadata used by pip's VCS support machinery
    name = 'bzr'
    dirname = '.bzr'
    repo_name = 'branch'
    # URL schemes this backend claims in pip requirement URLs
    schemes = (
        'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp',
        'bzr+lp',
    )
    def __init__(self, url=None, *args, **kwargs):
        super(Bazaar, self).__init__(url, *args, **kwargs)
        # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical
        # Register lp but do not expose as a scheme to support bzr+lp.
        # (mutates module-level urllib parse tables; harmless if repeated,
        # beyond appending duplicate entries)
        if getattr(urllib_parse, 'uses_fragment', None):
            urllib_parse.uses_fragment.extend(['lp'])
            urllib_parse.non_hierarchical.extend(['lp'])
def export(self, location):
"""
Export the Bazaar repository at the url to the destination location
"""
temp_dir = tempfile.mkdtemp('-export', 'pip-')
self.unpack(temp_dir)
if os.path.exists(location):
# Remove the location to make sure Bazaar can export it correctly
rmtree(location)
try:
self.run_command(['export', location], cwd=temp_dir,
show_stdout=False)
finally:
rmtree(temp_dir)
def switch(self, dest, url, rev_options):
self.run_command(['switch', url], cwd=dest)
def update(self, dest, rev_options):
self.run_command(['pull', '-q'] + rev_options, cwd=dest)
def obtain(self, dest):
url, rev = self.get_url_rev()
if rev:
rev_options = ['-r', rev]
rev_display = ' (to revision %s)' % rev
else:
rev_options = []
rev_display = ''
if self.check_destination(dest, url, rev_options, rev_display):
logger.info(
'Checking out %s%s to %s',
url,
rev_display,
display_path(dest),
)
self.run_command(['branch', '-q'] + rev_options + [url, dest])
def get_url_rev(self):
# hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it
url, rev = super(Bazaar, self).get_url_rev()
if url.startswith('ssh://'):
url = 'bzr+' + url
return url, rev
def get_url(self, location):
urls = self.run_command(['info'], show_stdout=False, cwd=location)
for line in urls.splitlines():
line = line.strip()
for x in ('checkout of branch: ',
'parent branch: '):
if line.startswith(x):
repo = line.split(x)[1]
if self._is_local_repository(repo):
return path_to_url(repo)
return repo
return None
def get_revision(self, location):
revision = self.run_command(
['revno'], show_stdout=False, cwd=location)
return revision.splitlines()[-1]
def get_tag_revs(self, location):
tags = self.run_command(
['tags'], show_stdout=False, cwd=location)
tag_revs = []
for line in tags.splitlines():
tags_match = re.search(r'([.\w-]+)\s*(.*)$', line)
if tags_match:
tag = tags_match.group(1)
rev = tags_match.group(2)
tag_revs.append((rev.strip(), tag.strip()))
return dict(tag_revs)
def get_src_requirement(self, dist, location, find_tags):
repo = self.get_url(location)
if not repo:
return None
if not repo.lower().startswith('bzr:'):
repo = 'bzr+' + repo
egg_project_name = dist.egg_name().split('-', 1)[0]
current_rev = self.get_revision(location)
tag_revs = self.get_tag_revs(location)
if current_rev in tag_revs:
# It's a tag
full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
else:
full_egg_name = '%s-dev_r%s' % (dist.egg_name(), current_rev)
return '%s@%s#egg=%s' % (repo, current_rev, full_egg_name)
# Make the Bazaar backend discoverable through pip's global VCS registry.
vcs.register(Bazaar)
| bsd-2-clause |
jckhang/gensim | gensim/models/wrappers/ldamallet.py | 28 | 10798 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Python wrapper for Latent Dirichlet Allocation (LDA) from MALLET, the Java topic modelling
toolkit [1]_.
This module allows both LDA model estimation from a training corpus and inference of topic
distribution on new, unseen documents, using an (optimized version of) collapsed
gibbs sampling from MALLET.
MALLET's LDA training requires O(#corpus_words) of memory, keeping the entire corpus in RAM.
If you find yourself running out of memory, either decrease the `workers` constructor
parameter, or use `LdaModel` which needs only O(1) memory.
The wrapped model can NOT be updated with new documents for online training -- use gensim's `LdaModel` for that.
Example:
>>> model = gensim.models.wrappers.LdaMallet('/Users/kofola/mallet-2.0.7/bin/mallet', corpus=my_corpus, num_topics=20, id2word=dictionary)
>>> print model[my_vector] # print LDA topics of a document
.. [1] http://mallet.cs.umass.edu/
"""
import logging
import random
import tempfile
import os
from subprocess import call
import numpy
from gensim import utils, matutils
logger = logging.getLogger('gensim.models.wrappers.ldamallet')
def read_doctopics(fname, eps=1e-6):
    """
    Yield document topic vectors from MALLET's "doc-topics" format, as sparse gensim vectors.

    Each yielded vector is a sorted list of ``(topic_id, weight)`` pairs,
    normalized so the weights sum to 1.0; weights whose absolute value is
    at most `eps` are dropped first.
    """
    with utils.smart_open(fname) as fin:
        next(fin)  # the first line is a header, not data
        for lineno, line in enumerate(fin):
            fields = line.split()[2:]  # drop the "doc" and "source" columns
            if len(fields) % 2 != 0:
                raise RuntimeError("invalid doc topics format at line %i in %s" % (lineno + 1, fname))
            pairs = zip(fields[::2], fields[1::2])
            doc = [(int(topic), float(weight))
                   for topic, weight in pairs
                   if abs(float(weight)) > eps]
            total = float(sum(weight for _, weight in doc))
            if total == 0:
                yield []
            else:
                yield sorted((topic, 1.0 * weight / total) for topic, weight in doc)
class LdaMallet(utils.SaveLoad):
    """
    Class for LDA training using MALLET. Communication between MALLET and Python
    takes place by passing around data files on disk and calling Java with subprocess.call().
    """
    def __init__(self, mallet_path, corpus=None, num_topics=100, id2word=None, workers=4, prefix=None,
                 optimize_interval=0, iterations=1000):
        """
        `mallet_path` is path to the mallet executable, e.g. `/home/kofola/mallet-2.0.7/bin/mallet`.

        `corpus` is a gensim corpus, aka a stream of sparse document vectors.

        `id2word` is a mapping between tokens ids and token.

        `workers` is the number of threads, for parallel training.

        `prefix` is the string prefix under which all data files will be stored; default: system temp + random filename prefix.

        `optimize_interval` optimize hyperparameters every N iterations (sometimes leads to Java exception; 0 to switch off hyperparameter optimization).

        `iterations` is the number of sampling iterations.
        """
        self.mallet_path = mallet_path
        self.id2word = id2word
        if self.id2word is None:
            logger.warning("no word id mapping provided; initializing from corpus, assuming identity")
            self.id2word = utils.dict_from_corpus(corpus)
            self.num_terms = len(self.id2word)
        else:
            # highest token id + 1, so the topic-word matrix can be indexed by id
            self.num_terms = 0 if not self.id2word else 1 + max(self.id2word.keys())
        if self.num_terms == 0:
            raise ValueError("cannot compute LDA over an empty collection (no terms)")
        self.num_topics = num_topics
        if prefix is None:
            # random prefix under the system temp dir keeps concurrent runs apart
            rand_prefix = hex(random.randint(0, 0xffffff))[2:] + '_'
            prefix = os.path.join(tempfile.gettempdir(), rand_prefix)
        self.prefix = prefix
        self.workers = workers
        self.optimize_interval = optimize_interval
        self.iterations = iterations
        if corpus is not None:
            # train immediately when a corpus is supplied up front
            self.train(corpus)

    # Paths of the intermediate files exchanged with MALLET, all under self.prefix.
    def finferencer(self):
        return self.prefix + 'inferencer.mallet'

    def ftopickeys(self):
        return self.prefix + 'topickeys.txt'

    def fstate(self):
        return self.prefix + 'state.mallet.gz'

    def fdoctopics(self):
        return self.prefix + 'doctopics.txt'

    def fcorpustxt(self):
        return self.prefix + 'corpus.txt'

    def fcorpusmallet(self):
        return self.prefix + 'corpus.mallet'

    def fwordweights(self):
        return self.prefix + 'wordweights.txt'

    def convert_input(self, corpus, infer=False):
        """
        Serialize documents (lists of unicode tokens) to a temporary text file,
        then convert that text file to MALLET format `outfile`.
        """
        logger.info("serializing temporary corpus to %s" % self.fcorpustxt())
        # write out the corpus in a file format that MALLET understands: one document per line:
        # document id[SPACE]label (not used)[SPACE]whitespace delimited utf8-encoded tokens
        with utils.smart_open(self.fcorpustxt(), 'wb') as fout:
            for docno, doc in enumerate(corpus):
                if self.id2word:
                    # repeat each token `cnt` times so MALLET sees raw text
                    tokens = sum(([self.id2word[tokenid]] * int(cnt) for tokenid, cnt in doc), [])
                else:
                    tokens = sum(([str(tokenid)] * int(cnt) for tokenid, cnt in doc), [])
                fout.write(utils.to_utf8("%s 0 %s\n" % (docno, ' '.join(tokens))))

        # convert the text file above into MALLET's internal format
        cmd = self.mallet_path + " import-file --preserve-case --keep-sequence --remove-stopwords --token-regex '\S+' --input %s --output %s"
        if infer:
            # reuse the training pipe so inference uses the same vocabulary
            cmd += ' --use-pipe-from ' + self.fcorpusmallet()
            cmd = cmd % (self.fcorpustxt(), self.fcorpusmallet() + '.infer')
        else:
            cmd = cmd % (self.fcorpustxt(), self.fcorpusmallet())
        logger.info("converting temporary corpus to MALLET format with %s" % cmd)
        call(cmd, shell=True)

    def train(self, corpus):
        # Run MALLET's collapsed gibbs-sampling trainer over `corpus`, then
        # load the resulting topic-word assignments into memory.
        self.convert_input(corpus, infer=False)
        cmd = self.mallet_path + " train-topics --input %s --num-topics %s --optimize-interval %s "\
            "--num-threads %s --output-state %s --output-doc-topics %s --output-topic-keys %s "\
            "--num-iterations %s --inferencer-filename %s"
        cmd = cmd % (self.fcorpusmallet(), self.num_topics, self.optimize_interval, self.workers,
                     self.fstate(), self.fdoctopics(), self.ftopickeys(), self.iterations, self.finferencer())
        # NOTE "--keep-sequence-bigrams" / "--use-ngrams true" poorer results + runs out of memory
        logger.info("training MALLET LDA with %s" % cmd)
        call(cmd, shell=True)
        self.word_topics = self.load_word_topics()

    def __getitem__(self, bow, iterations=100):
        # Infer topic distributions for `bow` (a single document or a whole
        # corpus).  The extra `iterations` argument is only reachable when
        # calling __getitem__ directly, not via model[bow] syntax.
        is_corpus, corpus = utils.is_corpus(bow)
        if not is_corpus:
            # query is a single document => make a corpus out of it
            bow = [bow]

        self.convert_input(bow, infer=True)
        cmd = self.mallet_path + " infer-topics --input %s --inferencer %s --output-doc-topics %s --num-iterations %s"
        cmd = cmd % (self.fcorpusmallet() + '.infer', self.finferencer(), self.fdoctopics() + '.infer', iterations)
        logger.info("inferring topics with MALLET LDA '%s'" % cmd)
        retval = call(cmd, shell=True)
        if retval != 0:
            raise RuntimeError("MALLET failed with error %s on return" % retval)
        result = list(read_doctopics(self.fdoctopics() + '.infer'))
        return result if is_corpus else result[0]

    def load_word_topics(self):
        # Parse MALLET's state file (see fstate()) into a
        # (num_topics, num_terms) matrix of topic-word assignment counts;
        # also reads the per-topic alpha values from the file header.
        logger.info("loading assigned topics from %s" % self.fstate())
        wordtopics = numpy.zeros((self.num_topics, self.num_terms), dtype=numpy.float32)
        with utils.smart_open(self.fstate()) as fin:
            _ = next(fin)  # header
            self.alpha = numpy.array([float(val) for val in next(fin).split()[2:]])
            assert len(self.alpha) == self.num_topics, "mismatch between MALLET vs. requested topics"
            _ = next(fin)  # beta
            for lineno, line in enumerate(fin):
                line = utils.to_unicode(line)
                doc, source, pos, typeindex, token, topic = line.split(" ")
                tokenid = self.id2word.token2id[token] if hasattr(self.id2word, 'token2id') else int(token)
                wordtopics[int(topic), tokenid] += 1
        logger.info("loaded assigned topics for %i tokens" % wordtopics.sum())
        self.wordtopics = wordtopics
        self.print_topics(15)

    def print_topics(self, num_topics=10, num_words=10):
        return self.show_topics(num_topics, num_words, log=True)

    def load_document_topics(self):
        """
        Return an iterator over the topic distribution of training corpus, by reading
        the doctopics.txt generated during training.
        """
        return read_doctopics(self.fdoctopics())

    def show_topics(self, num_topics=10, num_words=10, log=False, formatted=True):
        """
        Print the `num_words` most probable words for `num_topics` number of topics.
        Set `num_topics=-1` to print all topics.

        Set `formatted=True` to return the topics as a list of strings, or `False` as lists of (weight, word) pairs.
        """
        if num_topics < 0 or num_topics >= self.num_topics:
            num_topics = self.num_topics
            chosen_topics = range(num_topics)
        else:
            # pick half the topics from the smallest alphas, half from the largest
            num_topics = min(num_topics, self.num_topics)
            sort_alpha = self.alpha + 0.0001 * numpy.random.rand(len(self.alpha))  # add a little random jitter, to randomize results around the same alpha
            sorted_topics = list(matutils.argsort(sort_alpha))
            chosen_topics = sorted_topics[ : num_topics//2] + sorted_topics[-num_topics//2 : ]
        shown = []
        for i in chosen_topics:
            if formatted:
                topic = self.print_topic(i, topn=num_words)
            else:
                topic = self.show_topic(i, topn=num_words)
            shown.append(topic)
            if log:
                logger.info("topic #%i (%.3f): %s" % (i, self.alpha[i], topic))
        return shown

    def show_topic(self, topicid, topn=10):
        # Return the `topn` most probable (probability, word) pairs for a topic.
        topic = self.wordtopics[topicid]
        topic = topic / topic.sum()  # normalize to probability dist
        bestn = matutils.argsort(topic, topn, reverse=True)
        beststr = [(topic[id], self.id2word[id]) for id in bestn]
        return beststr

    def print_topic(self, topicid, topn=10):
        return ' + '.join(['%.3f*%s' % v for v in self.show_topic(topicid, topn)])
| gpl-3.0 |
sexroute/commandergenius | project/jni/python/src/Lib/xml/sax/handler.py | 79 | 13968 | """
This module contains the core classes of version 2.0 of SAX for Python.
This file provides only default classes with absolutely minimum
functionality, from which drivers and applications can be subclassed.
Many of these classes are empty and are included only as documentation
of the interfaces.
$Id: handler.py 35816 2004-05-06 03:47:48Z fdrake $
"""
version = '2.0beta'
#============================================================================
#
# HANDLER INTERFACES
#
#============================================================================
# ===== ERRORHANDLER =====
class ErrorHandler:
    """Basic interface for SAX error handlers.

    If you create an object that implements this interface, then
    register the object with your XMLReader, the parser will call the
    methods in your object to report all warnings and errors. There
    are three levels of errors available: warnings, (possibly)
    recoverable errors, and unrecoverable errors. All methods take a
    SAXParseException as the only parameter."""

    def error(self, exception):
        "Handle a recoverable error."
        # default behaviour: escalate by re-raising
        raise exception

    def fatalError(self, exception):
        "Handle a non-recoverable error."
        raise exception

    def warning(self, exception):
        "Handle a warning."
        # Python 2 print statement -- this module comes from the 2.x stdlib.
        print exception
# ===== CONTENTHANDLER =====
class ContentHandler:
    """Interface for receiving logical document content events.

    This is the main callback interface in SAX, and the one most
    important to applications. The order of events in this interface
    mirrors the order of the information in the document."""

    def __init__(self):
        # locator supplied by the parser through setDocumentLocator, if any
        self._locator = None

    def setDocumentLocator(self, locator):
        """Called by the parser to give the application a locator for
        locating the origin of document events.

        SAX parsers are strongly encouraged (though not absolutely
        required) to supply a locator: if it does so, it must supply
        the locator to the application by invoking this method before
        invoking any of the other methods in the DocumentHandler
        interface.

        The locator allows the application to determine the end
        position of any document-related event, even if the parser is
        not reporting an error. Typically, the application will use
        this information for reporting its own errors (such as
        character content that does not match an application's
        business rules). The information returned by the locator is
        probably not sufficient for use with a search engine.

        Note that the locator will return correct information only
        during the invocation of the events in this interface. The
        application should not attempt to use it at any other time."""
        self._locator = locator

    def startDocument(self):
        """Receive notification of the beginning of a document.

        The SAX parser will invoke this method only once, before any
        other methods in this interface or in DTDHandler (except for
        setDocumentLocator)."""

    def endDocument(self):
        """Receive notification of the end of a document.

        The SAX parser will invoke this method only once, and it will
        be the last method invoked during the parse. The parser shall
        not invoke this method until it has either abandoned parsing
        (because of an unrecoverable error) or reached the end of
        input."""

    def startPrefixMapping(self, prefix, uri):
        """Begin the scope of a prefix-URI Namespace mapping.

        The information from this event is not necessary for normal
        Namespace processing: the SAX XML reader will automatically
        replace prefixes for element and attribute names when the
        http://xml.org/sax/features/namespaces feature is true (the
        default).

        There are cases, however, when applications need to use
        prefixes in character data or in attribute values, where they
        cannot safely be expanded automatically; the
        start/endPrefixMapping event supplies the information to the
        application to expand prefixes in those contexts itself, if
        necessary.

        Note that start/endPrefixMapping events are not guaranteed to
        be properly nested relative to each-other: all
        startPrefixMapping events will occur before the corresponding
        startElement event, and all endPrefixMapping events will occur
        after the corresponding endElement event, but their order is
        not guaranteed."""

    def endPrefixMapping(self, prefix):
        """End the scope of a prefix-URI mapping.

        See startPrefixMapping for details. This event will always
        occur after the corresponding endElement event, but the order
        of endPrefixMapping events is not otherwise guaranteed."""

    def startElement(self, name, attrs):
        """Signals the start of an element in non-namespace mode.

        The name parameter contains the raw XML 1.0 name of the
        element type as a string and the attrs parameter holds an
        instance of the Attributes class containing the attributes of
        the element."""

    def endElement(self, name):
        """Signals the end of an element in non-namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElement event."""

    def startElementNS(self, name, qname, attrs):
        """Signals the start of an element in namespace mode.

        The name parameter contains the name of the element type as a
        (uri, localname) tuple, the qname parameter the raw XML 1.0
        name used in the source document, and the attrs parameter
        holds an instance of the Attributes class containing the
        attributes of the element.

        The uri part of the name tuple is None for elements which have
        no namespace."""

    def endElementNS(self, name, qname):
        """Signals the end of an element in namespace mode.

        The name parameter contains the name of the element type, just
        as with the startElementNS event."""

    def characters(self, content):
        """Receive notification of character data.

        The Parser will call this method to report each chunk of
        character data. SAX parsers may return all contiguous
        character data in a single chunk, or they may split it into
        several chunks; however, all of the characters in any single
        event must come from the same external entity so that the
        Locator provides useful information."""

    def ignorableWhitespace(self, whitespace):
        """Receive notification of ignorable whitespace in element content.

        Validating Parsers must use this method to report each chunk
        of ignorable whitespace (see the W3C XML 1.0 recommendation,
        section 2.10): non-validating parsers may also use this method
        if they are capable of parsing and using content models.

        SAX parsers may return all contiguous whitespace in a single
        chunk, or they may split it into several chunks; however, all
        of the characters in any single event must come from the same
        external entity, so that the Locator provides useful
        information."""

    def processingInstruction(self, target, data):
        """Receive notification of a processing instruction.

        The Parser will invoke this method once for each processing
        instruction found: note that processing instructions may occur
        before or after the main document element.

        A SAX parser should never report an XML declaration (XML 1.0,
        section 2.8) or a text declaration (XML 1.0, section 4.3.1)
        using this method."""

    def skippedEntity(self, name):
        """Receive notification of a skipped entity.

        The Parser will invoke this method once for each entity
        skipped. Non-validating processors may skip entities if they
        have not seen the declarations (because, for example, the
        entity was declared in an external DTD subset). All processors
        may skip external entities, depending on the values of the
        http://xml.org/sax/features/external-general-entities and the
        http://xml.org/sax/features/external-parameter-entities
        properties."""
# ===== DTDHandler =====
class DTDHandler:
    """Handle DTD events.

    This interface specifies only those DTD events required for basic
    parsing (unparsed entities and attributes)."""

    def notationDecl(self, name, publicId, systemId):
        "Handle a notation declaration event."

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        "Handle an unparsed entity declaration event."
# ===== ENTITYRESOLVER =====
class EntityResolver:
    """Basic interface for resolving entities. If you create an object
    implementing this interface, then register the object with your
    Parser, the parser will call the method in your object to
    resolve all external entities. Note that DefaultHandler implements
    this interface with the default behaviour."""

    def resolveEntity(self, publicId, systemId):
        """Resolve the system identifier of an entity and return either
        the system identifier to read from as a string, or an InputSource
        to read from."""
        # default: read straight from the given system identifier
        return systemId
#============================================================================
#
# CORE FEATURES
#
#============================================================================

feature_namespaces = "http://xml.org/sax/features/namespaces"
# true: Perform Namespace processing (default).
# false: Optionally do not perform Namespace processing
#        (implies namespace-prefixes).
# access: (parsing) read-only; (not parsing) read/write

feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes"
# true: Report the original prefixed names and attributes used for Namespace
#       declarations.
# false: Do not report attributes used for Namespace declarations, and
#        optionally do not report original prefixed names (default).
# access: (parsing) read-only; (not parsing) read/write

feature_string_interning = "http://xml.org/sax/features/string-interning"
# true: All element names, prefixes, attribute names, Namespace URIs, and
#       local names are interned using the built-in intern function.
# false: Names are not necessarily interned, although they may be (default).
# access: (parsing) read-only; (not parsing) read/write

feature_validation = "http://xml.org/sax/features/validation"
# true: Report all validation errors (implies external-general-entities and
#       external-parameter-entities).
# false: Do not report validation errors.
# access: (parsing) read-only; (not parsing) read/write

feature_external_ges = "http://xml.org/sax/features/external-general-entities"
# true: Include all external general (text) entities.
# false: Do not include external general entities.
# access: (parsing) read-only; (not parsing) read/write

feature_external_pes = "http://xml.org/sax/features/external-parameter-entities"
# true: Include all external parameter entities, including the external
#       DTD subset.
# false: Do not include any external parameter entities, even the external
#        DTD subset.
# access: (parsing) read-only; (not parsing) read/write

# Convenience list of every core feature name defined above.
all_features = [feature_namespaces,
                feature_namespace_prefixes,
                feature_string_interning,
                feature_validation,
                feature_external_ges,
                feature_external_pes]


#============================================================================
#
# CORE PROPERTIES
#
#============================================================================

property_lexical_handler = "http://xml.org/sax/properties/lexical-handler"
# data type: xml.sax.sax2lib.LexicalHandler
# description: An optional extension handler for lexical events like comments.
# access: read/write

property_declaration_handler = "http://xml.org/sax/properties/declaration-handler"
# data type: xml.sax.sax2lib.DeclHandler
# description: An optional extension handler for DTD-related events other
#              than notations and unparsed entities.
# access: read/write

property_dom_node = "http://xml.org/sax/properties/dom-node"
# data type: org.w3c.dom.Node
# description: When parsing, the current DOM node being visited if this is
#              a DOM iterator; when not parsing, the root DOM node for
#              iteration.
# access: (parsing) read-only; (not parsing) read/write

property_xml_string = "http://xml.org/sax/properties/xml-string"
# data type: String
# description: The literal string of characters that was the source for
#              the current event.
# access: read-only

property_encoding = "http://www.python.org/sax/properties/encoding"
# data type: String
# description: The name of the encoding to assume for input data.
# access: write: set the encoding, e.g. established by a higher-level
#                protocol. May change during parsing (e.g. after
#                processing a META tag)
#         read:  return the current encoding (possibly established through
#                auto-detection.
# initial value: UTF-8
#

property_interning_dict = "http://www.python.org/sax/properties/interning-dict"
# data type: Dictionary
# description: The dictionary used to intern common strings in the document
# access: write: Request that the parser uses a specific dictionary, to
#                allow interning across different documents
#         read:  return the current interning dictionary, or None
#

# Convenience list of every core property name defined above.
all_properties = [property_lexical_handler,
                  property_dom_node,
                  property_declaration_handler,
                  property_xml_string,
                  property_encoding,
                  property_interning_dict]
| lgpl-2.1 |
sauravpratihar/sugar | extensions/cpsection/power/model.py | 12 | 2930 | # Copyright (C) 2008 One Laptop Per Child
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import os
from gettext import gettext as _
import logging
from gi.repository import Gio
import dbus
# D-Bus coordinates of ohmd's key/value store.
OHM_SERVICE_NAME = 'org.freedesktop.ohm'
OHM_SERVICE_PATH = '/org/freedesktop/ohm/Keystore'
OHM_SERVICE_IFACE = 'org.freedesktop.ohm.Keystore'

# powerd is controlled through flag files in this directory; the presence
# of the inhibit flag disables automatic suspend.
POWERD_FLAG_DIR = '/etc/powerd/flags'
POWERD_INHIBIT_FLAG = '/etc/powerd/flags/inhibit-suspend'

_logger = logging.getLogger('ControlPanel - Power')
class ReadError(Exception):
    """Raised when a power-management setting cannot be read.

    `value` describes what failed and is kept on the instance for callers
    that inspect it directly.
    """

    def __init__(self, value):
        # Also pass the value to Exception so that .args, repr() and
        # pickling behave correctly (the original left args empty).
        Exception.__init__(self, value)
        self.value = value

    def __str__(self):
        return repr(self.value)
def using_powerd():
    """Return True when the powerd daemon manages power on this system."""
    # directory exists if powerd running, and it's recent
    # enough to be controllable.
    return os.access(POWERD_FLAG_DIR, os.W_OK)
def get_automatic_pm():
    """Return True when automatic power management (suspend) is enabled."""
    if using_powerd():
        # with powerd, suspend is automatic unless the inhibit flag exists
        return not os.access(POWERD_INHIBIT_FLAG, os.R_OK)
    # ohmd: read the value mirrored into GSettings by set_automatic_pm()
    settings = Gio.Settings('org.sugarlabs.power')
    return settings.get_boolean('automatic')
def print_automatic_pm():
    # Print 'on' or 'off' depending on the current setting; the tuple is
    # indexed by the boolean (Python 2 print statement).
    print ('off', 'on')[get_automatic_pm()]
def set_automatic_pm(enabled):
    """Automatic suspends on/off.

    `enabled` accepts 'on'/'off' or 1/0.  The setting is applied through
    powerd when available, otherwise through ohmd over D-Bus, and is then
    mirrored into GSettings (and the deprecated GConf).
    """
    if using_powerd():
        # powerd
        if enabled == 'off' or enabled == 0:
            # creating the inhibit flag file disables automatic suspend
            try:
                fd = open(POWERD_INHIBIT_FLAG, 'w')
            except IOError:
                _logger.debug('File %s is not writeable' % POWERD_INHIBIT_FLAG)
            else:
                fd.close()
        else:
            # removing the flag re-enables automatic suspend
            os.unlink(POWERD_INHIBIT_FLAG)
        return

    # ohmd
    bus = dbus.SystemBus()
    proxy = bus.get_object(OHM_SERVICE_NAME, OHM_SERVICE_PATH)
    keystore = dbus.Interface(proxy, OHM_SERVICE_IFACE)

    if enabled == 'on' or enabled == 1:
        keystore.SetKey('suspend.automatic_pm', 1)
        enabled = True
    elif enabled == 'off' or enabled == 0:
        keystore.SetKey('suspend.automatic_pm', 0)
        enabled = False
    else:
        raise ValueError(_('Error in automatic pm argument, use on/off.'))

    # mirror the value so get_automatic_pm() can read it back
    settings = Gio.Settings('org.sugarlabs.power')
    settings.set_boolean('automatic', enabled)

    # DEPRECATED
    from gi.repository import GConf
    client = GConf.Client.get_default()
    # NOTE(review): `enabled` is a bool here but set_string expects a
    # string -- confirm whether this deprecated mirror should use set_bool.
    client.set_string('/desktop/sugar/power/automatic', enabled)
    return
| gpl-2.0 |
eeshangarg/oh-mainline | vendor/packages/kombu/kombu/clocks.py | 35 | 4494 | """
kombu.clocks
============
Logical Clocks and Synchronization.
"""
from __future__ import absolute_import
from threading import Lock
from itertools import islice
from operator import itemgetter
from .five import zip
__all__ = ['LamportClock', 'timetuple']

# repr() template for timetuple instances.
R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'
class timetuple(tuple):
    """Tuple of event clock information.

    Can be used as part of a heap to keep events ordered.

    :param clock: Event clock value.
    :param timestamp: Event UNIX timestamp value.
    :param id: Event host id (e.g. ``hostname:pid``).
    :param obj: Optional obj to associate with this event.
    """
    __slots__ = ()

    def __new__(cls, clock, timestamp, id, obj=None):
        return tuple.__new__(cls, (clock, timestamp, id, obj))

    def __repr__(self):
        return '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'.format(*self)

    def __getnewargs__(self):
        return tuple(self)

    def __lt__(self, other):
        # Order by logical clock when both sides have a truthy one; break
        # clock ties with the process id, and fall back to timestamps when
        # either clock is missing/zero.
        try:
            own_clock, other_clock = self[0], other[0]
            if own_clock and other_clock:
                if own_clock == other_clock:
                    return self[2] < other[2]
                return own_clock < other_clock
            return self[1] < other[1]
        except IndexError:
            return NotImplemented

    def __gt__(self, other):
        return other < self

    def __le__(self, other):
        return not other < self

    def __ge__(self, other):
        return not self < other

    @property
    def clock(self):
        return self[0]

    @property
    def timestamp(self):
        return self[1]

    @property
    def id(self):
        return self[2]

    @property
    def obj(self):
        return self[3]
class LamportClock(object):
    """Lamport's logical clock.

    A monotonically incrementing counter used to order events across
    processes:

    * a process increments its counter before each local event
      (:meth:`forward`) and sends the value along with every message;
    * on receiving a message, the receiver sets its counter past the
      maximum of its own and the received value (:meth:`adjust`).

    The clock only has meaning relative to messages moving between
    processes; receiving a message resynchronizes the local clock with
    the sender.

    .. seealso::

        * `Lamport timestamps`_
        * `Lamports distributed mutex`_

    .. _`Lamport Timestamps`: http://en.wikipedia.org/wiki/Lamport_timestamps
    .. _`Lamports distributed mutex`: http://bit.ly/p99ybE
    """

    #: The clocks current value.
    value = 0

    def __init__(self, initial_value=0, Lock=Lock):
        self.value = initial_value
        self.mutex = Lock()

    def adjust(self, other):
        """Sync with a clock value received from another process."""
        with self.mutex:
            self.value = max(self.value, other) + 1
            return self.value

    def forward(self):
        """Increment and return the clock, e.g. before sending a message."""
        with self.mutex:
            self.value += 1
            return self.value

    def sort_heap(self, h):
        """Return the latest event from a pre-sorted list of event tuples.

        Each tuple holds at least ``(clock, process_id, ...)`` and is not
        unpacked, so longer tuples are fine.  When the two smallest clock
        values are equal, the tie is broken by taking the event with the
        smallest process id among the leading run of equal-clock events
        (the run's final element is excluded, mirroring the pairwise scan).
        """
        if h[0][0] != h[1][0]:
            # clock values unique; the first item is already the answer
            return h[0]
        tied = []
        for current, following in zip(h, islice(h, 1, None)):
            if current[0] != following[0]:
                break  # current and following clocks differ
            tied.append(current)
        # smallest process id among the tied events
        return min(tied, key=lambda event: event[1])

    def __str__(self):
        return str(self.value)

    def __repr__(self):
        return '<LamportClock: {0.value}>'.format(self)
| agpl-3.0 |
awkspace/ansible | lib/ansible/plugins/inventory/linode.py | 31 | 6600 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
name: linode
plugin_type: inventory
authors:
- Luke Murphy (@lwm)
short_description: Ansible dynamic inventory plugin for Linode.
version_added: "2.8"
requirements:
- python >= 2.7
- linode_api4 >= 2.0.0
description:
- Reads inventories from the Linode API v4.
- Uses a YAML configuration file that ends with linode.(yml|yaml).
- Linode labels are used by default as the hostnames.
- The inventory groups are built from groups and not tags.
options:
plugin:
description: marks this as an instance of the 'linode' plugin
required: true
choices: ['linode']
access_token:
description: The Linode account personal access token.
required: true
env:
- name: LINODE_ACCESS_TOKEN
regions:
description: Populate inventory with instances in this region.
default: []
type: list
required: false
types:
description: Populate inventory with instances with this type.
default: []
type: list
required: false
'''
EXAMPLES = r'''
# Minimal example. `LINODE_ACCESS_TOKEN` is exposed in environment.
plugin: linode
# Example with regions, types, groups and access token
plugin: linode
access_token: foobar
regions:
- eu-west
types:
- g5-standard-2
'''
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six import string_types
from ansible.plugins.inventory import BaseInventoryPlugin
try:
from linode_api4 import LinodeClient
from linode_api4.errors import ApiError as LinodeApiError
except ImportError:
raise AnsibleError('the Linode dynamic inventory plugin requires linode_api4.')
class InventoryModule(BaseInventoryPlugin):
    """Dynamic inventory plugin that builds inventory from the Linode API v4."""

    NAME = 'linode'

    def _build_client(self):
        """Build the Linode API client.

        The access token is taken from the plugin configuration, with the
        ``LINODE_ACCESS_TOKEN`` environment variable as a fallback.

        :raises AnsibleError: if no access token could be found.
        """
        access_token = self.get_option('access_token')

        if access_token is None:
            # Fall back to the environment when the option was not set.
            access_token = os.environ.get('LINODE_ACCESS_TOKEN')

        if access_token is None:
            raise AnsibleError((
                'Could not retrieve Linode access token '
                'from plugin configuration or environment'
            ))

        self.client = LinodeClient(access_token)

    def _get_instances_inventory(self):
        """Retrieve Linode instance information from the cloud inventory.

        :raises AnsibleError: if the Linode API reports an error.
        """
        try:
            self.instances = self.client.linode.instances()
        except LinodeApiError as exception:
            raise AnsibleError('Linode client raised: %s' % exception)

    def _add_groups(self):
        """Add Linode instance groups to the dynamic inventory."""
        # filter(None, ...) drops instances that have no group assigned.
        self.linode_groups = set(
            filter(None, [
                instance.group
                for instance
                in self.instances
            ])
        )

        for linode_group in self.linode_groups:
            self.inventory.add_group(linode_group)

    def _filter_by_config(self, regions, types):
        """Filter instances by the user-specified regions and types.

        Empty lists mean "no filtering" for that dimension.
        """
        if regions:
            self.instances = [
                instance for instance in self.instances
                if instance.region.id in regions
            ]

        if types:
            self.instances = [
                instance for instance in self.instances
                if instance.type.id in types
            ]

    def _add_instances_to_groups(self):
        """Add instance names to their dynamic inventory groups."""
        for instance in self.instances:
            self.inventory.add_host(instance.label, group=instance.group)

    def _add_hostvars_for_instances(self):
        """Expose each instance's raw API representation as hostvars."""
        for instance in self.instances:
            hostvars = instance._raw_json
            for hostvar_key in hostvars:
                self.inventory.set_variable(
                    instance.label,
                    hostvar_key,
                    hostvars[hostvar_key]
                )

    def _validate_option(self, name, desired_type, option_value):
        """Coerce and validate a user-specified configuration value.

        A bare string is promoted to a one-element list when a list is
        expected, and ``None`` becomes an empty instance of the desired type.

        :raises AnsibleParserError: if the value has the wrong type.
        """
        if isinstance(option_value, string_types) and desired_type == list:
            option_value = [option_value]

        if option_value is None:
            option_value = desired_type()

        if not isinstance(option_value, desired_type):
            raise AnsibleParserError(
                'The option %s (%s) must be a %s' % (
                    name, option_value, desired_type
                )
            )

        return option_value

    def _get_query_options(self, config_data):
        """Return the validated ``(regions, types)`` query options."""
        options = {
            'regions': {
                'type_to_be': list,
                'value': config_data.get('regions', [])
            },
            'types': {
                'type_to_be': list,
                'value': config_data.get('types', [])
            },
        }

        for name in options:
            options[name]['value'] = self._validate_option(
                name,
                options[name]['type_to_be'],
                options[name]['value']
            )

        regions = options['regions']['value']
        types = options['types']['value']

        return regions, types

    def verify_file(self, path):
        """Return True if *path* looks like a Linode inventory config file."""
        if super(InventoryModule, self).verify_file(path):
            endings = ('linode.yaml', 'linode.yml')
            if any(path.endswith(ending) for ending in endings):
                return True
        return False

    def parse(self, inventory, loader, path, cache=True):
        """Dynamically parse the Linode cloud inventory."""
        super(InventoryModule, self).parse(inventory, loader, path)

        # BUG FIX: read the configuration *before* building the client;
        # otherwise get_option('access_token') runs against an unpopulated
        # config and a token set in the YAML file is silently ignored
        # (only the environment variable would ever work).
        config_data = self._read_config_data(path)

        self._build_client()
        self._get_instances_inventory()

        regions, types = self._get_query_options(config_data)
        self._filter_by_config(regions, types)

        self._add_groups()
        self._add_instances_to_groups()
        self._add_hostvars_for_instances()
| gpl-3.0 |
ryano144/intellij-community | python/lib/Lib/encodings/cp775.py | 593 | 34732 | """ Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP775 codec built on the charmap machinery."""

    def encode(self, input, errors='strict'):
        """Encode *input* to CP775 bytes using the module's encoding map."""
        encoded, consumed = codecs.charmap_encode(input, errors, encoding_map)
        return encoded, consumed

    def decode(self, input, errors='strict'):
        """Decode CP775 bytes to unicode using the module's decoding table."""
        decoded, consumed = codecs.charmap_decode(input, errors, decoding_table)
        return decoded, consumed
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental CP775 encoder; charmap encoding carries no state."""

    def encode(self, input, final=False):
        # charmap_encode returns (bytes, length); only the bytes are needed.
        result = codecs.charmap_encode(input, self.errors, encoding_map)
        return result[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental CP775 decoder; charmap decoding carries no state."""

    def decode(self, input, final=False):
        # charmap_decode returns (text, length); only the text is needed.
        result = codecs.charmap_decode(input, self.errors, decoding_table)
        return result[0]
class StreamWriter(Codec, codecs.StreamWriter):
    """Stream writer for CP775; all behavior comes from the base classes."""
class StreamReader(Codec, codecs.StreamReader):
    """Stream reader for CP775; all behavior comes from the base classes."""
### encodings module API
def getregentry():
    """Return the CodecInfo used to register the 'cp775' encoding."""
    # A single Codec instance supplies both bound stateless methods.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp775',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0096: 0x00a2, # CENT SIGN
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x00a4, # CURRENCY SIGN
0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x00a7: 0x00a6, # BROKEN BAR
0x00a8: 0x00a9, # COPYRIGHT SIGN
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x258c, # LEFT HALF BLOCK
0x00de: 0x2590, # RIGHT HALF BLOCK
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x2219, # BULLET OPERATOR
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
u'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
u'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
u'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\xa2' # 0x0096 -> CENT SIGN
u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\xa4' # 0x009f -> CURRENCY SIGN
u'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
u'\xa6' # 0x00a7 -> BROKEN BAR
u'\xa9' # 0x00a8 -> COPYRIGHT SIGN
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
u'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
u'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
u'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
u'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
u'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
u'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
u'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
u'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u258c' # 0x00dd -> LEFT HALF BLOCK
u'\u2590' # 0x00de -> RIGHT HALF BLOCK
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
u'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
u'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\u2219' # 0x00f9 -> BULLET OPERATOR
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a2: 0x0096, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x009f, # CURRENCY SIGN
0x00a6: 0x00a7, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a9: 0x00a8, # COPYRIGHT SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S (GERMAN)
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x0100: 0x00a0, # LATIN CAPITAL LETTER A WITH MACRON
0x0101: 0x0083, # LATIN SMALL LETTER A WITH MACRON
0x0104: 0x00b5, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00d0, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x0080, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0087, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00b6, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x00d1, # LATIN SMALL LETTER C WITH CARON
0x0112: 0x00ed, # LATIN CAPITAL LETTER E WITH MACRON
0x0113: 0x0089, # LATIN SMALL LETTER E WITH MACRON
0x0116: 0x00b8, # LATIN CAPITAL LETTER E WITH DOT ABOVE
0x0117: 0x00d3, # LATIN SMALL LETTER E WITH DOT ABOVE
0x0118: 0x00b7, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00d2, # LATIN SMALL LETTER E WITH OGONEK
0x0122: 0x0095, # LATIN CAPITAL LETTER G WITH CEDILLA
0x0123: 0x0085, # LATIN SMALL LETTER G WITH CEDILLA
0x012a: 0x00a1, # LATIN CAPITAL LETTER I WITH MACRON
0x012b: 0x008c, # LATIN SMALL LETTER I WITH MACRON
0x012e: 0x00bd, # LATIN CAPITAL LETTER I WITH OGONEK
0x012f: 0x00d4, # LATIN SMALL LETTER I WITH OGONEK
0x0136: 0x00e8, # LATIN CAPITAL LETTER K WITH CEDILLA
0x0137: 0x00e9, # LATIN SMALL LETTER K WITH CEDILLA
0x013b: 0x00ea, # LATIN CAPITAL LETTER L WITH CEDILLA
0x013c: 0x00eb, # LATIN SMALL LETTER L WITH CEDILLA
0x0141: 0x00ad, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e7, # LATIN SMALL LETTER N WITH ACUTE
0x0145: 0x00ee, # LATIN CAPITAL LETTER N WITH CEDILLA
0x0146: 0x00ec, # LATIN SMALL LETTER N WITH CEDILLA
0x014c: 0x00e2, # LATIN CAPITAL LETTER O WITH MACRON
0x014d: 0x0093, # LATIN SMALL LETTER O WITH MACRON
0x0156: 0x008a, # LATIN CAPITAL LETTER R WITH CEDILLA
0x0157: 0x008b, # LATIN SMALL LETTER R WITH CEDILLA
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x0160: 0x00be, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00d5, # LATIN SMALL LETTER S WITH CARON
0x016a: 0x00c7, # LATIN CAPITAL LETTER U WITH MACRON
0x016b: 0x00d7, # LATIN SMALL LETTER U WITH MACRON
0x0172: 0x00c6, # LATIN CAPITAL LETTER U WITH OGONEK
0x0173: 0x00d6, # LATIN SMALL LETTER U WITH OGONEK
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00a5, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00a3, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00a4, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00cf, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00d8, # LATIN SMALL LETTER Z WITH CARON
0x2019: 0x00ef, # RIGHT SINGLE QUOTATION MARK
0x201c: 0x00f2, # LEFT DOUBLE QUOTATION MARK
0x201d: 0x00a6, # RIGHT DOUBLE QUOTATION MARK
0x201e: 0x00f7, # DOUBLE LOW-9 QUOTATION MARK
0x2219: 0x00f9, # BULLET OPERATOR
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x258c: 0x00dd, # LEFT HALF BLOCK
0x2590: 0x00de, # RIGHT HALF BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| apache-2.0 |
JSkelly/Toast | examples/sprite/sprite_test.py | 2 | 1043 | from toast.scene_graph import Scene
from toast.image_sheet import ImageSheet
from toast.sprite import Sprite
from toast.resource_loader import ResourceLoader
from toast.animation import Animation
from examples.demo_game import DemoGame
class NewScene(Scene):
    """Demo scene showing a run-cycle sprite sheet: a row of still frames
    on top, and below it a row of animated sprites, each starting its
    animation at a different frame."""

    def __init__(self):
        super(NewScene, self).__init__()

        frame_size = (32, 37)
        image = ResourceLoader.load('data//player_run.png')
        sheet = ImageSheet(image, frame_size)

        frame_ms = 80
        cycle = [(sheet[frame], frame_ms) for frame in range(8)]

        for index in range(8):
            x = (index + 1) * 32 + 16

            # Still sprite showing frame `index` of the sheet.
            still = Sprite(sheet[index])
            still.position = x, 84
            self.add(still)

            # Animated sprite whose run cycle starts at frame `index`.
            anim = Animation('run_cycle', cycle)
            anim.play('run_cycle', index)
            moving = Sprite(anim)
            moving.position = x, 148
            self.add(moving)
# Run the demo only when this file is executed directly, so importing the
# module (e.g. to reuse NewScene) does not start the game loop.
if __name__ == '__main__':
    game = DemoGame((640, 480), NewScene)
    game.run()
| mit |
"""
================================
Recognizing hand-written digits
================================
An example showing how the scikit-learn can be used to recognize images of
hand-written digits.
This example is commented in the
:ref:`tutorial section of the user manual <introduction>`.
"""
print(__doc__)

# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD 3 clause

# Standard scientific Python imports
import matplotlib.pyplot as plt

# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, metrics

# The digits dataset
digits = datasets.load_digits()

# The data that we are interested in is made of 8x8 images of digits, let's
# have a look at the first 3 images, stored in the `images` attribute of the
# dataset. If we were working from image files, we could load them using
# pylab.imread. Note that each image must have the same size. For these
# images, we know which digit they represent: it is given in the 'target' of
# the dataset.
images_and_labels = list(zip(digits.images, digits.target))
for index, (image, label) in enumerate(images_and_labels[:4]):
    plt.subplot(2, 4, index + 1)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Training: %i' % label)

# To apply a classifier on this data, we need to flatten the image, to
# turn the data in a (samples, feature) matrix:
n_samples = len(digits.images)
data = digits.images.reshape((n_samples, -1))

# Create a classifier: a support vector classifier
classifier = svm.SVC(gamma=0.001)

# BUG FIX: use floor division for the train/test split point.  Under
# Python 3 ``n_samples / 2`` is a float and cannot be used as an index.
half = n_samples // 2

# We learn the digits on the first half of the digits
classifier.fit(data[:half], digits.target[:half])

# Now predict the value of the digit on the second half:
expected = digits.target[half:]
predicted = classifier.predict(data[half:])

print("Classification report for classifier %s:\n%s\n"
      % (classifier, metrics.classification_report(expected, predicted)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(expected, predicted))

images_and_predictions = list(zip(digits.images[half:], predicted))
for index, (image, prediction) in enumerate(images_and_predictions[:4]):
    plt.subplot(2, 4, index + 5)
    plt.axis('off')
    plt.imshow(image, cmap=plt.cm.gray_r, interpolation='nearest')
    plt.title('Prediction: %i' % prediction)

plt.show()
schambers/civmarket | civmarket/lib/python2.7/site-packages/django/conf/locale/sk/formats.py | 118 | 1173 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats (Django date-format syntax, see the link above).
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. F Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',     # '25.10.2006', '25.10.06'
    '%y-%m-%d',                 # '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',        # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',     # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',           # '25.10.2006 14:30'
    '%d.%m.%Y',                 # '25.10.2006'
)
# Number formatting: comma decimal mark, digits grouped in threes.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| apache-2.0 |
timevortexproject/timevortex | timevortex/utils/timeserieslogger.py | 1 | 1057 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# -*- Mode: Python; py-indent-offset: 4 -*-
"""TSL functions"""
from timevortex.utils.globals import ERROR_TIMESERIES_NOT_DEFINED
# Keys identifying each category of malformed timeseries message.
KEY_TSL_BAD_JSON = "ts_without_json_message"
KEY_TSL_NO_SITE_ID = "ts_without_site_id"
KEY_TSL_NO_VARIABLE_ID = "ts_without_variable_id"
KEY_TSL_NO_VALUE = "ts_without_message"
KEY_TSL_NO_DATE = "ts_without_date"
KEY_TSL_NO_DST_TIMEZONE = "ts_without_dst_timezone"
KEY_TSL_NO_NON_DST_TIMEZONE = "ts_without_non_dst_timezone"

# Generic wrapper template.  Note that the messages built from it below
# still contain a nested %s, left for later interpolation with the
# offending message.
INCORRECT_MESSAGE = "Receive incorrect message => %s"

# Human-readable error message for each malformed-message key.
ERROR_TSL = {
    KEY_TSL_BAD_JSON: ERROR_TIMESERIES_NOT_DEFINED,
    KEY_TSL_NO_SITE_ID: INCORRECT_MESSAGE % "missing siteID in %s",
    KEY_TSL_NO_VARIABLE_ID: INCORRECT_MESSAGE % "missing variableID in %s",
    KEY_TSL_NO_VALUE: INCORRECT_MESSAGE % "missing value in %s",
    KEY_TSL_NO_DATE: INCORRECT_MESSAGE % "missing date in %s",
    KEY_TSL_NO_DST_TIMEZONE: INCORRECT_MESSAGE % "missing dstTimezone in %s",
    KEY_TSL_NO_NON_DST_TIMEZONE: INCORRECT_MESSAGE % "missing nonDstTimezone in %s",
}
| mit |
openstack/tempest | tempest/tests/fake_tempest_plugin.py | 7 | 1674 | # Copyright (c) 2015 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.test_discover import plugins
class FakePlugin(plugins.TempestPlugin):
    """Tempest plugin double whose hooks return fixed, inspectable values."""

    expected_load_test = ["my/test/path", "/home/dir"]
    expected_service_clients = [{'foo': 'bar'}]

    def load_tests(self):
        # Hand back the canned paths so test code can assert on them.
        return self.expected_load_test

    def register_opts(self, conf):
        # Nothing to register for the fake plugin.
        pass

    def get_opt_lists(self):
        return []

    def get_service_clients(self):
        return self.expected_service_clients
class FakeStevedoreObj(object):
    """Mimics a stevedore extension wrapping a FakePlugin instance."""

    obj = FakePlugin()

    def __init__(self, name='Test1'):
        self._name = name

    @property
    def name(self):
        return self._name
class FakePluginNoServiceClients(plugins.TempestPlugin):
    """Plugin double that deliberately omits get_service_clients."""

    def load_tests(self):
        return []

    def register_opts(self, conf):
        # No options for this fake either.
        pass

    def get_opt_lists(self):
        return []
class FakeStevedoreObjNoServiceClients(object):
    """Stevedore-style wrapper around FakePluginNoServiceClients."""

    obj = FakePluginNoServiceClients()

    def __init__(self, name='Test2'):
        self._name = name

    @property
    def name(self):
        return self._name
| apache-2.0 |
fos/fos-legacy | fos/actor/odfslicer.py | 1 | 2054 | import numpy as np
class ODF_Slice(object):
def __init__(self,odfs,vertices,faces,noiso,batch,group=None):
J=0
self.odfs_no=J
self.vertex_list=(odfs.shape[0]*odfs.shape[1])*[None]
for index in np.ndindex(odfs.shape[:2]):
values=odfs[index]
if noiso:
values=np.interp(values,[values.min(),values.max()],[0,.5])
inds=faces.ravel().tolist()
shift=index+(0,)
print J,odfs.shape[0]*odfs.shape[1]
points=np.dot(np.diag(values),vertices)
points=points+np.array(shift)
verx=points.ravel().tolist()
normals=np.zeros((len(vertices),3))
ones_=np.ones(len(values))
colors=np.vstack((values,ones_,ones_)).T
colors=colors.ravel().tolist()
p=vertices
l=faces
trinormals=np.cross(p[l[:,0]]-p[l[:,1]],\
p[l[:,1]]-p[l[:,2]],\
axisa=1,axisb=1)
for (i,lp) in enumerate(faces):
normals[lp]+=trinormals[i]
div=np.sqrt(np.sum(normals**2,axis=1))
div=div.reshape(len(div),1)
normals=(normals/div)
norms=np.array(normals).ravel().tolist()
self.vertex_list[i] = batch.add_indexed(len(vertices),\
GL_TRIANGLES,\
group,\
inds,\
('v3d/static',verx),\
('n3d/static',norms),\
('c3d/static',colors))
J+=1
def update(self):
pass
def delete(self):
for i in range(self.odfs_no):
self.vertex_list.delete()
| bsd-3-clause |
isnnn/Sick-Beard-TPB | lib/requests/packages/charade/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
    """Group prober bundling every single-byte charset prober."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # All the plain single-byte models, one prober each.
        self._mProbers = [
            SingleByteCharSetProber(model)
            for model in (Win1251CyrillicModel, Koi8rModel,
                          Latin5CyrillicModel, MacCyrillicModel,
                          Ibm866Model, Ibm855Model,
                          Latin7GreekModel, Win1253GreekModel,
                          Latin5BulgarianModel, Win1251BulgarianModel,
                          Latin2HungarianModel, Win1250HungarianModel,
                          TIS620ThaiModel)
        ]
        # Hebrew needs a coordinating prober feeding two directional
        # probers (logical vs. visual character ordering).
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                 False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(Win1255HebrewModel, True,
                                                hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self._mProbers.extend([hebrew_prober, logical_hebrew,
                               visual_hebrew])
        self.reset()
| gpl-3.0 |
hojel/calibre | src/calibre/library/sqlite.py | 19 | 11844 | from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Kovid Goyal kovid@kovidgoyal.net'
__docformat__ = 'restructuredtext en'
'''
Wrapper for multi-threaded access to a single sqlite database connection. Serializes
all calls.
'''
import sqlite3 as sqlite, traceback, time, uuid, sys, os
import repr as reprlib
from sqlite3 import IntegrityError, OperationalError
from threading import Thread
from Queue import Queue
from threading import RLock
from datetime import datetime
from functools import partial
from calibre.ebooks.metadata import title_sort, author_to_author_sort
from calibre.utils.date import parse_date, isoformat, local_tz, UNDEFINED_DATE
from calibre import isbytestring, force_unicode
from calibre.constants import iswindows, DEBUG, plugins
from calibre.utils.icu import sort_key
from calibre import prints
from dateutil.tz import tzoffset
# Single re-entrant lock serializing all proxied database calls.
global_lock = RLock()

# C speedup extension; may be None when the binary plugin failed to load
# (the converters below check for that).
_c_speedup = plugins['speedup'][0]
def _c_convert_timestamp(val):
    """Parse a timestamp string via the C speedup extension, falling back
    to the generic parse_date() whenever the fast path cannot handle it."""
    if not val:
        return None
    try:
        ret = _c_speedup.parse_date(val.strip())
    except:
        ret = None
    if ret is None:
        # Fast parser refused the value: use the slow, tolerant parser.
        return parse_date(val, as_utc=False)
    year, month, day, hour, minutes, seconds, tzsecs = ret
    try:
        return datetime(year, month, day, hour, minutes, seconds,
                tzinfo=tzoffset(None, tzsecs)).astimezone(local_tz)
    except OverflowError:
        # Out-of-range dates collapse to the UNDEFINED_DATE sentinel.
        return UNDEFINED_DATE.astimezone(local_tz)
def _py_convert_timestamp(val):
    """Pure-python fallback: parse 'YYYY-MM-DD HH:MM:SS...[+-]HH:MM' into an
    aware datetime, deferring to parse_date() for anything that does not
    match that fixed layout."""
    if not val:
        return None
    offset_secs = 0
    try:
        # A '+' or '-' six chars from the end marks a UTC offset suffix.
        sign = {'+': 1, '-': -1}.get(val[-6], None)
        if sign is not None:
            offset_secs = 60 * ((int(val[-5:-3]) * 60 + int(val[-2:])) * sign)
        return datetime(int(val[0:4]), int(val[5:7]), int(val[8:10]),
                        int(val[11:13]), int(val[14:16]), int(val[17:19]),
                        tzinfo=tzoffset(None, offset_secs))
    except:
        # Anything unexpected: let the tolerant parser have a go.
        pass
    return parse_date(val, as_utc=False)
# Prefer the C implementation when the speedup extension is available.
convert_timestamp = _py_convert_timestamp if _c_speedup is None else \
        _c_convert_timestamp

def adapt_datetime(dt):
    # Datetimes are stored as ISO text with a space separator.
    return isoformat(dt, sep=' ')

sqlite.register_adapter(datetime, adapt_datetime)
sqlite.register_converter('timestamp', convert_timestamp)
def convert_bool(val):
    """Decode a sqlite BOOL column: anything except the string '0' is True."""
    return not (val == '0')

# Booleans are stored as the integers 1/0.
sqlite.register_adapter(bool, lambda flag: 1 if flag else 0)
sqlite.register_converter('bool', convert_bool)
sqlite.register_converter('BOOL', convert_bool)
class DynamicFilter(object):
    """Callable sqlite function whose membership set can be swapped at
    runtime via change()."""

    def __init__(self, name):
        self.name = name
        self.ids = frozenset([])

    def __call__(self, id_):
        # sqlite wants an int result, not a bool.
        return 1 if id_ in self.ids else 0

    def change(self, ids):
        self.ids = frozenset(ids)
class Concatenate(object):
    '''String concatenation aggregator for sqlite'''

    def __init__(self, sep=','):
        self.sep = sep
        self.ans = []

    def step(self, value):
        # sqlite feeds NULL columns as None; skip them.
        if value is None:
            return
        self.ans.append(value)

    def finalize(self):
        return self.sep.join(self.ans) if self.ans else None
class SortedConcatenate(object):
    '''String concatenation aggregator for sqlite, sorted by supplied index'''

    sep = ','

    def __init__(self):
        self.ans = {}

    def step(self, ndx, value):
        # NULL values are dropped; later steps with the same index win.
        if value is None:
            return
        self.ans[ndx] = value

    def finalize(self):
        if not self.ans:
            return None
        return self.sep.join(self.ans[k] for k in sorted(self.ans))
class SortedConcatenateBar(SortedConcatenate):
    # Same aggregator, but values are joined with '|' instead of ','.
    sep = '|'
class SortedConcatenateAmper(SortedConcatenate):
    # Same aggregator, but values are joined with '&' instead of ','.
    sep = '&'
class IdentifiersConcat(object):
    '''String concatenation aggregator for the identifiers map'''

    def __init__(self):
        self.ans = []

    def step(self, key, val):
        # Each identifier is rendered as "scheme:value".
        self.ans.append(u'%s:%s' % (key, val))

    def finalize(self):
        return ','.join(self.ans)
class AumSortedConcatenate(object):
    '''String concatenation aggregator for the author sort map'''

    def __init__(self):
        self.ans = {}

    def step(self, ndx, author, sort, link):
        # Rows with a NULL author are skipped entirely.
        if author is None:
            return
        self.ans[ndx] = ':::'.join((author, sort, link))

    def finalize(self):
        if not self.ans:
            return None
        # Entries ordered by index; joining a single entry leaves it
        # unchanged, matching the explicit single-value case.
        return ':#:'.join(self.ans[k] for k in sorted(self.ans))
class Connection(sqlite.Connection):
    """sqlite3 Connection with a convenience ``get`` query method."""

    def get(self, *args, **kw):
        """Run a query and return all rows, or, with all=False, only the
        first column of the first row (None when no row matched)."""
        cursor = self.execute(*args)
        if kw.get('all', True):
            return cursor.fetchall()
        row = cursor.fetchone()
        if not row:
            row = [None]
        return row[0]
def _author_to_author_sort(x):
if not x: return ''
return author_to_author_sort(x.replace('|', ','))
def pynocase(one, two, encoding='utf-8'):
    """Case-insensitive collation implemented in Python (fallback for the
    C extension's NOCASE).  Bytestrings are decoded with errors replaced
    before comparison; returns cmp()-style ordering of the lowercased
    values."""
    if isbytestring(one):
        try:
            one = one.decode(encoding, 'replace')
        except:
            pass
    if isbytestring(two):
        try:
            two = two.decode(encoding, 'replace')
        except:
            pass
    return cmp(one.lower(), two.lower())
def icu_collator(s1, s2):
    # Collate using ICU sort keys so unicode text sorts correctly; sqlite
    # may hand us bytestrings, hence the force_unicode.
    return cmp(sort_key(force_unicode(s1, 'utf-8')),
            sort_key(force_unicode(s2, 'utf-8')))
def load_c_extensions(conn, debug=DEBUG):
    """Try to load the sqlite_custom C extension into ``conn``.

    Returns True on success, False on any failure (printing the error
    when ``debug`` is set)."""
    try:
        conn.enable_load_extension(True)
        # NOTE(review): sys.extensions_location is not a stdlib attribute --
        # presumably set by calibre at startup; confirm.
        ext_path = os.path.join(sys.extensions_location, 'sqlite_custom.'+
                ('pyd' if iswindows else 'so'))
        conn.load_extension(ext_path)
        # Re-disable extension loading once we are done, for safety.
        conn.enable_load_extension(False)
        return True
    except Exception as e:
        if debug:
            print 'Failed to load high performance sqlite C extension'
            print e
        return False
def do_connect(path, row_factory=None):
    """Open the database at ``path`` and register all calibre-specific
    adapters, aggregates, collations and SQL functions on the connection."""
    conn = sqlite.connect(path, factory=Connection,
            detect_types=sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES)
    conn.execute('pragma cache_size=5000')
    encoding = conn.execute('pragma encoding').fetchone()[0]
    conn.create_aggregate('sortconcat', 2, SortedConcatenate)
    conn.create_aggregate('sortconcat_bar', 2, SortedConcatenateBar)
    conn.create_aggregate('sortconcat_amper', 2, SortedConcatenateAmper)
    conn.create_aggregate('identifiers_concat', 2, IdentifiersConcat)
    # The C extension, when available, provides faster implementations.
    load_c_extensions(conn)
    conn.row_factory = sqlite.Row if row_factory else (lambda cursor, row : list(row))
    conn.create_aggregate('concat', 1, Concatenate)
    conn.create_aggregate('aum_sortconcat', 4, AumSortedConcatenate)
    conn.create_collation('PYNOCASE', partial(pynocase,
        encoding=encoding))
    conn.create_function('title_sort', 1, title_sort)
    conn.create_function('author_to_author_sort', 1,
            _author_to_author_sort)
    conn.create_function('uuid4', 0, lambda : str(uuid.uuid4()))

    # Dummy functions for dynamically created filters
    conn.create_function('books_list_filter', 1, lambda x: 1)
    conn.create_collation('icucollate', icu_collator)
    return conn
class DBThread(Thread):
    """Daemon thread that owns the sqlite connection.

    All database calls are serialized through the single-slot ``requests``
    queue; each request is a (method_name, args, kwargs) tuple and its
    (ok, result) answer is posted on the single-slot ``results`` queue."""

    # Sentinel request that shuts down the thread and closes the db.
    CLOSE = '-------close---------'

    def __init__(self, path, row_factory):
        Thread.__init__(self)
        self.setDaemon(True)
        self.path = path
        # (exception, formatted traceback) of any error that escaped run().
        self.unhandled_error = (None, '')
        self.row_factory = row_factory
        self.requests = Queue(1)
        self.results = Queue(1)
        self.conn = None

    def connect(self):
        self.conn = do_connect(self.path, self.row_factory)

    def run(self):
        try:
            self.connect()
            while True:
                func, args, kwargs = self.requests.get()
                if func == self.CLOSE:
                    self.conn.close()
                    break
                if func == 'dump':
                    # Special-cased: not a plain connection method call.
                    try:
                        ok, res = True, tuple(self.conn.iterdump())
                    except Exception as err:
                        ok, res = False, (err, traceback.format_exc())
                elif func == 'create_dynamic_filter':
                    # Registers a mutable filter function and returns it.
                    try:
                        f = DynamicFilter(args[0])
                        self.conn.create_function(args[0], 1, f)
                        ok, res = True, f
                    except Exception as err:
                        ok, res = False, (err, traceback.format_exc())
                else:
                    # Generic case: call the named method on the connection.
                    bfunc = getattr(self.conn, func)
                    try:
                        for i in range(3):
                            try:
                                ok, res = True, bfunc(*args, **kwargs)
                                break
                            except OperationalError as err:
                                # Retry if unable to open db file
                                e = str(err)
                                if 'unable to open' not in e or i == 2:
                                    if 'unable to open' in e:
                                        prints('Unable to open database for func',
                                            func, reprlib.repr(args),
                                            reprlib.repr(kwargs))
                                    raise
                                time.sleep(0.5)
                    except Exception as err:
                        ok, res = False, (err, traceback.format_exc())
                self.results.put((ok, res))
        except Exception as err:
            # Record the failure so the proxy can re-raise it caller-side.
            self.unhandled_error = (err, traceback.format_exc())
class DatabaseException(Exception):
    """Raised in the caller's thread for an error that happened inside the
    database proxy thread; carries the original error and the remote
    traceback (re-indented for readability)."""

    def __init__(self, err, tb):
        indented = '\n\t'.join(('\tRemote' + tb).splitlines())
        try:
            text = unicode(err) + '\n' + indented
        except:
            # unicode() unavailable or failing: fall back to repr().
            text = repr(err) + '\n' + indented
        Exception.__init__(self, text)
        self.orig_err = err
        self.orig_tb = indented
def proxy(fn):
    ''' Decorator to call methods on the database connection in the proxy thread '''
    def run(self, *args, **kwargs):
        if self.closed:
            raise DatabaseException('Connection closed', '')
        # global_lock serializes callers so request/result pairs from
        # different threads cannot interleave on the single-slot queues.
        with global_lock:
            if self.proxy.unhandled_error[0] is not None:
                raise DatabaseException(*self.proxy.unhandled_error)
            # Only the method *name* is sent; the worker looks it up on
            # its own connection object.
            self.proxy.requests.put((fn.__name__, args, kwargs))
            ok, res = self.proxy.results.get()
            if not ok:
                if isinstance(res[0], IntegrityError):
                    # Re-raise as the real type so callers can catch it.
                    raise IntegrityError(unicode(res[0]))
                raise DatabaseException(*res)
            return res
    return run
class ConnectionProxy(object):
    """Caller-side handle to a DBThread.  Every @proxy-decorated method is
    executed by the worker thread and its result marshalled back."""

    def __init__(self, proxy):
        self.proxy = proxy
        self.closed = False

    def close(self):
        # Only ask the worker to close if it is still healthy.
        if self.proxy.unhandled_error[0] is None:
            self.proxy.requests.put((self.proxy.CLOSE, [], {}))
        self.closed = True

    # The bodies below are never executed: the ``proxy`` decorator forwards
    # the method name and arguments to the worker thread instead.

    @proxy
    def get(self, query, all=True): pass

    @proxy
    def commit(self): pass

    @proxy
    def execute(self): pass

    @proxy
    def executemany(self): pass

    @proxy
    def executescript(self): pass

    @proxy
    def create_aggregate(self): pass

    @proxy
    def create_function(self): pass

    @proxy
    def cursor(self): pass

    @proxy
    def dump(self): pass

    @proxy
    def create_dynamic_filter(self): pass
def connect(dbpath, row_factory=None):
    """Start a DBThread for ``dbpath`` and return a ConnectionProxy to it,
    raising DatabaseException if the worker failed to connect."""
    conn = ConnectionProxy(DBThread(dbpath, row_factory))
    conn.proxy.start()
    # Poll until the worker thread has either connected or died.
    while conn.proxy.unhandled_error[0] is None and conn.proxy.conn is None:
        time.sleep(0.01)
    if conn.proxy.unhandled_error[0] is not None:
        raise DatabaseException(*conn.proxy.unhandled_error)
    return conn
def test():
    # Quick manual check that the custom sqlite C extension can be loaded.
    c = sqlite.connect(':memory:')
    if load_c_extensions(c, True):
        print 'Loaded C extension successfully'
| gpl-3.0 |
skevy/django | django/contrib/contenttypes/management.py | 315 | 2458 | from django.contrib.contenttypes.models import ContentType
from django.db.models import get_apps, get_models, signals
from django.utils.encoding import smart_unicode
def update_contenttypes(app, created_models, verbosity=2, **kwargs):
    """
    Creates content types for models in the given app, removing any model
    entries that no longer have a matching model class.
    """
    ContentType.objects.clear_cache()
    # Existing content types for this app; matched ones are removed from
    # this list below, so whatever remains is stale.
    content_types = list(ContentType.objects.filter(app_label=app.__name__.split('.')[-2]))
    app_models = get_models(app)
    if not app_models:
        return
    for klass in app_models:
        opts = klass._meta
        try:
            ct = ContentType.objects.get(app_label=opts.app_label,
                model=opts.object_name.lower())
            content_types.remove(ct)
        except ContentType.DoesNotExist:
            # No entry for this model yet: create one.
            ct = ContentType(name=smart_unicode(opts.verbose_name_raw),
                app_label=opts.app_label, model=opts.object_name.lower())
            ct.save()
            if verbosity >= 2:
                print "Adding content type '%s | %s'" % (ct.app_label, ct.model)
    # The presence of any remaining content types means the supplied app has an
    # undefined model. Confirm that the content type is stale before deletion.
    if content_types:
        if kwargs.get('interactive', False):
            content_type_display = '\n'.join([' %s | %s' % (ct.app_label, ct.model) for ct in content_types])
            ok_to_delete = raw_input("""The following content types are stale and need to be deleted:
%s
Any objects related to these content types by a foreign key will also
be deleted. Are you sure you want to delete these content types?
If you're unsure, answer 'no'.
Type 'yes' to continue, or 'no' to cancel: """ % content_type_display)
        else:
            # Non-interactive runs never delete automatically.
            ok_to_delete = False
        if ok_to_delete == 'yes':
            for ct in content_types:
                if verbosity >= 2:
                    print "Deleting stale content type '%s | %s'" % (ct.app_label, ct.model)
                ct.delete()
        else:
            if verbosity >= 2:
                print "Stale content types remain."
def update_all_contenttypes(verbosity=2, **kwargs):
    # Refresh content types for every installed app.
    for app in get_apps():
        update_contenttypes(app, None, verbosity, **kwargs)

# Keep content types in sync after every syncdb run.
signals.post_syncdb.connect(update_contenttypes)

if __name__ == "__main__":
    update_all_contenttypes()
| bsd-3-clause |
kkoksvik/FreeCAD | src/Mod/Start/StartPage/StartPage.py | 2 | 26929 | #***************************************************************************
#* *
#* Copyright (c) 2012 *
#* Yorik van Havre <yorik@uncreated.net> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
# This is the start page template
import os,FreeCAD,FreeCADGui,tempfile,time,zipfile,urllib,re,cStringIO
from PySide import QtGui
from xml.etree.ElementTree import parse
FreeCADGui.addLanguagePath(":/translations")
FreeCADGui.updateLocale()
def translate(context,text):
    """Convenience wrapper for the Qt translator.

    Returns the translated text as a utf-8 encoded bytestring with single
    quotes backslash-escaped -- presumably so the result can be embedded
    safely in the quoted javascript strings of the page template below;
    confirm against the template before changing."""
#    return str(QtGui.QApplication.translate(context, text, None, QtGui.QApplication.UnicodeUTF8).toUtf8())
    u = QtGui.QApplication.translate(context, text, None,
        QtGui.QApplication.UnicodeUTF8).encode("utf8")
    s = cStringIO.StringIO()
    for i in u:
        if ord(i) == 39:  # 39 == ord("'")
            s.write("\\'")
        else:
            s.write(i)
    t = s.getvalue()
    s.close()
    return t
# texts to be translated
# Numbered module-level constants interpolated into the HTML "page"
# template defined further below.
text01 = translate("StartPage","FreeCAD Start Center")
text02 = translate("StartPage","Start a new project")
text03 = translate("StartPage","Recent Files")
text04 = translate("StartPage","Latest videos")
text05 = translate("StartPage","Latest commits")
text06 = translate("StartPage","On the web")
text07 = translate("StartPage","This is the FreeCAD Homepage. Here you will be able to find a lot of information about FreeCAD, including tutorials, examples and user documentation.")
text08 = translate("StartPage","FreeCAD Homepage")
text09 = translate("StartPage","Example projects")
text10 = translate("StartPage","Schenkel STEP file")
text11 = translate("StartPage","Load a PartDesign example")
text12 = translate("StartPage","Load a Drawing extraction")
text13 = translate("StartPage","Load a Robot simulation example")
text14 = translate("StartPage","Projects from the Web")
text15 = translate("StartPage","Schenkel STEP")
text16 = translate("StartPage","Complex Part")
text17 = translate("StartPage","Close this window after opening or creating a file")
text18 = translate("StartPage","Don't show me this window again next time")
text19 = translate("StartPage","Designing parts")
text20 = translate("StartPage","The <b>Part Design</b> workbench is designed to create complex pieces based on constrained 2D sketches. Use it to draw 2D shapes, constrain some of their elements and extrude them to form 3D pieces.")
text21 = translate("StartPage","Example workflow")
text22 = translate("StartPage","Part Design")
text23 = translate("StartPage","Designing architectural elements")
text24 = translate("StartPage","The <b>Architectural Design</b> workbench is specially designed for working with architectural elements such as walls or windows. Start by drawing 2D shapes, and use them as guides to build architecutral objects.")
text25 = translate("StartPage","Architectural Design")
text26 = translate("StartPage","Working with Meshes")
text27 = translate("StartPage","The <b>Mesh Workbench</b> is used to work with Mesh objects. Meshes are simpler 3D objects than Part objects, but they are often easier to import and export to/from other applications.")
text28 = translate("StartPage","FreeCAD offers you several tools to convert between Mesh and Part objects.")
text29 = translate("StartPage","Work with Meshes")
text30 = translate("StartPage","The complete workbench")
text31 = translate("StartPage","FreeCAD Complete workbench")
text32 = translate("StartPage","populated with some of the most commonly used tools.")
text33 = translate("StartPage","file size:")
text34 = translate("StartPage","creation time:")
text35 = translate("StartPage","last modified:")
text36 = translate("StartPage","location:")
text37 = translate("StartPage","User manual")
text38 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Online_Help_Toc")
text39 = translate("StartPage","Tutorials")
text40 = translate("StartPage","Python resources")
text41 = translate("StartPage","File not found")
text42 = translate("StartPage","from <a href=http://twitter.com/FreeCADNews>@FreeCADNews</a>")
text43 = translate("StartPage","The FreeCAD-tutorial blog")
text44 = translate("StartPage","from <a href=http://www.youtube.com/user/FreeCADNews?feature=mhee>FreeCADNews channel</a>")
text45 = translate("StartPage","This is the official user manual of FreeCAD, built, maintained and translated by the FreeCAD community.")
text46 = translate("StartPage","The tutorials section on the FreeCAD website")
text47 = translate("StartPage","The section of the FreeCAD website dedicated to python scripting, with examples, explanations, and API commands.")
text48 = translate("StartPage","A blog dedicated to teaching FreeCAD, maintained by members of the FreeCAD community")
text49 = translate("StartPage","Getting started")
text50 = translate("StartPage","The FreeCAD interface is divided in workbenches, which are sets of tools suited for a specific task. You can start with one of the workbenches in this list, or with the complete workbench, which presents you with some of the most used tools gathered from other workbenches. Click to read more about workbenches on the FreeCAD website.")
text51 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Workbenches")
text52 = translate("StartPage","Ship Design")
text53 = translate("StartPage","Designing and calculating ships")
text54 = translate("StartPage","The <b>Ship Design</b> module offers several tools to help ship designers to view, model and calculate profiles and other specific properties of ship hulls.")
text55 = translate("StartPage","Load an Architectural example model")
text56 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Tutorials")
text57 = translate("StartPage","http://www.freecadweb.org/wiki/index.php?title=Power_users_hub")
text58 = translate("StartPage","Your version of FreeCAD is up to date.")
text59 = translate("StartPage","There is a new release of FreeCAD available.")
text60 = translate("StartPage","Load an FEM example analysis")
text61 = translate("StartPage","Obtain a development version")
text62 = translate("StartPage","<b>Development versions</b> are made available by community members from time to time and usually contain the latest changes, but are more likely to contain bugs.")
text63 = translate("StartPage","See all commits")
# get FreeCAD version
v = FreeCAD.Version()
vmajor = v[0]
vminor = v[1]
# Keep only the leading token of the build string (text before the first
# space); it is later compared numerically in the page's javascript.
vbuild = v[2].split(" ")[0]
# here is the html page skeleton. The placeholder words inside it
# (defaultworkbenches, recentfiles, defaultlinks, defaultexamples,
# customblocks) are replaced later by handle(), the #...color tokens by
# setColors(), and the text## globals hold translated UI strings that are
# interpolated directly below. hrefs prefixed with "ext" are opened in an
# external browser by the page handler.
page = """
<html>
<head>
<title>FreeCAD - Start page</title>
<script language="javascript">
var linkDescriptions = [];
function JSONscriptRequest(fullUrl) {
// REST request path
this.fullUrl = fullUrl;
// Get the DOM location to put the script tag
this.headLoc = document.getElementsByTagName("head").item(0);
// Generate a unique script tag id
this.scriptId = 'JscriptId' + JSONscriptRequest.scriptCounter++;
}
// Static script ID counter
JSONscriptRequest.scriptCounter = 1;
JSONscriptRequest.prototype.buildScriptTag = function () {
// Create the script tag
this.scriptObj = document.createElement("script");
// Add script object attributes
this.scriptObj.setAttribute("type", "text/javascript");
this.scriptObj.setAttribute("charset", "utf-8");
this.scriptObj.setAttribute("src", this.fullUrl);
this.scriptObj.setAttribute("id", this.scriptId);
}
JSONscriptRequest.prototype.removeScriptTag = function () {
// Destroy the script tag
this.headLoc.removeChild(this.scriptObj);
}
JSONscriptRequest.prototype.addScriptTag = function () {
// Create the script tag
this.headLoc.appendChild(this.scriptObj);
}
function show(theText) {
ddiv = document.getElementById("description");
if (theText == "") theText = " ";
ddiv.innerHTML = theText;
}
function checkVersion(data) {
vdiv = document.getElementById("versionbox");
var cmajor = """ + vmajor + """;
var cminor = """ + vminor + """;
var cbuild = """ + vbuild + """;
var amajor = data[0]['major'];
var aminor = data[0]['minor'];
var abuild = data[0]['build'];
if (cmajor >= amajor && cminor >= aminor && cbuild >= abuild) {
vdiv.innerHTML=" """ + text58 + """: """ + vmajor + """.""" + vminor + """.""" + vbuild + """";
} else {
vdiv.innerHTML="<a href=exthttp://github.com/FreeCAD/FreeCAD/releases/latest> """ + text59 + """:"+amajor+"."+aminor+"."+abuild+"</a>";
}
}
function load() {
// load latest news
ddiv = document.getElementById("news");
ddiv.innerHTML = "Connecting...";
var tobj=new JSONscriptRequest('https://api.github.com/repos/FreeCAD/FreeCAD/commits?callback=showTweets');
tobj.buildScriptTag(); // Build the script tag
tobj.addScriptTag(); // Execute (add) the script tag
ddiv.innerHTML = "Downloading latest news...";
// load version
var script = document.createElement('script');
script.src = 'http://www.freecadweb.org/version.php?callback=checkVersion';
document.body.appendChild(script);
}
function stripTags(text) {
// from http://www.pagecolumn.com/tool/all_about_html_tags.htm /<\s*\/?\s*span\s*.*?>/g
stripped = text.replace("<table", "<div");
stripped = stripped.replace("</table", "</div");
stripped = stripped.replace("<tr", "<tr");
stripped = stripped.replace("</tr", "</tr");
stripped = stripped.replace("<td", "<td");
stripped = stripped.replace("</td", "</td");
stripped = stripped.replace("555px", "auto");
stripped = stripped.replace("border:1px", "border:0px");
stripped = stripped.replace("color:#000000;","");
return stripped;
}
function showTweets(data) {
ddiv = document.getElementById('news');
ddiv.innerHTML = "Received";
var html = ['<ul>'];
for (var i = 0; i < 15; i++) {
html.push('<li><img src="web.png"> <a href="ext', data.data[i].commit.url, '" onMouseOver="showDescr(', i+1, ')" onMouseOut="showDescr()">', data.data[i].commit.message, '</a></li>');
if ("message" in data.data[i].commit) {
linkDescriptions.push(stripTags(data.data[i].commit.message)+'<br/>'+data.data[i].commit.author.name+'<br/>'+data.data[i].commit.author.date);
} else {
linkDescriptions.push("");
}
}
html.push('</ul>');
html.push('<a href="exthttp://github.com/FreeCAD/FreeCAD/commits/master">""" + text63 + """<a/>');
ddiv.innerHTML = html.join('');
}
function showDescr(d) {
if (d) {
show(linkDescriptions[d-1]);
} else {
show("");
}
}
function scroller() {
desc = document.getElementById("description");
base = document.getElementById("column").offsetTop;
scro = window.scrollY;
if (scro > base) {
desc.className = "stick";
} else {
desc.className = "";
}
}
document.onmousemove=scroller;
</script>
<style type="text/css">
body {
background: #basecolor;
color: #textcolor;
font-family: Arial, Helvetica, Sans;
font-size: 11px;
}
a {
color: #linkcolor;
font-weight: bold;
text-decoration: none;
padding: 2px;
}
a:hover {
color: white;
background: #linkcolor;
border-radius: 5px;
}
p {
text-align: justify;
}
.left {
text-align: left;
}
h1 {
font-size: 3em;
letter-spacing: 2px;
padding: 20px 0 0 80px;
align: bottom;
color: #ffffff;
}
h2 {
font-size: 1.2em;
}
ul {
list-style-type: none;
padding: 0;
}
#column {
margin: 0 350px 0 10px;
}
#column img {
max-width: 14px;
}
.block {
background: #windowcolor;
border-radius: 5px;
padding: 8px;
margin-bottom: 10px;
color: #windowtextcolor;
width: auto;
}
.options {
clear: both;
}
.from {
font-size: 0.7em;
font-weight: normal;
}
#versionbox {
float: right;
text-align: right;
font-size: 0.33em;
font-weight: normal;
padding-right: 20px;
letter-spacing: 0;
color: #ffffff;
}
#description {
background: #windowcolor;
border-radius: 5px;
padding: 8px;
color: #windowtextcolor;
float: right;
width: 316px;
right: 10px;
height: 100%;
position: relative;
}
#description img {
max-width: 300px;
clear: both;
}
pre {
width: 300px !important;
white-space: pre-wrap;
}
.stick {
position: fixed !important;
top: 0px;
right: 18px !important;
}
</style>
</head>
<body onload="load()">
<h1><img src="FreeCAD.png"> """ + text01 + """<div id=versionbox> </div></h1>
<div id="description">
</div>
<div id="column">
<div class="block">
<h2>""" + text02 + """</h2>
defaultworkbenches
</div>
<div class="block">
<h2>""" + text03 + """</h2>
recentfiles
</div>
<div class="block">
<h2>""" + text05 + """</h2>
<div id="news">news feed</div>
</div>
<div class="block">
<h2>""" + text06 + """</h2>
defaultlinks
</div>
<div class="block">
<h2>""" + text09 + """</h2>
defaultexamples
</div>
customblocks
</div>
<!--
<form class="options">
<input type="checkbox" name="closeThisDialog">
""" + text17 + """<br/>
<input type="checkbox" name="dontShowAgain">
""" + text18 + """
</form>
-->
</body>
</html>
"""
def getWebExamples():
    "returns an html list of example files hosted on the web"
    # (url, translated label) pairs; text## globals are translated strings
    entries = (
        ("http://freecad-project.de/svn/ExampleData/FileFormates/Schenkel.stp",
         text15),
        ("http://freecad-project.de/svn/ExampleData/Examples/CAD/Complex.FCStd",
         text16),
    )
    items = ['<li><a href="%s">%s</a></li>' % (url, label)
             for url, label in entries]
    return "\n<ul>\n" + "\n".join(items) + "\n</ul>"
def getExamples():
    "returns an html list of bundled example files"
    # (loader script, translated label) pairs, in display order
    entries = (("LoadSchenkel.py", text10),
               ("LoadPartDesignExample.py", text11),
               ("LoadDrawingExample.py", text12),
               ("LoadRobotExample.py", text13),
               ("LoadArchExample.py", text55),
               ("LoadFemExample.py", text60))
    items = ['<li><img src="FreeCAD.png" style="width: 16px"> '
             '<a href="%s">%s</a></li>' % (script, label)
             for script, label in entries]
    return "\n<ul>\n" + "\n".join(items) + "\n</ul>"
def getLinks():
    "returns an html list of useful web links for the start page"
    # NOTE: hrefs prefixed with "ext" are opened in an external browser by
    # the page handler; show() fills the hover description panel. The text##
    # globals hold translated labels, descriptions and (text38/56/57) urls.
    return """
<ul>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text07 + """</p>')"
onMouseout="show('')"
href="exthttp://www.freecadweb.org/">""" + text08 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text45 + """</p>')"
onMouseout="show('')"
href=ext""" + text38 + """>""" + text37 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text46 + """</p>')"
onMouseout="show('')"
href=ext""" + text56 + """>""" + text39 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text47 + """</p>')"
onMouseout="show('')"
href=ext""" + text57 + """>""" + text40 + """</a></li>
<li><img src="web.png">
<a onMouseover="show('<p>""" + text48 + """</p>')"
onMouseout="show('')"
href="exthttp://freecad-tutorial.blogspot.com/">""" + text43 + """</a></li>
<li><img src="web.png">
<a href="exthttp://github.com/FreeCAD/FreeCAD/releases"
onMouseOver="show('<p>""" + text62 + """</p>')"
onMouseOut="show('')">""" + text61 + """</a></li>
</ul>"""
def getWorkbenches():
    "returns an html list of default workbench entries"
    # Each entry shows a title/description (and example screenshot) in the
    # hover panel via show(), and links to a loader script (e.g.
    # PartDesign.py) or, for the first entry, to the url in text51.
    return """
<ul>
<li><img src="blank.png">
<a onMouseover="show('<h3>""" + text49 + """</h3> \
<p>""" + text50 + """</p>')"
onMouseout="show('')"
href=""" + text51 + """>""" + text49 + """</a>
</li>
<li><img src="PartDesign.png">
<a onMouseover="show('<h3>""" + text19 + """</h3> \
<p>""" + text20 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=PartDesignExample.png>')"
onMouseout="show('')"
href="PartDesign.py">""" + text22 + """</a>
</li>
<li><img src="ArchDesign.png">
<a onMouseover="show('<h3>""" + text23 + """</h3> \
<p>""" + text24 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=ArchExample.png>')"
onMouseout="show('')"
href="ArchDesign.py">""" + text25 + """</a>
</li>
<li><img src="Ship.png">
<a onMouseover="show('<h3>""" + text53 + """</h3> \
<p>""" + text54 + """</p><p><small>""" + text21 + """ \
:</small></p><img src=ShipExample.png>')"
onMouseout="show('')"
href="Ship.py">""" + text52 + """</a>
</li>
<li><img src="Mesh.png">
<a onMouseover="show('<h3>""" + text26 + """</h3> \
<p>""" + text27 + """</p><p>""" + text28 + """</p>')"
onMouseout="show('')"
href="Mesh.py">""" + text29 + """</a>
</li>
</ul>"""
def getInfo(filename):
    "returns an html summary (name, size, dates, FCStd preview) of a file"

    def getLocalTime(timestamp):
        "returns a local time string from a timestamp"
        return time.strftime("%m/%d/%Y %H:%M:%S",time.localtime(timestamp))

    def getSize(size):
        "returns a human-readable size"
        # // makes the integer division explicit (same result as the old
        # py2 "/" here, and still correct if run under python 3)
        if size > 1024*1024:
            hsize = str(size//(1024*1024)) + "Mb"
        elif size > 1024:
            hsize = str(size//1024) + "Kb"
        else:
            hsize = str(size) + "b"
        return hsize

    html = '<h3>'+os.path.basename(filename)+'</h3>'
    if os.path.exists(filename):
        # get normal file info
        s = os.stat(filename)
        html += "<p>" + text33 + " " + getSize(s.st_size) + "<br/>"
        html += text34 + " " + getLocalTime(s.st_ctime) + "<br/>"
        html += text35 + " " + getLocalTime(s.st_mtime) + "<br/>"
        html += "<span>" + text36 + " " + filename + "</span></p>"
        # get additional info from fcstd files
        if os.path.splitext(filename)[1].upper() in [".FCSTD"]:
            zfile = zipfile.ZipFile(filename)
            try:
                files = zfile.namelist()
                # check for meta-file if it's really a FreeCAD document
                # (guard against empty archives before indexing files[0])
                if files and files[0] == "Document.xml":
                    html += "<p>FreeCAD Standard File</p>"
                    image = "thumbnails/Thumbnail.png"
                    if image in files:
                        image = zfile.read(image)
                        # mkstemp hands back an already-open OS descriptor;
                        # wrap it with fdopen so it gets closed instead of
                        # leaked (the old code kept only the path)
                        fd, thumbfile = tempfile.mkstemp(suffix='.png')
                        thumb = os.fdopen(fd, "wb")
                        thumb.write(image)
                        thumb.close()
                        html += '<img src=file://'
                        html += thumbfile + '><br/>'
            finally:
                # the archive was never closed before; release it even on error
                zfile.close()
    else:
        html += "<p>" + text41 + "</p>"
    return html
def getRecentFiles():
    "returns a list of 3 latest recent files"
    params = FreeCAD.ParamGet("User parameter:BaseApp/Preferences/RecentFiles")
    count = params.GetInt("RecentFiles")
    parts = ['<ul>']
    # show at most the first three entries that still exist on disk
    for index in range(min(3, count)):
        path = params.GetString("MRU%d" % (index))
        if not os.path.exists(path):
            continue
        name = os.path.basename(path)
        if path[-5:].upper() == "FCSTD":
            icon = '<img src="freecad-doc.png" style="width: 16px"> '
        else:
            icon = '<img src="blank.png" style="width: 16px"> '
        parts.append('<li>' + icon
                     + '<a onMouseover="show(\'' + getInfo(path) + '\')" '
                     + 'onMouseout="show(\'\')" '
                     + 'href="LoadMRU' + str(index) + '.py">'
                     + name + '</a></li>')
    parts.append('</ul>')
    return ''.join(parts)
def getFeed(url,numitems=3):
    "returns a html list with links from the given RSS feed url"
    xml = parse(urllib.urlopen(url)).getroot()
    items = []
    channel = xml.find('channel')
    for element in channel.findall('item'):
        items.append({'title': element.find('title').text,
                      'description': element.find('description').text,
                      'link': element.find('link').text})
    if len(items) > numitems:
        items = items[:numitems]
    resp = '<ul>'
    for item in items:
        # strip inline styling and quotes so the description can be
        # embedded inside the single-quoted javascript show('...') call
        descr = re.compile("style=\".*?\"").sub('',item['description'])
        descr = re.compile("alt=\".*?\"").sub('',descr)
        descr = re.compile("\"").sub('',descr)
        # feeds without an image or summary span must not crash the page:
        # the old code indexed findall(...)[0] unconditionally
        imgs = re.findall("<img.*?>",descr)
        spans = re.findall("<span>.*?</span>",descr)
        descr = "<h3>" + item['title'] + "</h3>"
        if imgs:
            descr += imgs[0] + "<br/>"
        if spans:
            descr += spans[0]
        resp += '<li><a onMouseover="show(\''
        resp += descr
        resp += '\')" onMouseout="show(\'\')" href="'
        resp += item['link']
        resp += '">'
        resp += item['title']
        resp += '</a></li>'
    resp += '</ul>'
    # (debug "print resp" removed)
    return resp
def getCustomBlocks():
    "fetches custom html files in FreeCAD user dir"
    # Placeholder: custom block discovery is not implemented yet, so the
    # "customblocks" token in the page skeleton is replaced by nothing.
    return ""
def setColors(html):
    "gets theme colors from the system, and sets appropriate styles"
    # Fallback palette used when no Qt application palette is available.
    defaults = {"#basecolor":"#191B26",
                "#linkcolor":"#0092E8",
                "#textcolor":"#FFFFFF",
                "#windowcolor":"#FFFFFF",
                "#windowtextcolor":"#000000"}
    try:
        palette = QtGui.qApp.palette()
    except Exception:
        # No QApplication (or Qt not importable): keep the defaults.
        # Narrowed from a bare "except:" so Ctrl-C etc. are not swallowed.
        pass
    else:
        #defaults["#basecolor"] = palette.base().color().name()
        defaults["#basecolor"] = "#171A2B url(Background.jpg)"
        #defaults["#linkcolor"] = palette.link().color().name() # UGLY!!
        defaults["#textcolor"] = palette.text().color().name()
        defaults["#windowcolor"] = palette.window().color().name()
        defaults["#windowtextcolor"] = palette.windowText().color().name()
    # items() behaves identically to the old iteritems() here and also
    # works under python 3
    for k,v in defaults.items():
        html = html.replace(k,str(v))
    return html
def handle():
    "returns the complete html startpage"
    # Each placeholder token in the page skeleton is swapped for generated
    # content, in the same order as before.
    substitutions = (("recentfiles", getRecentFiles),
                     ("defaultworkbenches", getWorkbenches),
                     ("defaultlinks", getLinks),
                     ("defaultexamples", getExamples),
                     # ("webexamples", getWebExamples),  # currently disabled
                     ("customblocks", getCustomBlocks))
    html = page
    for placeholder, produce in substitutions:
        html = html.replace(placeholder, produce())
    # fetches system colors last, over the fully assembled page
    return setColors(html)
def exportTestFile():
    "dumps the generated startpage to ~/freecad-startpage.html for inspection"
    target = os.path.join(os.path.expanduser("~"), "freecad-startpage.html")
    # "with" guarantees the file is closed even if handle() raises
    with open(target, "wb") as f:
        f.write(handle())
| lgpl-2.1 |
darktears/chromium-crosswalk | chrome/common/extensions/docs/server2/jsc_view_test.py | 30 | 15249 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import unittest
from jsc_view import GetEventByNameFromEvents
from api_schema_graph import APISchemaGraph
from availability_finder import AvailabilityFinder, AvailabilityInfo
from branch_utility import BranchUtility, ChannelInfo
from compiled_file_system import CompiledFileSystem
from extensions_paths import CHROME_EXTENSIONS
from fake_host_file_system_provider import FakeHostFileSystemProvider
from fake_url_fetcher import FakeUrlFetcher
from features_bundle import FeaturesBundle
from future import Future
from host_file_system_iterator import HostFileSystemIterator
from jsc_view import CreateJSCView, _JSCViewBuilder, _FormatValue
from object_store_creator import ObjectStoreCreator
from schema_processor import SchemaProcessorFactoryForTest
from servlet import Request
from server_instance import ServerInstance
from test_data.api_data_source.canned_master_fs import CANNED_MASTER_FS_DATA
from test_data.canned_data import CANNED_API_FILE_SYSTEM_DATA
from test_data.object_level_availability.tabs import TABS_SCHEMA_BRANCHES
from test_file_system import TestFileSystem
from test_util import Server2Path
class _FakeTemplateCache(object):
  """Template cache double that fabricates a template body for any key."""

  def GetFromFile(self, key):
    rendered = 'motemplate %s' % key
    return Future(value=rendered)
class _FakeFeaturesBundle(object):
  """Features-bundle double exposing a fixed set of enabled API features."""

  def GetAPIFeatures(self):
    api_names = ('bluetooth', 'contextMenus', 'jsonStableAPI', 'idle',
                 'input.ime', 'tabs')
    return Future(value=dict((name, {'value': True}) for name in api_names))
class _FakeAvailabilityFinder(object):
  """Availability-finder double returning a fixed availability value."""

  def __init__(self, fake_availability):
    self._fake_availability = fake_availability

  def GetAPIAvailability(self, api_name):
    return self._fake_availability

  def GetAPINodeAvailability(self, api_name):
    # Build a node graph from the canned schema and stamp every node with
    # the same fake ChannelInfo (the tests never inspect it).
    canned_schema = json.loads(
        CANNED_MASTER_FS_DATA['api'][api_name + '.json'])
    node_graph = APISchemaGraph()
    node_graph.Update(APISchemaGraph(canned_schema),
                      lambda _: ChannelInfo('stable', '28', 28))
    return node_graph
class JSCViewTest(unittest.TestCase):
  """Tests CreateJSCView/_JSCViewBuilder output against canned schema data."""

  def setUp(self):
    # Build a ServerInstance over the canned in-memory file system and pull
    # out the pieces (json cache, features bundle, API models) the tests use.
    self._base_path = Server2Path('test_data', 'test_json')
    server_instance = ServerInstance.ForTest(
        TestFileSystem(CANNED_MASTER_FS_DATA, relative_to=CHROME_EXTENSIONS))
    file_system = server_instance.host_file_system_provider.GetMaster()
    self._json_cache = server_instance.compiled_fs_factory.ForJson(file_system)
    self._features_bundle = FeaturesBundle(file_system,
                                           server_instance.compiled_fs_factory,
                                           server_instance.object_store_creator,
                                           'extensions')
    self._api_models = server_instance.platform_bundle.GetAPIModels(
        'extensions')
    self._fake_availability = AvailabilityInfo(ChannelInfo('stable', '396', 5))

  def _ReadLocalFile(self, filename):
    # Helper: read a fixture file from test_data/test_json.
    with open(os.path.join(self._base_path, filename), 'r') as f:
      return f.read()

  def _LoadJSON(self, filename):
    # Helper: parse a fixture file as JSON.
    return json.loads(self._ReadLocalFile(filename))

  def _FakeLoadAddRulesSchema(self):
    # Helper: canned declarative-events schema wrapped like the real loader.
    events = self._LoadJSON('add_rules_def_test.json')
    return Future(value=GetEventByNameFromEvents(events))

  def testFormatValue(self):
    """_FormatValue inserts thousands separators."""
    self.assertEquals('1,234,567', _FormatValue(1234567))
    self.assertEquals('67', _FormatValue(67))
    self.assertEquals('234,567', _FormatValue(234567))

  def testGetEventByNameFromEvents(self):
    """GetEventByNameFromEvents asserts on malformed or duplicate schemas."""
    events = {}
    # Missing 'types' completely.
    self.assertRaises(AssertionError, GetEventByNameFromEvents, events)

    events['types'] = []
    # No type 'Event' defined.
    self.assertRaises(AssertionError, GetEventByNameFromEvents, events)

    events['types'].append({ 'name': 'Event',
                             'functions': []})
    add_rules = { "name": "addRules" }
    events['types'][0]['functions'].append(add_rules)
    self.assertEqual(add_rules,
                     GetEventByNameFromEvents(events)['addRules'])

    events['types'][0]['functions'].append(add_rules)
    # Duplicates are an error.
    self.assertRaises(AssertionError, GetEventByNameFromEvents, events)

  def testCreateId(self):
    """Types, properties, functions and events get stable anchor ids."""
    fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
    dict_ = CreateJSCView(
        self._api_models.GetContentScriptAPIs().Get(),
        self._api_models.GetModel('tester').Get(),
        fake_avail_finder,
        self._json_cache,
        _FakeTemplateCache(),
        self._features_bundle,
        None,
        'extensions',
        [],
        Request.ForTest(''))
    self.assertEquals('type-TypeA', dict_['types'][0]['id'])
    self.assertEquals('property-TypeA-b',
                      dict_['types'][0]['properties'][0]['id'])
    self.assertEquals('method-get', dict_['functions'][0]['id'])
    self.assertEquals('event-EventA', dict_['events'][0]['id'])

  # TODO(kalman): re-enable this when we have a rebase option.
  def DISABLED_testToDict(self):
    fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
    expected_json = self._LoadJSON('expected_tester.json')
    dict_ = CreateJSCView(
        self._api_models.GetContentScriptAPIs().Get(),
        self._api_models.GetModel('tester').Get(),
        fake_avail_finder,
        self._json_cache,
        _FakeTemplateCache(),
        self._features_bundle,
        None,
        'extensions',
        [],
        Request.ForTest(''))
    self.assertEquals(expected_json, dict_)

  def testAddRules(self):
    """Declarative events expose addRules/addListener in their byName map."""
    fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
    dict_ = CreateJSCView(
        self._api_models.GetContentScriptAPIs().Get(),
        self._api_models.GetModel('add_rules_tester').Get(),
        fake_avail_finder,
        self._json_cache,
        _FakeTemplateCache(),
        self._features_bundle,
        self._FakeLoadAddRulesSchema(),
        'extensions',
        [],
        Request.ForTest(''))

    # Check that the first event has the addRulesFunction defined.
    self.assertEquals('add_rules_tester', dict_['name'])
    self.assertEquals('rules', dict_['events'][0]['name'])
    self.assertEquals('notable_name_to_check_for',
                      dict_['events'][0]['byName']['addRules'][
                          'parameters'][0]['name'])

    # Check that the second event has addListener defined.
    self.assertEquals('noRules', dict_['events'][1]['name'])
    self.assertEquals('add_rules_tester', dict_['name'])
    self.assertEquals('noRules', dict_['events'][1]['name'])
    self.assertEquals('callback',
                      dict_['events'][0]['byName']['addListener'][
                          'parameters'][0]['name'])

  def testGetIntroList(self):
    """_GetIntroTableList renders description/availability/permission rows."""
    fake_avail_finder = _FakeAvailabilityFinder(self._fake_availability)
    model = _JSCViewBuilder(
        self._api_models.GetContentScriptAPIs().Get(),
        self._api_models.GetModel('tester').Get(),
        fake_avail_finder,
        self._json_cache,
        _FakeTemplateCache(),
        self._features_bundle,
        None,
        'extensions',
        [])
    expected_list = [
      { 'title': 'Description',
        'content': [
          { 'text': 'a test api' }
        ]
      },
      { 'title': 'Availability',
        'content': [
          { 'partial': 'motemplate chrome/common/extensions/docs/' +
                       'templates/private/intro_tables/stable_message.html',
            'version': 5,
            'scheduled': None
          }
        ]
      },
      { 'title': 'Permissions',
        'content': [
          { 'class': 'override',
            'text': '"tester"'
          },
          { 'text': 'is an API for testing things.' }
        ]
      },
      { 'title': 'Manifest',
        'content': [
          { 'class': 'code',
            'text': '"tester": {...}'
          }
        ]
      },
      { 'title': 'Content Scripts',
        'content': [
          {
            'partial': 'motemplate chrome/common/extensions/docs' +
                       '/templates/private/intro_tables/content_scripts.html',
            'contentScriptSupport': {
              'name': 'tester',
              'restrictedTo': None
            }
          }
        ]
      },
      { 'title': 'Learn More',
        'content': [
          { 'link': 'https://tester.test.com/welcome.html',
            'text': 'Welcome!'
          }
        ]
      }
    ]
    self.assertEquals(model._GetIntroTableList(), expected_list)

    # Tests the same data with a scheduled availability.
    fake_avail_finder = _FakeAvailabilityFinder(
        AvailabilityInfo(ChannelInfo('beta', '1453', 27), scheduled=28))
    model = _JSCViewBuilder(
        self._api_models.GetContentScriptAPIs().Get(),
        self._api_models.GetModel('tester').Get(),
        fake_avail_finder,
        self._json_cache,
        _FakeTemplateCache(),
        self._features_bundle,
        None,
        'extensions',
        [])
    expected_list[1] = {
      'title': 'Availability',
      'content': [
        { 'partial': 'motemplate chrome/common/extensions/docs/' +
                     'templates/private/intro_tables/beta_message.html',
          'version': 27,
          'scheduled': 28
        }
      ]
    }
    self.assertEquals(model._GetIntroTableList(), expected_list)
class JSCViewWithoutNodeAvailabilityTest(unittest.TestCase):
  """Tests whole-API availability resolution via the real AvailabilityFinder."""

  def setUp(self):
    # Server over the canned multi-branch file system; the availability
    # finder resolves each API's introduction version from it.
    server_instance = ServerInstance.ForTest(
        file_system_provider=FakeHostFileSystemProvider(
            CANNED_API_FILE_SYSTEM_DATA))
    self._api_models = server_instance.platform_bundle.GetAPIModels(
        'extensions')
    self._json_cache = server_instance.compiled_fs_factory.ForJson(
        server_instance.host_file_system_provider.GetMaster())
    self._avail_finder = server_instance.platform_bundle.GetAvailabilityFinder(
        'extensions')

  def testGetAPIAvailability(self):
    """Each API's intro table shows the expected channel/version."""
    # Expected availability per API: a version number, or 'master' for
    # APIs not yet in any release.
    api_availabilities = {
      'bluetooth': 31,
      'contextMenus': 'master',
      'jsonStableAPI': 20,
      'idle': 5,
      'input.ime': 18,
      'tabs': 18
    }
    for api_name, availability in api_availabilities.iteritems():
      model_dict = CreateJSCView(
          self._api_models.GetContentScriptAPIs().Get(),
          self._api_models.GetModel(api_name).Get(),
          self._avail_finder,
          self._json_cache,
          _FakeTemplateCache(),
          _FakeFeaturesBundle(),
          None,
          'extensions',
          [],
          Request.ForTest(''))
      self.assertEquals(availability,
                        model_dict['introList'][1]['content'][0]['version'])
class JSCViewWithNodeAvailabilityTest(unittest.TestCase):
  """Tests per-node (type/property/function/event) availability resolution."""

  def setUp(self):
    # Versions at which the canned tabs schema did NOT change; all other
    # versions get their file-system stats bumped below so the availability
    # finder sees a modification.
    tabs_unmodified_versions = (16, 20, 23, 24)
    self._branch_utility = BranchUtility(
        os.path.join('branch_utility', 'first.json'),
        os.path.join('branch_utility', 'second.json'),
        FakeUrlFetcher(Server2Path('test_data')),
        ObjectStoreCreator.ForTest())
    self._node_fs_creator = FakeHostFileSystemProvider(TABS_SCHEMA_BRANCHES)
    self._node_fs_iterator = HostFileSystemIterator(self._node_fs_creator,
                                                    self._branch_utility)
    test_object_store = ObjectStoreCreator.ForTest()
    self._avail_finder = AvailabilityFinder(
        self._branch_utility,
        CompiledFileSystem.Factory(test_object_store),
        self._node_fs_iterator,
        self._node_fs_creator.GetMaster(),
        test_object_store,
        'extensions',
        SchemaProcessorFactoryForTest())

    server_instance = ServerInstance.ForTest(
        file_system_provider=FakeHostFileSystemProvider(
            TABS_SCHEMA_BRANCHES))
    self._api_models = server_instance.platform_bundle.GetAPIModels(
        'extensions')
    self._json_cache = server_instance.compiled_fs_factory.ForJson(
        server_instance.host_file_system_provider.GetMaster())

    # Imitate the actual SVN file system by incrementing the stats for paths
    # where an API schema has changed.
    last_stat = type('last_stat', (object,), {'val': 0})

    def stat_paths(file_system, channel_info):
      if channel_info.version not in tabs_unmodified_versions:
        last_stat.val += 1
      # HACK: |file_system| is a MockFileSystem backed by a TestFileSystem.
      # Increment the TestFileSystem stat count.
      file_system._file_system.IncrementStat(by=last_stat.val)
      # Continue looping. The iterator will stop after 'master' automatically.
      return True

    # Use the HostFileSystemIterator created above to change global stat values
    # for the TestFileSystems that it creates.
    self._node_fs_iterator.Ascending(
        # The earliest version represented with the tabs' test data is 13.
        self._branch_utility.GetStableChannelInfo(13),
        stat_paths)

  def testGetAPINodeAvailability(self):
    """Nodes matching their API's availability get None; others a version."""
    def assertEquals(node, actual):
      # None means the node has the same availability as its parent API.
      node_availabilities = {
        'tabs.Tab': None,
        'tabs.fakeTabsProperty1': None,
        'tabs.get': None,
        'tabs.onUpdated': None,
        'tabs.InjectDetails': 25,
        'tabs.fakeTabsProperty2': 15,
        'tabs.getCurrent': 19,
        'tabs.onActivated': 30
      }
      self.assertEquals(node_availabilities[node], actual)

    model_dict = CreateJSCView(
        self._api_models.GetContentScriptAPIs().Get(),
        self._api_models.GetModel('tabs').Get(),
        self._avail_finder,
        self._json_cache,
        _FakeTemplateCache(),
        _FakeFeaturesBundle(),
        None,
        'extensions',
        [],
        Request.ForTest(''))

    # Test nodes that have the same availability as their parent.

    # Test type.
    assertEquals('tabs.Tab', model_dict['types'][0]['availability'])

    # Test property.
    assertEquals('tabs.fakeTabsProperty1',
                 model_dict['properties'][1]['availability'])

    # Test function.
    assertEquals('tabs.get', model_dict['functions'][1]['availability'])

    # Test event.
    assertEquals('tabs.onUpdated', model_dict['events'][1]['availability'])

    # Test nodes with varying availabilities.

    # Test type.
    assertEquals('tabs.InjectDetails',
                 model_dict['types'][1]['availability']['version'])

    # Test property.
    assertEquals('tabs.fakeTabsProperty2',
                 model_dict['properties'][3]['availability']['version'])

    # Test function.
    assertEquals('tabs.getCurrent',
                 model_dict['functions'][0]['availability']['version'])

    # Test event.
    assertEquals('tabs.onActivated',
                 model_dict['events'][0]['availability']['version'])

    # Test a node that became deprecated.
    self.assertEquals({
      'scheduled': None,
      'version': 26,
      'partial': 'motemplate chrome/common/extensions/docs/templates/' +
          'private/intro_tables/deprecated_message.html'
    }, model_dict['types'][2]['availability'])
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/pygments/lexers/felix.py | 72 | 9410 | # -*- coding: utf-8 -*-
"""
pygments.lexers.felix
~~~~~~~~~~~~~~~~~~~~~
Lexer for the Felix language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, include, bygroups, default, words, \
combined
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['FelixLexer']
class FelixLexer(RegexLexer):
    """
    For `Felix <http://www.felix-lang.org>`_ source code.

    .. versionadded:: 1.2
    """

    name = 'Felix'
    aliases = ['felix', 'flx']
    filenames = ['*.flx', '*.flxh']
    mimetypes = ['text/x-felix']

    # Word lists below drive the keyword/type/constant token rules.
    preproc = (
        'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
    )

    keywords = (
        '_', '_deref', 'all', 'as',
        'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
        'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
        'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
        'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
        'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
        'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
        'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
        'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
        'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
        'when', 'whilst', 'with', 'yield',
    )

    keyword_directives = (
        '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
        'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
        'package', 'private', 'pod', 'property', 'public', 'publish',
        'requires', 'todo', 'virtual', 'use',
    )

    keyword_declarations = (
        'def', 'let', 'ref', 'val', 'var',
    )

    keyword_types = (
        'unit', 'void', 'any', 'bool',
        'byte', 'offset',
        'address', 'caddress', 'cvaddress', 'vaddress',
        'tiny', 'short', 'int', 'long', 'vlong',
        'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
        'int8', 'int16', 'int32', 'int64',
        'uint8', 'uint16', 'uint32', 'uint64',
        'float', 'double', 'ldouble',
        'complex', 'dcomplex', 'lcomplex',
        'imaginary', 'dimaginary', 'limaginary',
        'char', 'wchar', 'uchar',
        'charp', 'charcp', 'ucharp', 'ucharcp',
        'string', 'wstring', 'ustring',
        'cont',
        'array', 'varray', 'list',
        'lvalue', 'opt', 'slice',
    )

    keyword_constants = (
        'false', 'true',
    )

    operator_words = (
        'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
    )

    name_builtins = (
        '_svc', 'while',
    )

    name_pseudo = (
        'root', 'self', 'this',
    )

    # Optional suffix pattern shared by all integer-literal rules below.
    decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'

    # Lexer state machine: each state is a list of (regex, token[, new state]).
    tokens = {
        'root': [
            include('whitespace'),

            # Keywords
            (words(('axiom', 'ctor', 'fun', 'gen', 'proc', 'reduce',
                    'union'), suffix=r'\b'),
             Keyword, 'funcname'),
            (words(('class', 'cclass', 'cstruct', 'obj', 'struct'), suffix=r'\b'),
             Keyword, 'classname'),
            (r'(instance|module|typeclass)\b', Keyword, 'modulename'),

            (words(keywords, suffix=r'\b'), Keyword),
            (words(keyword_directives, suffix=r'\b'), Name.Decorator),
            (words(keyword_declarations, suffix=r'\b'), Keyword.Declaration),
            (words(keyword_types, suffix=r'\b'), Keyword.Type),
            (words(keyword_constants, suffix=r'\b'), Keyword.Constant),

            # Operators
            include('operators'),

            # Float Literal
            # -- Hex Float
            (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
             r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
            # -- DecimalFloat
            (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
             r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
            (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
             Number.Float),

            # IntegerLiteral
            # -- Binary
            (r'0[Bb][01_]+%s' % decimal_suffixes, Number.Bin),
            # -- Octal
            (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
            # -- Hexadecimal
            (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
            # -- Decimal
            (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),

            # Strings
            ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
            ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
            ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
            ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
            ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
            ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
            ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
            ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),

            # Punctuation
            (r'[\[\]{}:(),;?]', Punctuation),

            # Labels
            (r'[a-zA-Z_]\w*:>', Name.Label),

            # Identifiers
            (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
            (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'whitespace': [
            (r'\n', Text),
            (r'\s+', Text),

            include('comment'),

            # Preprocessor
            (r'#\s*if\s+0', Comment.Preproc, 'if0'),
            (r'#', Comment.Preproc, 'macro'),
        ],
        'operators': [
            (r'(%s)\b' % '|'.join(operator_words), Operator.Word),
            (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
        ],
        'comment': [
            (r'//(.*?)\n', Comment.Single),
            (r'/[*]', Comment.Multiline, 'comment2'),
        ],
        'comment2': [
            (r'[^/*]', Comment.Multiline),
            (r'/[*]', Comment.Multiline, '#push'),
            (r'[*]/', Comment.Multiline, '#pop'),
            (r'[/*]', Comment.Multiline),
        ],
        'if0': [
            (r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
            (r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
            (r'.*?\n', Comment),
        ],
        'macro': [
            include('comment'),
            (r'(import|include)(\s+)(<[^>]*?>)',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'(import|include)(\s+)("[^"]*?")',
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r"(import|include)(\s+)('[^']*?')",
             bygroups(Comment.Preproc, Text, String), '#pop'),
            (r'[^/\n]+', Comment.Preproc),
            # (r'/[*](.|\n)*?[*]/', Comment),
            # (r'//.*?\n', Comment, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'funcname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
            # anonymous functions
            (r'(?=\()', Text, '#pop'),
        ],
        'classname': [
            include('whitespace'),
            (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
            # anonymous classes
            (r'(?=\{)', Text, '#pop'),
        ],
        'modulename': [
            include('whitespace'),
            (r'\[', Punctuation, ('modulename2', 'tvarlist')),
            default('modulename2'),
        ],
        'modulename2': [
            include('whitespace'),
            (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
        ],
        'tvarlist': [
            include('whitespace'),
            include('operators'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'(with|where)\b', Keyword),
            (r'[a-zA-Z_]\w*', Name),
        ],
        'stringescape': [
            (r'\\([\\abfnrtv"\']|\n|N\{.*?\}|u[a-fA-F0-9]{4}|'
             r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
        ],
        'strings': [
            (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
             '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
            (r'[^\\\'"%\n]+', String),
            # quotes, percents and backslashes must be parsed one at a time
            (r'[\'"\\]', String),
            # unhandled string formatting sign
            (r'%', String)
            # newlines are an error (use "nl" state)
        ],
        'nl': [
            (r'\n', String)
        ],
        'dqs': [
            (r'"', String, '#pop'),
            # included here again for raw strings
            (r'\\\\|\\"|\\\n', String.Escape),
            include('strings')
        ],
        'sqs': [
            (r"'", String, '#pop'),
            # included here again for raw strings
            (r"\\\\|\\'|\\\n", String.Escape),
            include('strings')
        ],
        'tdqs': [
            (r'"""', String, '#pop'),
            include('strings'),
            include('nl')
        ],
        'tsqs': [
            (r"'''", String, '#pop'),
            include('strings'),
            include('nl')
        ],
    }
| bsd-3-clause |
samarthmed/emacs-config | .python-environments/default/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/escsm.py | 2930 | 7839 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# HZ-GB-2312: byte -> character-class table. Indexed by raw byte value
# (0x00-0xFF); each entry is one of the 6 classes consumed by the state
# machine (class 1 = non-ASCII / control bytes of interest, 4/5/2 mark the
# '{', '}', '~' bytes of the "~{ ... ~}" escape pairs).
HZ_cls = (
1,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,4,0,5,2,0, # 78 - 7f
1,1,1,1,1,1,1,1, # 80 - 87
1,1,1,1,1,1,1,1, # 88 - 8f
1,1,1,1,1,1,1,1, # 90 - 97
1,1,1,1,1,1,1,1, # 98 - 9f
1,1,1,1,1,1,1,1, # a0 - a7
1,1,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,1,1,1,1,1,1, # c0 - c7
1,1,1,1,1,1,1,1, # c8 - cf
1,1,1,1,1,1,1,1, # d0 - d7
1,1,1,1,1,1,1,1, # d8 - df
1,1,1,1,1,1,1,1, # e0 - e7
1,1,1,1,1,1,1,1, # e8 - ef
1,1,1,1,1,1,1,1, # f0 - f7
1,1,1,1,1,1,1,1, # f8 - ff
)
# HZ-GB-2312 state-transition table. Flattened 2-D table indexed by
# current_state * classFactor + byte_class; eItsMe means the input has been
# positively identified, eError rejects it.
HZ_st = (
eStart,eError, 3,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart, 4,eError,# 10-17
5,eError, 6,eError, 5, 5, 4,eError,# 18-1f
4,eError, 4, 4, 4,eError, 4,eError,# 20-27
4,eItsMe,eStart,eStart,eStart,eStart,eStart,eStart,# 28-2f
)
# Per-class character lengths: all zero because the escape-scheme prober
# only identifies the encoding, it never decodes characters.
HZCharLenTable = (0, 0, 0, 0, 0, 0)
# Packaged model in the shape expected by CodingStateMachine.
HZSMModel = {'classTable': HZ_cls,
             'classFactor': 6,
             'stateTable': HZ_st,
             'charLenTable': HZCharLenTable,
             'name': "HZ-GB-2312"}
# ISO-2022-CN: byte -> character-class table (9 classes). Class 1 is ESC
# (0x1b), classes 3/4 mark the '(' and 'C' bytes of the designation
# sequences, class 2 covers bytes that cannot appear in this encoding.
ISO2022CN_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,4,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
# ISO-2022-CN state-transition table; flattened, indexed by
# current_state * classFactor + byte_class.
ISO2022CN_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eError,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eError,eError,eError, 4,eError,# 18-1f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 20-27
5, 6,eError,eError,eError,eError,eError,eError,# 28-2f
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 30-37
eError,eError,eError,eError,eError,eItsMe,eError,eStart,# 38-3f
)
# Escape-scheme probing never decodes characters, hence all zeros.
ISO2022CNCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0)
# Packaged model in the shape expected by CodingStateMachine.
ISO2022CNSMModel = {'classTable': ISO2022CN_cls,
                    'classFactor': 9,
                    'stateTable': ISO2022CN_st,
                    'charLenTable': ISO2022CNCharLenTable,
                    'name': "ISO-2022-CN"}
# ISO-2022-JP: byte -> character-class table (10 classes). Class 1 is ESC
# (0x1b); classes 3-9 mark the individual bytes ('(', '$', 'B', 'D', 'J',
# 'I', '@') that make up the various JIS designation sequences.
ISO2022JP_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,2,2, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,7,0,0,0, # 20 - 27
3,0,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
6,0,4,0,8,0,0,0, # 40 - 47
0,9,5,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
# ISO-2022-JP state-transition table; flattened, indexed by
# current_state * classFactor + byte_class.
ISO2022JP_st = (
eStart, 3,eError,eStart,eStart,eStart,eStart,eStart,# 00-07
eStart,eStart,eError,eError,eError,eError,eError,eError,# 08-0f
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 10-17
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,# 18-1f
eError, 5,eError,eError,eError, 4,eError,eError,# 20-27
eError,eError,eError, 6,eItsMe,eError,eItsMe,eError,# 28-2f
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,# 30-37
eError,eError,eError,eItsMe,eError,eError,eError,eError,# 38-3f
eError,eError,eError,eError,eItsMe,eError,eStart,eStart,# 40-47
)
# Escape-scheme probing never decodes characters, hence all zeros.
ISO2022JPCharLenTable = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
# Packaged model in the shape expected by CodingStateMachine.
ISO2022JPSMModel = {'classTable': ISO2022JP_cls,
                    'classFactor': 10,
                    'stateTable': ISO2022JP_st,
                    'charLenTable': ISO2022JPCharLenTable,
                    'name': "ISO-2022-JP"}
# ISO-2022-KR: byte -> character-class table (6 classes). Class 1 is ESC
# (0x1b); classes 3/4/5 mark the '$', ')', 'C' bytes of the "ESC $ ) C"
# designation sequence.
ISO2022KR_cls = (
2,0,0,0,0,0,0,0, # 00 - 07
0,0,0,0,0,0,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,1,0,0,0,0, # 18 - 1f
0,0,0,0,3,0,0,0, # 20 - 27
0,4,0,0,0,0,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,5,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
2,2,2,2,2,2,2,2, # 80 - 87
2,2,2,2,2,2,2,2, # 88 - 8f
2,2,2,2,2,2,2,2, # 90 - 97
2,2,2,2,2,2,2,2, # 98 - 9f
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,2, # f8 - ff
)
# ISO-2022-KR state-transition table; flattened, indexed by
# current_state * classFactor + byte_class.
ISO2022KR_st = (
eStart, 3,eError,eStart,eStart,eStart,eError,eError,# 00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,# 08-0f
eItsMe,eItsMe,eError,eError,eError, 4,eError,eError,# 10-17
eError,eError,eError,eError, 5,eError,eError,eError,# 18-1f
eError,eError,eError,eItsMe,eStart,eStart,eStart,eStart,# 20-27
)
# Escape-scheme probing never decodes characters, hence all zeros.
ISO2022KRCharLenTable = (0, 0, 0, 0, 0, 0)
# Packaged model in the shape expected by CodingStateMachine.
ISO2022KRSMModel = {'classTable': ISO2022KR_cls,
                    'classFactor': 6,
                    'stateTable': ISO2022KR_st,
                    'charLenTable': ISO2022KRCharLenTable,
                    'name': "ISO-2022-KR"}
# flake8: noqa
| gpl-2.0 |
Nikita1710/ANUFifty50-Online-Mentoring-Platform | project/fifty_fifty/webcore/views.py | 1 | 4115 | from django.shortcuts import render, get_object_or_404
from django.core.mail import send_mail, BadHeaderError
from django.contrib import messages
from django.conf import settings
from django.contrib.auth.decorators import login_required
from content.models import Mentee, Mentor, Content_Summary
from blog.models import Post
from webcore.models import Profile
from feedback.forms import FeedbackForm
from feedback.models import Feedback_contact
from django.utils import timezone
#from content
# Create your views here.
def home(request):
    """Render the public landing page.

    Fix: the original used ``context = locals()``, which silently exposes
    every local name to the template. At that point ``locals()`` contained
    exactly ``{'request': request}``, so the explicit dict below is
    behaviour-identical while making the template contract visible.
    """
    context = {'request': request}
    return render(request, 'index.html', context)
@login_required
def userProfile(request):
    """Mentee dashboard: the logged-in user plus every content summary."""
    context = {
        'user': request.user,
        'summary_list': Content_Summary.objects.all(),
    }
    return render(request, 'menteelogin.html', context)
@login_required
def userProfileNews(request):
    """News feed: blog posts whose publish date has passed, oldest first."""
    published_posts = Post.objects.filter(
        published_date__lte=timezone.now()
    ).order_by('published_date')
    return render(request, 'blog/post_list.html', {'posts': published_posts})
## post_detail views the blog posts individually
@login_required
def post_detail(request, pk):
    """Display a single blog post by primary key (404 when missing)."""
    return render(
        request,
        'blog/post_detail.html',
        {'post': get_object_or_404(Post, pk=pk)},
    )
@login_required
def userProfileMentor(request):
    """Mentor landing page.

    NOTE(review): unlike the sibling views, no context dict is passed, so
    the template only sees context-processor variables — confirm this is
    intentional.
    """
    return render(request, 'mentor.html')
@login_required
def userProfileResources(request):
    """Resources page: the user plus full listings of posts, mentees,
    mentors and content summaries."""
    context = {
        'user': request.user,
        'post_list': Post.objects.all(),
        'mentee_list': Mentee.objects.all(),
        'mentor_list': Mentor.objects.all(),
        'Content_Summary_list': Content_Summary.objects.all(),
    }
    return render(request, 'resources.html', context)
@login_required
def userProfileFAQ(request):
    """Static FAQ page for the logged-in user."""
    return render(request, 'FAQ.html', {'user': request.user})
@login_required
def userProfileProfile(request):
    """Profile page showing the logged-in user's details."""
    return render(request, 'profile.html', {'user': request.user})
@login_required
def userProfileContent(request):
    """Content page: the user plus all mentee and mentor records."""
    context = {
        'user': request.user,
        'mentee_list': Mentee.objects.all(),
        'mentor_list': Mentor.objects.all(),
    }
    return render(request, 'content.html', context)
@login_required
def userProfileSettings(request):
    """Account-settings page for the logged-in user."""
    return render(request, 'settings.html', {'user': request.user})
@login_required
def feedback_process(request):
    """Handle the feedback contact form.

    GET renders a blank form; a valid POST e-mails the feedback to the
    site address (``settings.EMAIL_HOST_USER``) and, only if the e-mail
    went out, stores the feedback row tied to the user's Profile. Results
    are surfaced through the Django messages framework. An invalid POST
    falls through and re-renders the bound form with its errors.
    """
    # The logged-in user's Profile row (404 if it is missing).
    User = get_object_or_404(Profile, pk=request.user.pk)
    contact_template = 'feedback/feedback_contact.html'
    # sucess_template = 'thanks.html'
    # if this is a POST request we need to process the form data
    if request.method == 'POST':
        # create a form instance and populate it with data from the request:
        form = FeedbackForm(request.POST)
        # check whether it's valid:
        if form.is_valid():
            receiver_email = settings.EMAIL_HOST_USER
            # Subject line is derived from the profile's role.
            subject = form.subject(User.role)
            message = form.cleaned_data['message']
            # handle email exceptions
            try:
                send_mail(subject, message, request.user.email, [receiver_email])
            except Exception as ex:
                data = messages.add_message(request, messages.ERROR, 'An error occurred. {}'.format(str(ex)))
            else:
                # Persist the feedback only after the e-mail succeeded.
                feedback_form = form.save(commit=False)
                # feedback_form.receiver_email = receiver_email
                feedback_form.user = User
                feedback_form.save()
                data = messages.add_message(request, messages.INFO, 'Thanks for sending a feedback.')
            # render thank you message
            # NOTE(review): messages.add_message() returns None, so
            # {'message': data} always passes None; presumably the template
            # reads the messages framework instead — confirm.
            return render(request, contact_template, {'message': data})
    # if a GET (or any other method) we'll create a blank form
    else:
        # NOTE(review): this constructor call (keyword ``user``) differs
        # from the POST branch (positional data) — verify FeedbackForm
        # supports both signatures.
        form = FeedbackForm(user=User.user)
    return render(request, contact_template, {'form': form})
| apache-2.0 |
ThinkOpen-Solutions/odoo | addons/account/project/__init__.py | 427 | 1100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.3/tests/regressiontests/admin_views/tests.py | 46 | 134255 | # coding: utf-8
import re
import datetime
import urlparse
from django.conf import settings
from django.core import mail
from django.core.exceptions import SuspiciousOperation
from django.core.files import temp as tempfile
from django.core.urlresolvers import reverse
# Register auth models with the admin.
from django.contrib.auth import REDIRECT_FIELD_NAME, admin
from django.contrib.auth.models import User, Permission, UNUSABLE_PASSWORD
from django.contrib.contenttypes.models import ContentType
from django.contrib.admin.models import LogEntry, DELETION
from django.contrib.admin.sites import LOGIN_FORM_KEY
from django.contrib.admin.util import quote
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.views.main import IS_POPUP_VAR
from django.forms.util import ErrorList
import django.template.context
from django.test import TestCase
from django.utils import formats
from django.utils.cache import get_max_age
from django.utils.encoding import iri_to_uri
from django.utils.html import escape
from django.utils.http import urlencode
from django.utils.translation import activate, deactivate
from django.utils import unittest
# local test models
from models import (Article, BarAccount, CustomArticle, EmptyModel,
FooAccount, Gallery, ModelWithStringPrimaryKey,
Person, Persona, Picture, Podcast, Section, Subscriber, Vodcast,
Language, Collector, Widget, Grommet, DooHickey, FancyDoodad, Whatsit,
Category, Post, Plot, FunkyTag, Chapter, Book, Promo, WorkHour, Employee,
Question, Answer, Inquisition, Actor, FoodDelivery,
RowLevelChangePermissionModel, Paper, CoverLetter, Story, OtherStory)
class AdminViewBasicTest(TestCase):
    """Smoke tests for the core admin views: add/change GET and POST,
    changelist sorting and filtering, i18n/l10n of the JS catalog, and
    sanitization of changelist lookup parameters. Subclasses re-run the
    whole suite against another AdminSite by overriding ``urlbit``."""

    fixtures = ['admin-views-users.xml', 'admin-views-colors.xml',
                'admin-views-fabrics.xml', 'admin-views-books.xml']

    # Store the bit of the URL where the admin is registered as a class
    # variable. That way we can test a second AdminSite just by subclassing
    # this test case and changing urlbit.
    urlbit = 'admin'

    def setUp(self):
        # Save i18n/l10n settings so tearDown can restore them.
        self.old_USE_I18N = settings.USE_I18N
        self.old_USE_L10N = settings.USE_L10N
        self.old_LANGUAGE_CODE = settings.LANGUAGE_CODE
        self.client.login(username='super', password='secret')
        settings.USE_I18N = True

    def tearDown(self):
        settings.USE_I18N = self.old_USE_I18N
        settings.USE_L10N = self.old_USE_L10N
        settings.LANGUAGE_CODE = self.old_LANGUAGE_CODE
        self.client.logout()
        formats.reset_format_cache()

    def testTrailingSlashRequired(self):
        """
        If you leave off the trailing slash, app should redirect and add it.
        """
        request = self.client.get('/test_admin/%s/admin_views/article/add' % self.urlbit)
        self.assertRedirects(request,
            '/test_admin/%s/admin_views/article/add/' % self.urlbit, status_code=301
        )

    def testBasicAddGet(self):
        """
        A smoke test to ensure GET on the add_view works.
        """
        response = self.client.get('/test_admin/%s/admin_views/section/add/' % self.urlbit)
        self.assertEqual(response.status_code, 200)

    def testAddWithGETArgs(self):
        response = self.client.get('/test_admin/%s/admin_views/section/add/' % self.urlbit, {'name': 'My Section'})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            'value="My Section"' in response.content,
            "Couldn't find an input with the right value in the response."
        )

    def testBasicEditGet(self):
        """
        A smoke test to ensure GET on the change_view works.
        """
        response = self.client.get('/test_admin/%s/admin_views/section/1/' % self.urlbit)
        self.assertEqual(response.status_code, 200)

    def testBasicEditGetStringPK(self):
        """
        A smoke test to ensure GET on the change_view works (returns an HTTP
        404 error, see #11191) when passing a string as the PK argument for a
        model with an integer PK field.
        """
        response = self.client.get('/test_admin/%s/admin_views/section/abc/' % self.urlbit)
        self.assertEqual(response.status_code, 404)

    def testBasicAddPost(self):
        """
        A smoke test to ensure POST on add_view works.
        """
        post_data = {
            "name": u"Another Section",
            # inline data
            "article_set-TOTAL_FORMS": u"3",
            "article_set-INITIAL_FORMS": u"0",
            "article_set-MAX_NUM_FORMS": u"0",
        }
        response = self.client.post('/test_admin/%s/admin_views/section/add/' % self.urlbit, post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere

    def testPopupAddPost(self):
        """
        Ensure http response from a popup is properly escaped.
        """
        post_data = {
            '_popup': u'1',
            'title': u'title with a new\nline',
            'content': u'some content',
            'date_0': u'2010-09-10',
            'date_1': u'14:55:39',
        }
        response = self.client.post('/test_admin/%s/admin_views/article/add/' % self.urlbit, post_data)
        self.failUnlessEqual(response.status_code, 200)
        self.assertContains(response, 'dismissAddAnotherPopup')
        self.assertContains(response, 'title with a new\u000Aline')

    # Post data for edit inline
    inline_post_data = {
        "name": u"Test section",
        # inline data
        "article_set-TOTAL_FORMS": u"6",
        "article_set-INITIAL_FORMS": u"3",
        "article_set-MAX_NUM_FORMS": u"0",
        "article_set-0-id": u"1",
        # there is no title in database, give one here or formset will fail.
        "article_set-0-title": u"Norske bostaver æøå skaper problemer",
        "article_set-0-content": u"<p>Middle content</p>",
        "article_set-0-date_0": u"2008-03-18",
        "article_set-0-date_1": u"11:54:58",
        "article_set-0-section": u"1",
        "article_set-1-id": u"2",
        "article_set-1-title": u"Need a title.",
        "article_set-1-content": u"<p>Oldest content</p>",
        "article_set-1-date_0": u"2000-03-18",
        "article_set-1-date_1": u"11:54:58",
        "article_set-2-id": u"3",
        "article_set-2-title": u"Need a title.",
        "article_set-2-content": u"<p>Newest content</p>",
        "article_set-2-date_0": u"2009-03-18",
        "article_set-2-date_1": u"11:54:58",
        "article_set-3-id": u"",
        "article_set-3-title": u"",
        "article_set-3-content": u"",
        "article_set-3-date_0": u"",
        "article_set-3-date_1": u"",
        "article_set-4-id": u"",
        "article_set-4-title": u"",
        "article_set-4-content": u"",
        "article_set-4-date_0": u"",
        "article_set-4-date_1": u"",
        "article_set-5-id": u"",
        "article_set-5-title": u"",
        "article_set-5-content": u"",
        "article_set-5-date_0": u"",
        "article_set-5-date_1": u"",
    }

    def testBasicEditPost(self):
        """
        A smoke test to ensure POST on edit_view works.
        """
        response = self.client.post('/test_admin/%s/admin_views/section/1/' % self.urlbit, self.inline_post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere

    def testEditSaveAs(self):
        """
        Test "save as".
        """
        post_data = self.inline_post_data.copy()
        post_data.update({
            '_saveasnew': u'Save+as+new',
            "article_set-1-section": u"1",
            "article_set-2-section": u"1",
            "article_set-3-section": u"1",
            "article_set-4-section": u"1",
            "article_set-5-section": u"1",
        })
        response = self.client.post('/test_admin/%s/admin_views/section/1/' % self.urlbit, post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere

    def testChangeListSortingCallable(self):
        """
        Ensure we can sort on a list_display field that is a callable
        (column 2 is callable_year in ArticleAdmin)
        """
        response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'ot': 'asc', 'o': 2})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.content.index('Oldest content') < response.content.index('Middle content') and
            response.content.index('Middle content') < response.content.index('Newest content'),
            "Results of sorting on callable are out of order."
        )

    def testChangeListSortingModel(self):
        """
        Ensure we can sort on a list_display field that is a Model method
        (colunn 3 is 'model_year' in ArticleAdmin)
        """
        response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'ot': 'dsc', 'o': 3})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.content.index('Newest content') < response.content.index('Middle content') and
            response.content.index('Middle content') < response.content.index('Oldest content'),
            "Results of sorting on Model method are out of order."
        )

    def testChangeListSortingModelAdmin(self):
        """
        Ensure we can sort on a list_display field that is a ModelAdmin method
        (colunn 4 is 'modeladmin_year' in ArticleAdmin)
        """
        response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'ot': 'asc', 'o': 4})
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.content.index('Oldest content') < response.content.index('Middle content') and
            response.content.index('Middle content') < response.content.index('Newest content'),
            "Results of sorting on ModelAdmin method are out of order."
        )

    def testLimitedFilter(self):
        """Ensure admin changelist filters do not contain objects excluded via limit_choices_to.
        This also tests relation-spanning filters (e.g. 'color__value').
        """
        response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            '<div id="changelist-filter">' in response.content,
            "Expected filter not found in changelist view."
        )
        self.assertFalse(
            '<a href="?color__id__exact=3">Blue</a>' in response.content,
            "Changelist filter not correctly limited by limit_choices_to."
        )

    def testRelationSpanningFilters(self):
        response = self.client.get('/test_admin/%s/admin_views/chapterxtra1/' %
                                   self.urlbit)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<div id="changelist-filter">')
        # Each entry maps a filter lookup to the candidate values plus a
        # predicate that every object surviving the filter must satisfy.
        filters = {
            'chap__id__exact': dict(
                values=[c.id for c in Chapter.objects.all()],
                test=lambda obj, value: obj.chap.id == value),
            'chap__title': dict(
                values=[c.title for c in Chapter.objects.all()],
                test=lambda obj, value: obj.chap.title == value),
            'chap__book__id__exact': dict(
                values=[b.id for b in Book.objects.all()],
                test=lambda obj, value: obj.chap.book.id == value),
            'chap__book__name': dict(
                values=[b.name for b in Book.objects.all()],
                test=lambda obj, value: obj.chap.book.name == value),
            'chap__book__promo__id__exact': dict(
                values=[p.id for p in Promo.objects.all()],
                test=lambda obj, value:
                    obj.chap.book.promo_set.filter(id=value).exists()),
            'chap__book__promo__name': dict(
                values=[p.name for p in Promo.objects.all()],
                test=lambda obj, value:
                    obj.chap.book.promo_set.filter(name=value).exists()),
        }
        for filter_path, params in filters.items():
            for value in params['values']:
                query_string = urlencode({filter_path: value})
                # ensure filter link exists
                self.assertContains(response, '<a href="?%s">' % query_string)
                # ensure link works
                filtered_response = self.client.get(
                    '/test_admin/%s/admin_views/chapterxtra1/?%s' % (
                        self.urlbit, query_string))
                self.assertEqual(filtered_response.status_code, 200)
                # ensure changelist contains only valid objects
                for obj in filtered_response.context['cl'].query_set.all():
                    self.assertTrue(params['test'](obj, value))

    def testIncorrectLookupParameters(self):
        """Ensure incorrect lookup parameters are handled gracefully."""
        response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'notarealfield': '5'})
        self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)
        response = self.client.get('/test_admin/%s/admin_views/thing/' % self.urlbit, {'color__id__exact': 'StringNotInteger!'})
        self.assertRedirects(response, '/test_admin/%s/admin_views/thing/?e=1' % self.urlbit)

    def testIsNullLookups(self):
        """Ensure is_null is handled correctly."""
        Article.objects.create(title="I Could Go Anywhere", content="Versatile", date=datetime.datetime.now())
        response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit)
        self.assertTrue('4 articles' in response.content, '"4 articles" missing from response')
        response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': 'false'})
        self.assertTrue('3 articles' in response.content, '"3 articles" missing from response')
        response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit, {'section__isnull': 'true'})
        self.assertTrue('1 article' in response.content, '"1 article" missing from response')

    def testLogoutAndPasswordChangeURLs(self):
        response = self.client.get('/test_admin/%s/admin_views/article/' % self.urlbit)
        self.assertFalse('<a href="/test_admin/%s/logout/">' % self.urlbit not in response.content)
        self.assertFalse('<a href="/test_admin/%s/password_change/">' % self.urlbit not in response.content)

    def testNamedGroupFieldChoicesChangeList(self):
        """
        Ensures the admin changelist shows correct values in the relevant column
        for rows corresponding to instances of a model in which a named group
        has been used in the choices option of a field.
        """
        response = self.client.get('/test_admin/%s/admin_views/fabric/' % self.urlbit)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            '<a href="1/">Horizontal</a>' in response.content and
            '<a href="2/">Vertical</a>' in response.content,
            "Changelist table isn't showing the right human-readable values set by a model field 'choices' option named group."
        )

    def testNamedGroupFieldChoicesFilter(self):
        """
        Ensures the filter UI shows correctly when at least one named group has
        been used in the choices option of a model field.
        """
        response = self.client.get('/test_admin/%s/admin_views/fabric/' % self.urlbit)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            '<div id="changelist-filter">' in response.content,
            "Expected filter not found in changelist view."
        )
        self.assertTrue(
            '<a href="?surface__exact=x">Horizontal</a>' in response.content and
            '<a href="?surface__exact=y">Vertical</a>' in response.content,
            "Changelist filter isn't showing options contained inside a model field 'choices' option named group."
        )

    def testChangeListNullBooleanDisplay(self):
        Post.objects.create(public=None)
        # This hard-codes the URl because it'll fail if it runs
        # against the 'admin2' custom admin (which doesn't have the
        # Post model).
        response = self.client.get("/test_admin/admin/admin_views/post/")
        self.assertTrue('icon-unknown.gif' in response.content)

    def testI18NLanguageNonEnglishDefault(self):
        """
        Check if the Javascript i18n view returns an empty language catalog
        if the default language is non-English but the selected language
        is English. See #13388 and #3594 for more details.
        """
        try:
            settings.LANGUAGE_CODE = 'fr'
            activate('en-us')
            response = self.client.get('/test_admin/admin/jsi18n/')
            self.assertNotContains(response, 'Choisir une heure')
        finally:
            deactivate()

    def testI18NLanguageNonEnglishFallback(self):
        """
        Makes sure that the fallback language is still working properly
        in cases where the selected language cannot be found.
        """
        try:
            settings.LANGUAGE_CODE = 'fr'
            activate('none')
            response = self.client.get('/test_admin/admin/jsi18n/')
            self.assertContains(response, 'Choisir une heure')
        finally:
            deactivate()

    def testL10NDeactivated(self):
        """
        Check if L10N is deactivated, the Javascript i18n view doesn't
        return localized date/time formats. Refs #14824.
        """
        try:
            settings.LANGUAGE_CODE = 'ru'
            settings.USE_L10N = False
            activate('ru')
            response = self.client.get('/test_admin/admin/jsi18n/')
            self.assertNotContains(response, '%d.%m.%Y %H:%M:%S')
            self.assertContains(response, '%Y-%m-%d %H:%M:%S')
        finally:
            deactivate()

    def test_disallowed_filtering(self):
        # Lookups not whitelisted in list_filter must raise SuspiciousOperation.
        self.assertRaises(SuspiciousOperation,
            self.client.get, "/test_admin/admin/admin_views/album/?owner__email__startswith=fuzzy"
        )
        try:
            self.client.get("/test_admin/admin/admin_views/thing/?color__value__startswith=red")
            self.client.get("/test_admin/admin/admin_views/thing/?color__value=red")
        except SuspiciousOperation:
            self.fail("Filters are allowed if explicitly included in list_filter")
        try:
            self.client.get("/test_admin/admin/admin_views/person/?age__gt=30")
        except SuspiciousOperation:
            self.fail("Filters should be allowed if they involve a local field without the need to whitelist them in list_filter or date_hierarchy.")
        e1 = Employee.objects.create(name='Anonymous', gender=1, age=22, alive=True, code='123')
        e2 = Employee.objects.create(name='Visitor', gender=2, age=19, alive=True, code='124')
        WorkHour.objects.create(datum=datetime.datetime.now(), employee=e1)
        WorkHour.objects.create(datum=datetime.datetime.now(), employee=e2)
        response = self.client.get("/test_admin/admin/admin_views/workhour/")
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'employee__person_ptr__exact')
        response = self.client.get("/test_admin/admin/admin_views/workhour/?employee__person_ptr__exact=%d" % e1.pk)
        self.assertEqual(response.status_code, 200)

    def test_allowed_filtering_15103(self):
        """
        Regressions test for ticket 15103 - filtering on fields defined in a
        ForeignKey 'limit_choices_to' should be allowed, otherwise raw_id_fields
        can break.
        """
        try:
            self.client.get("/test_admin/admin/admin_views/inquisition/?leader__name=Palin&leader__age=27")
        except SuspiciousOperation:
            self.fail("Filters should be allowed if they are defined on a ForeignKey pointing to this model")
class AdminJavaScriptTest(AdminViewBasicTest):
    """Re-runs the basic suite and adds checks for the inline JS snippet
    that auto-focuses the first form field on add views."""

    def testSingleWidgetFirsFieldFocus(self):
        """
        JavaScript-assisted auto-focus on first field.
        """
        response = self.client.get('/test_admin/%s/admin_views/picture/add/' % self.urlbit)
        self.assertContains(
            response,
            '<script type="text/javascript">document.getElementById("id_name").focus();</script>'
        )

    def testMultiWidgetFirsFieldFocus(self):
        """
        JavaScript-assisted auto-focus should work if a model/ModelAdmin setup
        is such that the first form field has a MultiWidget.
        """
        response = self.client.get('/test_admin/%s/admin_views/reservation/add/' % self.urlbit)
        self.assertContains(
            response,
            '<script type="text/javascript">document.getElementById("id_start_date_0").focus();</script>'
        )
class SaveAsTests(TestCase):
    """Tests for the admin "save as new" feature on the Person model."""

    fixtures = ['admin-views-users.xml','admin-views-person.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_save_as_duplication(self):
        """Ensure save as actually creates a new person"""
        post_data = {'_saveasnew':'', 'name':'John M', 'gender':1, 'age': 42}
        response = self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
        # A new row is created and the original (pk=1) is left untouched.
        self.assertEqual(len(Person.objects.filter(name='John M')), 1)
        self.assertEqual(len(Person.objects.filter(id=1)), 1)

    def test_save_as_display(self):
        """
        Ensure that 'save as' is displayed when activated and after submitting
        invalid data aside save_as_new will not show us a form to overwrite the
        initial model.
        """
        response = self.client.get('/test_admin/admin/admin_views/person/1/')
        self.assertTrue(response.context['save_as'])
        # gender=3 is invalid, so the re-rendered form must target the add
        # view rather than overwriting the existing object.
        post_data = {'_saveasnew':'', 'name':'John M', 'gender':3, 'alive':'checked'}
        response = self.client.post('/test_admin/admin/admin_views/person/1/', post_data)
        self.assertEqual(response.context['form_url'], '../add/')
class CustomModelAdminTest(AdminViewBasicTest):
    """Re-runs the basic view tests against a customised AdminSite mounted
    at /test_admin/admin2/ and checks its custom templates and views."""

    urlbit = "admin2"

    def testCustomAdminSiteLoginForm(self):
        self.client.logout()
        request = self.client.get('/test_admin/admin2/')
        self.assertEqual(request.status_code, 200)
        # 'customform' is rejected by the site's custom login form.
        login = self.client.post('/test_admin/admin2/', {
            REDIRECT_FIELD_NAME: '/test_admin/admin2/',
            LOGIN_FORM_KEY: 1,
            'username': 'customform',
            'password': 'secret',
        })
        self.assertEqual(login.status_code, 200)
        self.assertContains(login, 'custom form error')

    def testCustomAdminSiteLoginTemplate(self):
        self.client.logout()
        request = self.client.get('/test_admin/admin2/')
        self.assertTemplateUsed(request, 'custom_admin/login.html')
        self.assertTrue('Hello from a custom login template' in request.content)

    def testCustomAdminSiteLogoutTemplate(self):
        request = self.client.get('/test_admin/admin2/logout/')
        self.assertTemplateUsed(request, 'custom_admin/logout.html')
        self.assertTrue('Hello from a custom logout template' in request.content)

    def testCustomAdminSiteIndexViewAndTemplate(self):
        request = self.client.get('/test_admin/admin2/')
        self.assertTemplateUsed(request, 'custom_admin/index.html')
        self.assertTrue('Hello from a custom index template *bar*' in request.content)

    def testCustomAdminSitePasswordChangeTemplate(self):
        request = self.client.get('/test_admin/admin2/password_change/')
        self.assertTemplateUsed(request, 'custom_admin/password_change_form.html')
        self.assertTrue('Hello from a custom password change form template' in request.content)

    def testCustomAdminSitePasswordChangeDoneTemplate(self):
        request = self.client.get('/test_admin/admin2/password_change/done/')
        self.assertTemplateUsed(request, 'custom_admin/password_change_done.html')
        self.assertTrue('Hello from a custom password change done template' in request.content)

    def testCustomAdminSiteView(self):
        self.client.login(username='super', password='secret')
        response = self.client.get('/test_admin/%s/my_view/' % self.urlbit)
        self.assertTrue(response.content == "Django is a magical pony!", response.content)
def get_perm(Model, perm):
    """Fetch the Permission named *perm* attached to *Model*'s content type."""
    content_type = ContentType.objects.get_for_model(Model)
    return Permission.objects.get(content_type=content_type, codename=perm)
class AdminViewPermissionsTest(TestCase):
    """Tests for Admin Views Permissions."""

    fixtures = ['admin-views-users.xml']

    def setUp(self):
        """Test setup."""
        # Setup permissions, for our users who can add, change, and delete.
        # We can't put this into the fixture, because the content type id
        # and the permission id could be different on each run of the test.
        opts = Article._meta
        # User who can add Articles
        add_user = User.objects.get(username='adduser')
        add_user.user_permissions.add(get_perm(Article,
            opts.get_add_permission()))
        # User who can change Articles
        change_user = User.objects.get(username='changeuser')
        change_user.user_permissions.add(get_perm(Article,
            opts.get_change_permission()))
        # User who can delete Articles
        delete_user = User.objects.get(username='deleteuser')
        delete_user.user_permissions.add(get_perm(Article,
            opts.get_delete_permission()))
        delete_user.user_permissions.add(get_perm(Section,
            Section._meta.get_delete_permission()))
        # login POST dicts, reused by every test method below.  Each carries
        # the admin's login-form marker plus a post-login redirect target.
        self.super_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'username': 'super',
            'password': 'secret',
        }
        self.super_email_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'username': 'super@example.com',
            'password': 'secret',
        }
        self.super_email_bad_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'username': 'super@example.com',
            'password': 'notsecret',
        }
        self.adduser_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'username': 'adduser',
            'password': 'secret',
        }
        self.changeuser_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'username': 'changeuser',
            'password': 'secret',
        }
        self.deleteuser_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'username': 'deleteuser',
            'password': 'secret',
        }
        self.joepublic_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'username': 'joepublic',
            'password': 'secret',
        }
        self.no_username_login = {
            REDIRECT_FIELD_NAME: '/test_admin/admin/',
            LOGIN_FORM_KEY: 1,
            'password': 'secret',
        }

    def testLogin(self):
        """
        Make sure only staff members can log in.

        Successful posts to the login page will redirect to the original url.
        Unsuccessful attempts will continue to render the login page with
        a 200 status code.
        """
        # Super User
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/', self.super_login)
        self.assertRedirects(login, '/test_admin/admin/')
        # A successful login renders no template, so no context is captured.
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # Test if user enters e-mail address
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/', self.super_email_login)
        self.assertContains(login, "Your e-mail address is not your username")
        # only correct passwords get a username hint
        login = self.client.post('/test_admin/admin/', self.super_email_bad_login)
        self.assertContains(login, "Please enter a correct username and password.")
        new_user = User(username='jondoe', password='secret', email='super@example.com')
        new_user.save()
        # check to ensure if there are multiple e-mail addresses a user doesn't get a 500
        login = self.client.post('/test_admin/admin/', self.super_email_login)
        self.assertContains(login, "Please enter a correct username and password.")
        # Add User
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/', self.adduser_login)
        self.assertRedirects(login, '/test_admin/admin/')
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # Change User
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/', self.changeuser_login)
        self.assertRedirects(login, '/test_admin/admin/')
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # Delete User
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/', self.deleteuser_login)
        self.assertRedirects(login, '/test_admin/admin/')
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # Regular User should not be able to login.
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/', self.joepublic_login)
        self.assertEqual(login.status_code, 200)
        self.assertContains(login, "Please enter a correct username and password.")
        # Requests without username should not return 500 errors.
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/', self.no_username_login)
        self.assertEqual(login.status_code, 200)
        form = login.context[0].get('form')
        self.assertEqual(form.errors['username'][0], 'This field is required.')

    def testLoginSuccessfullyRedirectsToOriginalUrl(self):
        """A login POST should honour the 'next' field plus the query string."""
        request = self.client.get('/test_admin/admin/')
        self.assertEqual(request.status_code, 200)
        query_string = 'the-answer=42'
        redirect_url = '/test_admin/admin/?%s' % query_string
        new_next = {REDIRECT_FIELD_NAME: redirect_url}
        login = self.client.post('/test_admin/admin/', dict(self.super_login, **new_next), QUERY_STRING=query_string)
        self.assertRedirects(login, redirect_url)

    def testAddView(self):
        """Test add view restricts access and actually adds items."""
        add_dict = {'title' : 'Døm ikke',
                    'content': '<p>great article</p>',
                    'date_0': '2008-03-18', 'date_1': '10:54:39',
                    'section': 1}
        # Change User should not have access to add articles
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.changeuser_login)
        # make sure the view removes test cookie
        self.assertEqual(self.client.session.test_cookie_worked(), False)
        request = self.client.get('/test_admin/admin/admin_views/article/add/')
        self.assertEqual(request.status_code, 403)
        # Try POST just to make sure
        post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
        self.assertEqual(post.status_code, 403)
        # fixture ships 3 Articles; nothing was added
        self.assertEqual(Article.objects.all().count(), 3)
        self.client.get('/test_admin/admin/logout/')
        # Add user may login and POST to add view, then redirect to admin root
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.adduser_login)
        addpage = self.client.get('/test_admin/admin/admin_views/article/add/')
        self.assertEqual(addpage.status_code, 200)
        change_list_link = '<a href="../">Articles</a> ›'
        self.assertFalse(change_list_link in addpage.content,
            'User restricted to add permission is given link to change list view in breadcrumbs.')
        post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
        self.assertRedirects(post, '/test_admin/admin/')
        self.assertEqual(Article.objects.all().count(), 4)
        # NOTE(review): a model save hook appears to send mail on creation —
        # the outbox assertion depends on it; confirm against the Article model.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Greetings from a created object')
        self.client.get('/test_admin/admin/logout/')
        # Super can add too, but is redirected to the change list view
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.super_login)
        addpage = self.client.get('/test_admin/admin/admin_views/article/add/')
        self.assertEqual(addpage.status_code, 200)
        self.assertFalse(change_list_link not in addpage.content,
            'Unrestricted user is not given link to change list view in breadcrumbs.')
        post = self.client.post('/test_admin/admin/admin_views/article/add/', add_dict)
        self.assertRedirects(post, '/test_admin/admin/admin_views/article/')
        self.assertEqual(Article.objects.all().count(), 5)
        self.client.get('/test_admin/admin/logout/')
        # 8509 - if a normal user is already logged in, it is possible
        # to change user into the superuser without error
        login = self.client.login(username='joepublic', password='secret')
        # Check and make sure that if user expires, data still persists
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.super_login)
        # make sure the view removes test cookie
        self.assertEqual(self.client.session.test_cookie_worked(), False)

    def testChangeView(self):
        """Change view should restrict access and allow users to edit items."""
        change_dict = {'title' : 'Ikke fordømt',
                       'content': '<p>edited article</p>',
                       'date_0': '2008-03-18', 'date_1': '10:54:39',
                       'section': 1}
        # add user shoud not be able to view the list of article or change any of them
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.adduser_login)
        request = self.client.get('/test_admin/admin/admin_views/article/')
        self.assertEqual(request.status_code, 403)
        request = self.client.get('/test_admin/admin/admin_views/article/1/')
        self.assertEqual(request.status_code, 403)
        post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
        self.assertEqual(post.status_code, 403)
        self.client.get('/test_admin/admin/logout/')
        # change user can view all items and edit them
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.changeuser_login)
        request = self.client.get('/test_admin/admin/admin_views/article/')
        self.assertEqual(request.status_code, 200)
        request = self.client.get('/test_admin/admin/admin_views/article/1/')
        self.assertEqual(request.status_code, 200)
        post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
        self.assertRedirects(post, '/test_admin/admin/admin_views/article/')
        self.assertEqual(Article.objects.get(pk=1).content, '<p>edited article</p>')
        # one error in form should produce singular error message, multiple errors plural
        change_dict['title'] = ''
        post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
        self.assertEqual(request.status_code, 200)
        self.assertTrue('Please correct the error below.' in post.content,
            'Singular error message not found in response to post with one error.')
        change_dict['content'] = ''
        post = self.client.post('/test_admin/admin/admin_views/article/1/', change_dict)
        self.assertEqual(request.status_code, 200)
        self.assertTrue('Please correct the errors below.' in post.content,
            'Plural error message not found in response to post with multiple errors.')
        self.client.get('/test_admin/admin/logout/')
        # Test redirection when using row-level change permissions. Refs #11513.
        # The model admin presumably only grants change permission on
        # even-numbered ids — every user can change id=2 but not id=1.
        RowLevelChangePermissionModel.objects.create(id=1, name="odd id")
        RowLevelChangePermissionModel.objects.create(id=2, name="even id")
        for login_dict in [self.super_login, self.changeuser_login, self.adduser_login, self.deleteuser_login]:
            self.client.post('/test_admin/admin/', login_dict)
            request = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/')
            self.assertEqual(request.status_code, 403)
            request = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/', {'name': 'changed'})
            self.assertEqual(RowLevelChangePermissionModel.objects.get(id=1).name, 'odd id')
            self.assertEqual(request.status_code, 403)
            request = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/')
            self.assertEqual(request.status_code, 200)
            request = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/', {'name': 'changed'})
            self.assertEqual(RowLevelChangePermissionModel.objects.get(id=2).name, 'changed')
            self.assertRedirects(request, '/test_admin/admin/')
            self.client.get('/test_admin/admin/logout/')
        # Anonymous / invalid logins always get the login form back (200),
        # and no object is ever modified.
        for login_dict in [self.joepublic_login, self.no_username_login]:
            self.client.post('/test_admin/admin/', login_dict)
            request = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/')
            self.assertEqual(request.status_code, 200)
            self.assertContains(request, 'login-form')
            request = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/1/', {'name': 'changed'})
            self.assertEqual(RowLevelChangePermissionModel.objects.get(id=1).name, 'odd id')
            self.assertEqual(request.status_code, 200)
            self.assertContains(request, 'login-form')
            request = self.client.get('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/')
            self.assertEqual(request.status_code, 200)
            self.assertContains(request, 'login-form')
            request = self.client.post('/test_admin/admin/admin_views/rowlevelchangepermissionmodel/2/', {'name': 'changed again'})
            # id=2 keeps the value written by the authorized loop above
            self.assertEqual(RowLevelChangePermissionModel.objects.get(id=2).name, 'changed')
            self.assertEqual(request.status_code, 200)
            self.assertContains(request, 'login-form')
            self.client.get('/test_admin/admin/logout/')

    def testConditionallyShowAddSectionLink(self):
        """
        The foreign key widget should only show the "add related" button if the
        user has permission to add that related item.
        """
        # Set up and log in user.
        url = '/test_admin/admin/admin_views/article/add/'
        add_link_text = ' class="add-another"'
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.adduser_login)
        # The add user can't add sections yet, so they shouldn't see the "add
        # section" link.
        response = self.client.get(url)
        self.assertNotContains(response, add_link_text)
        # Allow the add user to add sections too. Now they can see the "add
        # section" link.
        add_user = User.objects.get(username='adduser')
        perm = get_perm(Section, Section._meta.get_add_permission())
        add_user.user_permissions.add(perm)
        response = self.client.get(url)
        self.assertContains(response, add_link_text)

    def testCustomModelAdminTemplates(self):
        """Custom per-ModelAdmin templates are used for every admin view."""
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.super_login)
        # Test custom change list template with custom extra context
        request = self.client.get('/test_admin/admin/admin_views/customarticle/')
        self.assertEqual(request.status_code, 200)
        self.assertTrue("var hello = 'Hello!';" in request.content)
        self.assertTemplateUsed(request, 'custom_admin/change_list.html')
        # Test custom add form template
        request = self.client.get('/test_admin/admin/admin_views/customarticle/add/')
        self.assertTemplateUsed(request, 'custom_admin/add_form.html')
        # Add an article so we can test delete, change, and history views
        post = self.client.post('/test_admin/admin/admin_views/customarticle/add/', {
            'content': '<p>great article</p>',
            'date_0': '2008-03-18',
            'date_1': '10:54:39'
        })
        self.assertRedirects(post, '/test_admin/admin/admin_views/customarticle/')
        self.assertEqual(CustomArticle.objects.all().count(), 1)
        article_pk = CustomArticle.objects.all()[0].pk
        # Test custom delete, change, and object history templates
        # Test custom change form template
        request = self.client.get('/test_admin/admin/admin_views/customarticle/%d/' % article_pk)
        self.assertTemplateUsed(request, 'custom_admin/change_form.html')
        request = self.client.get('/test_admin/admin/admin_views/customarticle/%d/delete/' % article_pk)
        self.assertTemplateUsed(request, 'custom_admin/delete_confirmation.html')
        request = self.client.post('/test_admin/admin/admin_views/customarticle/', data={
            'index': 0,
            'action': ['delete_selected'],
            '_selected_action': ['1'],
        })
        self.assertTemplateUsed(request, 'custom_admin/delete_selected_confirmation.html')
        request = self.client.get('/test_admin/admin/admin_views/customarticle/%d/history/' % article_pk)
        self.assertTemplateUsed(request, 'custom_admin/object_history.html')
        self.client.get('/test_admin/admin/logout/')

    def testDeleteView(self):
        """Delete view should restrict access and actually delete items."""
        delete_dict = {'post': 'yes'}
        # add user shoud not be able to delete articles
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.adduser_login)
        request = self.client.get('/test_admin/admin/admin_views/article/1/delete/')
        self.assertEqual(request.status_code, 403)
        post = self.client.post('/test_admin/admin/admin_views/article/1/delete/', delete_dict)
        self.assertEqual(post.status_code, 403)
        self.assertEqual(Article.objects.all().count(), 3)
        self.client.get('/test_admin/admin/logout/')
        # Delete user can delete
        self.client.get('/test_admin/admin/')
        self.client.post('/test_admin/admin/', self.deleteuser_login)
        response = self.client.get('/test_admin/admin/admin_views/section/1/delete/')
        # test response contains link to related Article
        self.assertContains(response, "admin_views/article/1/")
        response = self.client.get('/test_admin/admin/admin_views/article/1/delete/')
        self.assertEqual(response.status_code, 200)
        post = self.client.post('/test_admin/admin/admin_views/article/1/delete/', delete_dict)
        self.assertRedirects(post, '/test_admin/admin/')
        self.assertEqual(Article.objects.all().count(), 2)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Greetings from a deleted object')
        # A deletion is recorded in the admin log with the object's pk.
        article_ct = ContentType.objects.get_for_model(Article)
        logged = LogEntry.objects.get(content_type=article_ct, action_flag=DELETION)
        self.assertEqual(logged.object_id, u'1')
        self.client.get('/test_admin/admin/logout/')

    def testDisabledPermissionsWhenLoggedIn(self):
        """Deactivating a logged-in user drops them back to the login form."""
        self.client.login(username='super', password='secret')
        superuser = User.objects.get(username='super')
        superuser.is_active = False
        superuser.save()
        response = self.client.get('/test_admin/admin/')
        self.assertContains(response, 'id="login-form"')
        self.assertNotContains(response, 'Log out')
        response = self.client.get('/test_admin/admin/secure-view/')
        self.assertContains(response, 'id="login-form"')
class AdminViewDeletedObjectsTest(TestCase):
    """Tests for the related-objects listing on delete-confirmation pages."""

    fixtures = ['admin-views-users.xml', 'deleted-objects.xml']

    def setUp(self):
        # All tests run as the superuser unless they log out themselves.
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_nesting(self):
        """
        Objects should be nested to display the relationships that
        cause them to be scheduled for deletion.
        """
        pattern = re.compile(r"""<li>Plot: <a href=".+/admin_views/plot/1/">World Domination</a>\s*<ul>\s*<li>Plot details: <a href=".+/admin_views/plotdetails/1/">almost finished</a>""")
        response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
        self.assertTrue(pattern.search(response.content))

    def test_cyclic(self):
        """
        Cyclic relationships should still cause each object to only be
        listed once.
        """
        one = """<li>Cyclic one: <a href="/test_admin/admin/admin_views/cyclicone/1/">I am recursive</a>"""
        two = """<li>Cyclic two: <a href="/test_admin/admin/admin_views/cyclictwo/1/">I am recursive too</a>"""
        response = self.client.get('/test_admin/admin/admin_views/cyclicone/%s/delete/' % quote(1))
        # count=1: each object appears exactly once despite the cycle
        self.assertContains(response, one, 1)
        self.assertContains(response, two, 1)

    def test_perms_needed(self):
        """Missing delete permission on related models is reported by type."""
        self.client.logout()
        delete_user = User.objects.get(username='deleteuser')
        delete_user.user_permissions.add(get_perm(Plot,
            Plot._meta.get_delete_permission()))
        self.assertTrue(self.client.login(username='deleteuser',
                                          password='secret'))
        response = self.client.get('/test_admin/admin/admin_views/plot/%s/delete/' % quote(1))
        self.assertContains(response, "your account doesn't have permission to delete the following types of objects")
        self.assertContains(response, "<li>plot details</li>")

    def test_protected(self):
        """PROTECT-related objects block deletion and are listed with links."""
        q = Question.objects.create(question="Why?")
        a1 = Answer.objects.create(question=q, answer="Because.")
        a2 = Answer.objects.create(question=q, answer="Yes.")
        response = self.client.get("/test_admin/admin/admin_views/question/%s/delete/" % quote(q.pk))
        self.assertContains(response, "would require deleting the following protected related objects")
        self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Because.</a></li>' % a1.pk)
        self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Yes.</a></li>' % a2.pk)

    def test_not_registered(self):
        """Models not registered with the admin are listed without a link."""
        should_contain = """<li>Secret hideout: underground bunker"""
        response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
        self.assertContains(response, should_contain, 1)

    def test_multiple_fkeys_to_same_model(self):
        """
        If a deleted object has two relationships from another model,
        both of those should be followed in looking for related
        objects to delete.
        """
        should_contain = """<li>Plot: <a href="/test_admin/admin/admin_views/plot/1/">World Domination</a>"""
        response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(1))
        self.assertContains(response, should_contain)
        response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(2))
        self.assertContains(response, should_contain)

    def test_multiple_fkeys_to_same_instance(self):
        """
        If a deleted object has two relationships pointing to it from
        another object, the other object should still only be listed
        once.
        """
        should_contain = """<li>Plot: <a href="/test_admin/admin/admin_views/plot/2/">World Peace</a></li>"""
        response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(2))
        self.assertContains(response, should_contain, 1)

    def test_inheritance(self):
        """
        In the case of an inherited model, if either the child or
        parent-model instance is deleted, both instances are listed
        for deletion, as well as any relationships they have.
        """
        should_contain = [
            """<li>Villain: <a href="/test_admin/admin/admin_views/villain/3/">Bob</a>""",
            """<li>Super villain: <a href="/test_admin/admin/admin_views/supervillain/3/">Bob</a>""",
            """<li>Secret hideout: floating castle""",
            """<li>Super secret hideout: super floating castle!"""
        ]
        response = self.client.get('/test_admin/admin/admin_views/villain/%s/delete/' % quote(3))
        for should in should_contain:
            self.assertContains(response, should, 1)
        response = self.client.get('/test_admin/admin/admin_views/supervillain/%s/delete/' % quote(3))
        for should in should_contain:
            self.assertContains(response, should, 1)

    def test_generic_relations(self):
        """
        If a deleted object has GenericForeignKeys pointing to it,
        those objects should be listed for deletion.
        """
        plot = Plot.objects.get(pk=3)
        tag = FunkyTag.objects.create(content_object=plot, name='hott')
        should_contain = """<li>Funky tag: hott"""
        response = self.client.get('/test_admin/admin/admin_views/plot/%s/delete/' % quote(3))
        self.assertContains(response, should_contain)
class AdminViewStringPrimaryKeyTest(TestCase):
    """Admin URLs must survive a string primary key full of special characters."""

    fixtures = ['admin-views-users.xml', 'string-primary-key.xml']

    def __init__(self, *args):
        super(AdminViewStringPrimaryKeyTest, self).__init__(*args)
        # NOTE(review): set in __init__ rather than setUp; presumably because
        # the value is constant — the pk exercises nearly every character that
        # needs URL quoting, and must match the fixture's object.
        self.pk = """abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ 1234567890 -_.!~*'() ;/?:@&=+$, <>#%" {}|\^[]`"""

    def setUp(self):
        self.client.login(username='super', password='secret')
        # Record an admin log entry for the object so the "recent actions"
        # sidebar tests have something to render.
        content_type_pk = ContentType.objects.get_for_model(ModelWithStringPrimaryKey).pk
        LogEntry.objects.log_action(100, content_type_pk, self.pk, self.pk, 2, change_message='')

    def tearDown(self):
        self.client.logout()

    def test_get_history_view(self):
        "Retrieving the history for the object using urlencoded form of primary key should work"
        response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/history/' % quote(self.pk))
        self.assertContains(response, escape(self.pk))
        self.assertEqual(response.status_code, 200)

    def test_get_change_view(self):
        "Retrieving the object using urlencoded form of primary key should work"
        response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(self.pk))
        self.assertContains(response, escape(self.pk))
        self.assertEqual(response.status_code, 200)

    def test_changelist_to_changeform_link(self):
        "The link from the changelist referring to the changeform of the object should be quoted"
        response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/')
        should_contain = """<th><a href="%s/">%s</a></th></tr>""" % (quote(self.pk), escape(self.pk))
        self.assertContains(response, should_contain)

    def test_recentactions_link(self):
        "The link from the recent actions list referring to the changeform of the object should be quoted"
        response = self.client.get('/test_admin/admin/')
        should_contain = """<a href="admin_views/modelwithstringprimarykey/%s/">%s</a>""" % (quote(self.pk), escape(self.pk))
        self.assertContains(response, should_contain)

    def test_recentactions_without_content_type(self):
        "If a LogEntry is missing content_type it will not display it in span tag under the hyperlink."
        response = self.client.get('/test_admin/admin/')
        should_contain = """<a href="admin_views/modelwithstringprimarykey/%s/">%s</a>""" % (quote(self.pk), escape(self.pk))
        self.assertContains(response, should_contain)
        should_contain = "Model with string primary key" # capitalized in Recent Actions
        self.assertContains(response, should_contain)
        logentry = LogEntry.objects.get(content_type__name__iexact=should_contain)
        # http://code.djangoproject.com/ticket/10275
        # if the log entry doesn't have a content type it should still be
        # possible to view the Recent Actions part
        logentry.content_type = None
        logentry.save()
        # After clearing the content type, the page renders with exactly one
        # fewer occurrence of the model's verbose name.
        counted_presence_before = response.content.count(should_contain)
        response = self.client.get('/test_admin/admin/')
        counted_presence_after = response.content.count(should_contain)
        self.assertEqual(counted_presence_before - 1,
                         counted_presence_after)

    def test_deleteconfirmation_link(self):
        "The link from the delete confirmation page referring back to the changeform of the object should be quoted"
        response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/delete/' % quote(self.pk))
        # this URL now comes through reverse(), thus iri_to_uri encoding
        should_contain = """/%s/">%s</a>""" % (iri_to_uri(quote(self.pk)), escape(self.pk))
        self.assertContains(response, should_contain)

    def test_url_conflicts_with_add(self):
        "A model with a primary key that ends with add should be visible"
        add_model = ModelWithStringPrimaryKey(id="i have something to add")
        add_model.save()
        response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(add_model.pk))
        should_contain = """<h1>Change model with string primary key</h1>"""
        self.assertContains(response, should_contain)

    def test_url_conflicts_with_delete(self):
        "A model with a primary key that ends with delete should be visible"
        delete_model = ModelWithStringPrimaryKey(id="delete")
        delete_model.save()
        response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(delete_model.pk))
        should_contain = """<h1>Change model with string primary key</h1>"""
        self.assertContains(response, should_contain)

    def test_url_conflicts_with_history(self):
        "A model with a primary key that ends with history should be visible"
        history_model = ModelWithStringPrimaryKey(id="history")
        history_model.save()
        response = self.client.get('/test_admin/admin/admin_views/modelwithstringprimarykey/%s/' % quote(history_model.pk))
        should_contain = """<h1>Change model with string primary key</h1>"""
        self.assertContains(response, should_contain)
class SecureViewTests(TestCase):
    """Tests for views protected by the staff_member_required decorator."""

    fixtures = ['admin-views-users.xml']

    def setUp(self):
        # login POST dicts — same shape as AdminViewPermissionsTest's, but
        # redirecting back to the decorated secure view.
        self.super_login = {
            LOGIN_FORM_KEY: 1,
            REDIRECT_FIELD_NAME: '/test_admin/admin/secure-view/',
            'username': 'super',
            'password': 'secret',
        }
        self.super_email_login = {
            LOGIN_FORM_KEY: 1,
            REDIRECT_FIELD_NAME: '/test_admin/admin/secure-view/',
            'username': 'super@example.com',
            'password': 'secret',
        }
        self.super_email_bad_login = {
            LOGIN_FORM_KEY: 1,
            REDIRECT_FIELD_NAME: '/test_admin/admin/secure-view/',
            'username': 'super@example.com',
            'password': 'notsecret',
        }
        self.adduser_login = {
            LOGIN_FORM_KEY: 1,
            REDIRECT_FIELD_NAME: '/test_admin/admin/secure-view/',
            'username': 'adduser',
            'password': 'secret',
        }
        self.changeuser_login = {
            LOGIN_FORM_KEY: 1,
            REDIRECT_FIELD_NAME: '/test_admin/admin/secure-view/',
            'username': 'changeuser',
            'password': 'secret',
        }
        self.deleteuser_login = {
            LOGIN_FORM_KEY: 1,
            REDIRECT_FIELD_NAME: '/test_admin/admin/secure-view/',
            'username': 'deleteuser',
            'password': 'secret',
        }
        self.joepublic_login = {
            LOGIN_FORM_KEY: 1,
            REDIRECT_FIELD_NAME: '/test_admin/admin/secure-view/',
            'username': 'joepublic',
            'password': 'secret',
        }

    def tearDown(self):
        self.client.logout()

    def test_secure_view_shows_login_if_not_logged_in(self):
        "Ensure that we see the login form"
        response = self.client.get('/test_admin/admin/secure-view/' )
        self.assertTemplateUsed(response, 'admin/login.html')

    def test_secure_view_login_successfully_redirects_to_original_url(self):
        """A login POST to the secure view honours 'next' plus the query string."""
        request = self.client.get('/test_admin/admin/secure-view/')
        self.assertEqual(request.status_code, 200)
        query_string = 'the-answer=42'
        redirect_url = '/test_admin/admin/secure-view/?%s' % query_string
        new_next = {REDIRECT_FIELD_NAME: redirect_url}
        login = self.client.post('/test_admin/admin/secure-view/', dict(self.super_login, **new_next), QUERY_STRING=query_string)
        self.assertRedirects(login, redirect_url)

    def test_staff_member_required_decorator_works_as_per_admin_login(self):
        """
        Make sure only staff members can log in.

        Successful posts to the login page will redirect to the original url.
        Unsuccessful attempts will continue to render the login page with
        a 200 status code.
        """
        # Super User
        request = self.client.get('/test_admin/admin/secure-view/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/secure-view/', self.super_login)
        self.assertRedirects(login, '/test_admin/admin/secure-view/')
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # make sure the view removes test cookie
        self.assertEqual(self.client.session.test_cookie_worked(), False)
        # Test if user enters e-mail address
        request = self.client.get('/test_admin/admin/secure-view/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/secure-view/', self.super_email_login)
        self.assertContains(login, "Your e-mail address is not your username")
        # only correct passwords get a username hint
        login = self.client.post('/test_admin/admin/secure-view/', self.super_email_bad_login)
        self.assertContains(login, "Please enter a correct username and password.")
        new_user = User(username='jondoe', password='secret', email='super@example.com')
        new_user.save()
        # check to ensure if there are multiple e-mail addresses a user doesn't get a 500
        login = self.client.post('/test_admin/admin/secure-view/', self.super_email_login)
        self.assertContains(login, "Please enter a correct username and password.")
        # Add User
        request = self.client.get('/test_admin/admin/secure-view/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/secure-view/', self.adduser_login)
        self.assertRedirects(login, '/test_admin/admin/secure-view/')
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # Change User
        request = self.client.get('/test_admin/admin/secure-view/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/secure-view/', self.changeuser_login)
        self.assertRedirects(login, '/test_admin/admin/secure-view/')
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # Delete User
        request = self.client.get('/test_admin/admin/secure-view/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/secure-view/', self.deleteuser_login)
        self.assertRedirects(login, '/test_admin/admin/secure-view/')
        self.assertFalse(login.context)
        self.client.get('/test_admin/admin/logout/')
        # Regular User should not be able to login.
        request = self.client.get('/test_admin/admin/secure-view/')
        self.assertEqual(request.status_code, 200)
        login = self.client.post('/test_admin/admin/secure-view/', self.joepublic_login)
        self.assertEqual(login.status_code, 200)
        # Login.context is a list of context dicts we just need to check the first one.
        self.assertContains(login, "Please enter a correct username and password.")
        # 8509 - if a normal user is already logged in, it is possible
        # to change user into the superuser without error
        login = self.client.login(username='joepublic', password='secret')
        # Check and make sure that if user expires, data still persists
        self.client.get('/test_admin/admin/secure-view/')
        self.client.post('/test_admin/admin/secure-view/', self.super_login)
        # make sure the view removes test cookie
        self.assertEqual(self.client.session.test_cookie_worked(), False)

    def test_shortcut_view_only_available_to_staff(self):
        """
        Only admin users should be able to use the admin shortcut view.
        """
        user_ctype = ContentType.objects.get_for_model(User)
        user = User.objects.get(username='super')
        shortcut_url = "/test_admin/admin/r/%s/%s/" % (user_ctype.pk, user.pk)
        # Not logged in: we should see the login page.
        response = self.client.get(shortcut_url, follow=False)
        self.assertTemplateUsed(response, 'admin/login.html')
        # Logged in? Redirect.
        self.client.login(username='super', password='secret')
        response = self.client.get(shortcut_url, follow=False)
        # Can't use self.assertRedirects() because User.get_absolute_url() is silly.
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response['Location'], 'http://example.com/users/super/')
class AdminViewUnicodeTest(TestCase):
    """Regression tests: admin edit/delete views must handle non-ASCII data."""
    fixtures = ['admin-views-unicode.xml']
    def setUp(self):
        self.client.login(username='super', password='secret')
    def tearDown(self):
        self.client.logout()
    def testUnicodeEdit(self):
        """
        A test to ensure that POST on edit_view handles non-ascii characters.
        """
        post_data = {
            "name": u"Test lærdommer",
            # inline data
            "chapter_set-TOTAL_FORMS": u"6",
            "chapter_set-INITIAL_FORMS": u"3",
            "chapter_set-MAX_NUM_FORMS": u"0",
            # Three existing chapters from the fixture, re-submitted with
            # non-ASCII titles/content.
            "chapter_set-0-id": u"1",
            "chapter_set-0-title": u"Norske bostaver æøå skaper problemer",
            "chapter_set-0-content": u"<p>Svært frustrerende med UnicodeDecodeError</p>",
            "chapter_set-1-id": u"2",
            "chapter_set-1-title": u"Kjærlighet.",
            "chapter_set-1-content": u"<p>La kjærligheten til de lidende seire.</p>",
            "chapter_set-2-id": u"3",
            "chapter_set-2-title": u"Need a title.",
            "chapter_set-2-content": u"<p>Newest content</p>",
            # Three extra, untouched inline forms.
            "chapter_set-3-id": u"",
            "chapter_set-3-title": u"",
            "chapter_set-3-content": u"",
            "chapter_set-4-id": u"",
            "chapter_set-4-title": u"",
            "chapter_set-4-content": u"",
            "chapter_set-5-id": u"",
            "chapter_set-5-title": u"",
            "chapter_set-5-content": u"",
        }
        response = self.client.post('/test_admin/admin/admin_views/book/1/', post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
    def testUnicodeDelete(self):
        """
        Ensure that the delete_view handles non-ascii characters
        """
        delete_dict = {'post': 'yes'}
        response = self.client.get('/test_admin/admin/admin_views/book/1/delete/')
        self.assertEqual(response.status_code, 200)
        response = self.client.post('/test_admin/admin/admin_views/book/1/delete/', delete_dict)
        self.assertRedirects(response, '/test_admin/admin/admin_views/book/')
class AdminViewListEditable(TestCase):
    """Tests for list_editable changelists (editing objects directly on the
    change list page via the changelist formset)."""
    fixtures = ['admin-views-users.xml', 'admin-views-person.xml']
    def setUp(self):
        self.client.login(username='super', password='secret')
    def tearDown(self):
        self.client.logout()
    def test_inheritance(self):
        # list_editable changelist for a model using inheritance renders.
        Podcast.objects.create(name="This Week in Django",
            release_date=datetime.date.today())
        response = self.client.get('/test_admin/admin/admin_views/podcast/')
        self.assertEqual(response.status_code, 200)
    def test_inheritance_2(self):
        Vodcast.objects.create(name="This Week in Django", released=True)
        response = self.client.get('/test_admin/admin/admin_views/vodcast/')
        self.assertEqual(response.status_code, 200)
    def test_custom_pk(self):
        # list_editable works with a non-integer custom primary key.
        Language.objects.create(iso='en', name='English', english_name='English')
        response = self.client.get('/test_admin/admin/admin_views/language/')
        self.assertEqual(response.status_code, 200)
    def test_changelist_input_html(self):
        response = self.client.get('/test_admin/admin/admin_views/person/')
        # 2 inputs per object(the field and the hidden id field) = 6
        # 3 management hidden fields = 3
        # 4 action inputs (3 regular checkboxes, 1 checkbox to select all)
        # main form submit button = 1
        # search field and search submit button = 2
        # CSRF field = 1
        # field to track 'select all' across paginated views = 1
        # 6 + 3 + 4 + 1 + 2 + 1 + 1 = 18 inputs
        self.assertEqual(response.content.count("<input"), 18)
        # 1 select per object (3) + 1 action chooser select = 4 selects
        self.assertEqual(response.content.count("<select"), 4)
    def test_post_messages(self):
        # Ticket 12707: Saving inline editable should not show admin
        # action warnings
        data = {
            "form-TOTAL_FORMS": "3",
            "form-INITIAL_FORMS": "3",
            "form-MAX_NUM_FORMS": "0",
            "form-0-gender": "1",
            "form-0-id": "1",
            "form-1-gender": "2",
            "form-1-id": "2",
            "form-2-alive": "checked",
            "form-2-gender": "1",
            "form-2-id": "3",
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/person/',
                                    data, follow=True)
        # Only the "changed successfully" message should be queued.
        self.assertEqual(len(response.context['messages']), 1)
    def test_post_submission(self):
        data = {
            "form-TOTAL_FORMS": "3",
            "form-INITIAL_FORMS": "3",
            "form-MAX_NUM_FORMS": "0",
            "form-0-gender": "1",
            "form-0-id": "1",
            "form-1-gender": "2",
            "form-1-id": "2",
            "form-2-alive": "checked",
            "form-2-gender": "1",
            "form-2-id": "3",
            "_save": "Save",
        }
        self.client.post('/test_admin/admin/admin_views/person/', data)
        # form-0 omitted "alive", so it is saved as False.
        self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
        self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
        # test a filtered page
        data = {
            "form-TOTAL_FORMS": "2",
            "form-INITIAL_FORMS": "2",
            "form-MAX_NUM_FORMS": "0",
            "form-0-id": "1",
            "form-0-gender": "1",
            "form-0-alive": "checked",
            "form-1-id": "3",
            "form-1-gender": "1",
            "form-1-alive": "checked",
            "_save": "Save",
        }
        self.client.post('/test_admin/admin/admin_views/person/?gender__exact=1', data)
        self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
        # test a searched page
        data = {
            "form-TOTAL_FORMS": "1",
            "form-INITIAL_FORMS": "1",
            "form-MAX_NUM_FORMS": "0",
            "form-0-id": "1",
            "form-0-gender": "1",
            "_save": "Save",
        }
        self.client.post('/test_admin/admin/admin_views/person/?q=john', data)
        self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
    def test_non_field_errors(self):
        ''' Ensure that non field errors are displayed for each of the
            forms in the changelist's formset. Refs #13126.
        '''
        fd1 = FoodDelivery.objects.create(reference='123', driver='bill', restaurant='thai')
        fd2 = FoodDelivery.objects.create(reference='456', driver='bill', restaurant='india')
        fd3 = FoodDelivery.objects.create(reference='789', driver='bill', restaurant='pizza')
        data = {
            "form-TOTAL_FORMS": "3",
            "form-INITIAL_FORMS": "3",
            "form-MAX_NUM_FORMS": "0",
            "form-0-id": str(fd1.id),
            "form-0-reference": "123",
            "form-0-driver": "bill",
            "form-0-restaurant": "thai",
            # Same data as above: Forbidden because of unique_together!
            "form-1-id": str(fd2.id),
            "form-1-reference": "456",
            "form-1-driver": "bill",
            "form-1-restaurant": "thai",
            "form-2-id": str(fd3.id),
            "form-2-reference": "789",
            "form-2-driver": "bill",
            "form-2-restaurant": "pizza",
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/fooddelivery/', data)
        # One duplicate -> one non-field error row in the table.
        self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>Food delivery with this Driver and Restaurant already exists.</li></ul></td></tr>', 1)
        data = {
            "form-TOTAL_FORMS": "3",
            "form-INITIAL_FORMS": "3",
            "form-MAX_NUM_FORMS": "0",
            "form-0-id": str(fd1.id),
            "form-0-reference": "123",
            "form-0-driver": "bill",
            "form-0-restaurant": "thai",
            # Same data as above: Forbidden because of unique_together!
            "form-1-id": str(fd2.id),
            "form-1-reference": "456",
            "form-1-driver": "bill",
            "form-1-restaurant": "thai",
            # Same data also.
            "form-2-id": str(fd3.id),
            "form-2-reference": "789",
            "form-2-driver": "bill",
            "form-2-restaurant": "thai",
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/fooddelivery/', data)
        # Two duplicates -> two error rows, one per offending form.
        self.assertContains(response, '<tr><td colspan="4"><ul class="errorlist"><li>Food delivery with this Driver and Restaurant already exists.</li></ul></td></tr>', 2)
    def test_non_form_errors(self):
        # test if non-form errors are handled; ticket #12716
        data = {
            "form-TOTAL_FORMS": "1",
            "form-INITIAL_FORMS": "1",
            "form-MAX_NUM_FORMS": "0",
            "form-0-id": "2",
            "form-0-alive": "1",
            "form-0-gender": "2",
            # Ensure that the form processing understands this as a list_editable "Save"
            # and not an action "Go".
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/person/', data)
        self.assertContains(response, "Grace is not a Zombie")
    def test_non_form_errors_is_errorlist(self):
        # test if non-form errors are correctly handled; ticket #12878
        data = {
            "form-TOTAL_FORMS": "1",
            "form-INITIAL_FORMS": "1",
            "form-MAX_NUM_FORMS": "0",
            "form-0-id": "2",
            "form-0-alive": "1",
            "form-0-gender": "2",
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/person/', data)
        non_form_errors = response.context['cl'].formset.non_form_errors()
        # non_form_errors() must be an ErrorList, not a plain list.
        self.assertTrue(isinstance(non_form_errors, ErrorList))
        self.assertEqual(str(non_form_errors), str(ErrorList(["Grace is not a Zombie"])))
    def test_list_editable_ordering(self):
        collector = Collector.objects.create(id=1, name="Frederick Clegg")
        Category.objects.create(id=1, order=1, collector=collector)
        Category.objects.create(id=2, order=2, collector=collector)
        Category.objects.create(id=3, order=0, collector=collector)
        Category.objects.create(id=4, order=0, collector=collector)
        # NB: The order values must be changed so that the items are reordered.
        data = {
            "form-TOTAL_FORMS": "4",
            "form-INITIAL_FORMS": "4",
            "form-MAX_NUM_FORMS": "0",
            "form-0-order": "14",
            "form-0-id": "1",
            "form-0-collector": "1",
            "form-1-order": "13",
            "form-1-id": "2",
            "form-1-collector": "1",
            "form-2-order": "1",
            "form-2-id": "3",
            "form-2-collector": "1",
            "form-3-order": "0",
            "form-3-id": "4",
            "form-3-collector": "1",
            # Ensure that the form processing understands this as a list_editable "Save"
            # and not an action "Go".
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/category/', data)
        # Successful post will redirect
        self.assertEqual(response.status_code, 302)
        # Check that the order values have been applied to the right objects
        self.assertEqual(Category.objects.get(id=1).order, 14)
        self.assertEqual(Category.objects.get(id=2).order, 13)
        self.assertEqual(Category.objects.get(id=3).order, 1)
        self.assertEqual(Category.objects.get(id=4).order, 0)
    def test_list_editable_action_submit(self):
        # List editable changes should not be executed if the action "Go" button is
        # used to submit the form.
        data = {
            "form-TOTAL_FORMS": "3",
            "form-INITIAL_FORMS": "3",
            "form-MAX_NUM_FORMS": "0",
            "form-0-gender": "1",
            "form-0-id": "1",
            "form-1-gender": "2",
            "form-1-id": "2",
            "form-2-alive": "checked",
            "form-2-gender": "1",
            "form-2-id": "3",
            "index": "0",
            "_selected_action": [u'3'],
            "action": [u'', u'delete_selected'],
        }
        self.client.post('/test_admin/admin/admin_views/person/', data)
        # The list_editable data above must have been ignored.
        self.assertEqual(Person.objects.get(name="John Mauchly").alive, True)
        self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 1)
    def test_list_editable_action_choices(self):
        # List editable changes should be executed if the "Save" button is
        # used to submit the form - any action choices should be ignored.
        data = {
            "form-TOTAL_FORMS": "3",
            "form-INITIAL_FORMS": "3",
            "form-MAX_NUM_FORMS": "0",
            "form-0-gender": "1",
            "form-0-id": "1",
            "form-1-gender": "2",
            "form-1-id": "2",
            "form-2-alive": "checked",
            "form-2-gender": "1",
            "form-2-id": "3",
            "_save": "Save",
            "_selected_action": [u'1'],
            "action": [u'', u'delete_selected'],
        }
        self.client.post('/test_admin/admin/admin_views/person/', data)
        # The form data was saved; the delete action did not run.
        self.assertEqual(Person.objects.get(name="John Mauchly").alive, False)
        self.assertEqual(Person.objects.get(name="Grace Hopper").gender, 2)
    def test_list_editable_popup(self):
        """
        Fields should not be list-editable in popups.
        """
        response = self.client.get('/test_admin/admin/admin_views/person/')
        self.assertNotEqual(response.context['cl'].list_editable, ())
        response = self.client.get('/test_admin/admin/admin_views/person/?%s' % IS_POPUP_VAR)
        self.assertEqual(response.context['cl'].list_editable, ())
    def test_pk_hidden_fields(self):
        """ Ensure that hidden pk fields aren't displayed in the table body and
            that their corresponding human-readable value is displayed instead.
            Note that the hidden pk fields are in fact be displayed but
            separately (not in the table), and only once.
            Refs #12475.
        """
        story1 = Story.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
        story2 = Story.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
        response = self.client.get('/test_admin/admin/admin_views/story/')
        self.assertContains(response, 'id="id_form-0-id"', 1) # Only one hidden field, in a separate place than the table.
        self.assertContains(response, 'id="id_form-1-id"', 1)
        self.assertContains(response, '<div class="hiddenfields">\n<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" />\n</div>' % (story2.id, story1.id))
        self.assertContains(response, '<td>%d</td>' % story1.id, 1)
        self.assertContains(response, '<td>%d</td>' % story2.id, 1)
    def test_pk_hidden_fields_with_list_display_links(self):
        """ Similarly as test_pk_hidden_fields, but when the hidden pk fields are
            referenced in list_display_links.
            Refs #12475.
        """
        story1 = OtherStory.objects.create(title='The adventures of Guido', content='Once upon a time in Djangoland...')
        story2 = OtherStory.objects.create(title='Crouching Tiger, Hidden Python', content='The Python was sneaking into...')
        response = self.client.get('/test_admin/admin/admin_views/otherstory/')
        self.assertContains(response, 'id="id_form-0-id"', 1) # Only one hidden field, in a separate place than the table.
        self.assertContains(response, 'id="id_form-1-id"', 1)
        self.assertContains(response, '<div class="hiddenfields">\n<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /><input type="hidden" name="form-1-id" value="%d" id="id_form-1-id" />\n</div>' % (story2.id, story1.id))
        # The pk is rendered as a link since it is in list_display_links.
        self.assertContains(response, '<th><a href="%d/">%d</a></th>' % (story1.id, story1.id), 1)
        self.assertContains(response, '<th><a href="%d/">%d</a></th>' % (story2.id, story2.id), 1)
class AdminSearchTest(TestCase):
    """Tests for the admin changelist search box."""
    fixtures = ['admin-views-users', 'multiple-child-classes',
                'admin-views-person']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_search_on_sibling_models(self):
        "Check that a search that mentions sibling models"
        res = self.client.get('/test_admin/admin/admin_views/recommendation/?q=bar')
        # Exactly one object should be reported as found.
        self.assertContains(res, "\n1 recommendation\n")

    def test_with_fk_to_field(self):
        """Ensure that the to_field GET parameter is preserved when a search
        is performed. Refs #10918.
        """
        from django.contrib.admin.views.main import TO_FIELD_VAR
        res = self.client.get('/test_admin/admin/auth/user/?q=joe&%s=username' % TO_FIELD_VAR)
        self.assertContains(res, "\n1 user\n")
        # The to_field parameter must survive as a hidden input.
        self.assertContains(res, '<input type="hidden" name="t" value="username"/>')

    def test_exact_matches(self):
        # An exact-match search field only matches the full value.
        full_match = self.client.get('/test_admin/admin/admin_views/recommendation/?q=bar')
        self.assertContains(full_match, "\n1 recommendation\n")
        partial_match = self.client.get('/test_admin/admin/admin_views/recommendation/?q=ba')
        self.assertContains(partial_match, "\n0 recommendations\n")

    def test_beginning_matches(self):
        # A startswith search field matches prefixes only.
        prefix_match = self.client.get('/test_admin/admin/admin_views/person/?q=Gui')
        self.assertContains(prefix_match, "\n1 person\n")
        self.assertContains(prefix_match, "Guido")
        infix_match = self.client.get('/test_admin/admin/admin_views/person/?q=uido')
        self.assertContains(infix_match, "\n0 persons\n")
        self.assertNotContains(infix_match, "Guido")
class AdminInheritedInlinesTest(TestCase):
    """Tests for inlines whose models share a common (inherited) parent."""
    fixtures = ['admin-views-users.xml',]
    def setUp(self):
        self.client.login(username='super', password='secret')
    def tearDown(self):
        self.client.logout()
    def testInline(self):
        "Ensure that inline models which inherit from a common parent are correctly handled by admin."
        foo_user = u"foo username"
        bar_user = u"bar username"
        # Used to collect every name="" attribute from rendered forms.
        name_re = re.compile('name="(.*?)"')
        # test the add case
        response = self.client.get('/test_admin/admin/admin_views/persona/add/')
        names = name_re.findall(response.content)
        # make sure we have no duplicate HTML names
        self.assertEqual(len(names), len(set(names)))
        # test the add case
        post_data = {
            "name": u"Test Name",
            # inline data
            "accounts-TOTAL_FORMS": u"1",
            "accounts-INITIAL_FORMS": u"0",
            "accounts-MAX_NUM_FORMS": u"0",
            "accounts-0-username": foo_user,
            # Second inline formset (prefix "accounts-2") for the sibling
            # account model.
            "accounts-2-TOTAL_FORMS": u"1",
            "accounts-2-INITIAL_FORMS": u"0",
            "accounts-2-MAX_NUM_FORMS": u"0",
            "accounts-2-0-username": bar_user,
        }
        response = self.client.post('/test_admin/admin/admin_views/persona/add/', post_data)
        self.assertEqual(response.status_code, 302) # redirect somewhere
        # One persona with one account of each sibling type.
        self.assertEqual(Persona.objects.count(), 1)
        self.assertEqual(FooAccount.objects.count(), 1)
        self.assertEqual(BarAccount.objects.count(), 1)
        self.assertEqual(FooAccount.objects.all()[0].username, foo_user)
        self.assertEqual(BarAccount.objects.all()[0].username, bar_user)
        self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
        persona_id = Persona.objects.all()[0].id
        foo_id = FooAccount.objects.all()[0].id
        bar_id = BarAccount.objects.all()[0].id
        # test the edit case
        response = self.client.get('/test_admin/admin/admin_views/persona/%d/' % persona_id)
        names = name_re.findall(response.content)
        # make sure we have no duplicate HTML names
        self.assertEqual(len(names), len(set(names)))
        post_data = {
            "name": u"Test Name",
            "accounts-TOTAL_FORMS": "2",
            "accounts-INITIAL_FORMS": u"1",
            "accounts-MAX_NUM_FORMS": u"0",
            "accounts-0-username": "%s-1" % foo_user,
            "accounts-0-account_ptr": str(foo_id),
            "accounts-0-persona": str(persona_id),
            "accounts-2-TOTAL_FORMS": u"2",
            "accounts-2-INITIAL_FORMS": u"1",
            "accounts-2-MAX_NUM_FORMS": u"0",
            "accounts-2-0-username": "%s-1" % bar_user,
            "accounts-2-0-account_ptr": str(bar_id),
            "accounts-2-0-persona": str(persona_id),
        }
        response = self.client.post('/test_admin/admin/admin_views/persona/%d/' % persona_id, post_data)
        self.assertEqual(response.status_code, 302)
        # Editing must rename the existing accounts, not create new ones.
        self.assertEqual(Persona.objects.count(), 1)
        self.assertEqual(FooAccount.objects.count(), 1)
        self.assertEqual(BarAccount.objects.count(), 1)
        self.assertEqual(FooAccount.objects.all()[0].username, "%s-1" % foo_user)
        self.assertEqual(BarAccount.objects.all()[0].username, "%s-1" % bar_user)
        self.assertEqual(Persona.objects.all()[0].accounts.count(), 2)
class AdminActionsTest(TestCase):
    """
    Tests for admin actions: custom ModelAdmin/function actions, the built-in
    delete_selected action, and the action-form plumbing on the changelist.
    """
    fixtures = ['admin-views-users.xml', 'admin-views-actions.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_model_admin_custom_action(self):
        "Tests a custom action defined in a ModelAdmin method"
        action_data = {
            ACTION_CHECKBOX_NAME: [1],
            'action' : 'mail_admin',
            'index': 0,
        }
        response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Greetings from a ModelAdmin action')

    def test_model_admin_default_delete_action(self):
        "Tests the default delete action defined as a ModelAdmin method"
        action_data = {
            ACTION_CHECKBOX_NAME: [1, 2],
            'action' : 'delete_selected',
            'index': 0,
        }
        delete_confirmation_data = {
            ACTION_CHECKBOX_NAME: [1, 2],
            'action' : 'delete_selected',
            'post': 'yes',
        }
        confirmation = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
        self.assertContains(confirmation, "Are you sure you want to delete the selected subscribers")
        # assertEqual (instead of assertTrue on a comparison) so a failure
        # reports the actual count.
        self.assertEqual(confirmation.content.count(ACTION_CHECKBOX_NAME), 2)
        response = self.client.post('/test_admin/admin/admin_views/subscriber/', delete_confirmation_data)
        self.assertEqual(Subscriber.objects.count(), 0)

    def test_non_localized_pk(self):
        """If USE_THOUSAND_SEPARATOR is set, make sure that the ids for
        the objects selected for deletion are rendered without separators.
        Refs #14895.
        """
        self.old_USE_THOUSAND_SEPARATOR = settings.USE_THOUSAND_SEPARATOR
        self.old_USE_L10N = settings.USE_L10N
        settings.USE_THOUSAND_SEPARATOR = True
        settings.USE_L10N = True
        # try/finally so a failing assertion can't leak the modified
        # settings into other tests.
        try:
            subscriber = Subscriber.objects.get(id=1)
            subscriber.id = 9999
            subscriber.save()
            action_data = {
                ACTION_CHECKBOX_NAME: [9999, 2],
                'action' : 'delete_selected',
                'index': 0,
            }
            response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
            self.assertTemplateUsed(response, 'admin/delete_selected_confirmation.html')
            # Separate assertions give a precise failure message.
            self.assertTrue('value="9999"' in response.content) # Instead of 9,999
            self.assertTrue('value="2"' in response.content)
        finally:
            settings.USE_THOUSAND_SEPARATOR = self.old_USE_THOUSAND_SEPARATOR
            settings.USE_L10N = self.old_USE_L10N

    def test_model_admin_default_delete_action_protected(self):
        """
        Tests the default delete action defined as a ModelAdmin method in the
        case where some related objects are protected from deletion.
        """
        q1 = Question.objects.create(question="Why?")
        a1 = Answer.objects.create(question=q1, answer="Because.")
        a2 = Answer.objects.create(question=q1, answer="Yes.")
        q2 = Question.objects.create(question="Wherefore?")
        action_data = {
            ACTION_CHECKBOX_NAME: [q1.pk, q2.pk],
            'action' : 'delete_selected',
            'index': 0,
        }
        response = self.client.post("/test_admin/admin/admin_views/question/", action_data)
        self.assertContains(response, "would require deleting the following protected related objects")
        self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Because.</a></li>' % a1.pk)
        self.assertContains(response, '<li>Answer: <a href="/test_admin/admin/admin_views/answer/%s/">Yes.</a></li>' % a2.pk)

    def test_custom_function_mail_action(self):
        "Tests a custom action defined in a function"
        action_data = {
            ACTION_CHECKBOX_NAME: [1],
            'action' : 'external_mail',
            'index': 0,
        }
        response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Greetings from a function action')

    def test_custom_function_action_with_redirect(self):
        "Tests a custom action defined in a function"
        action_data = {
            ACTION_CHECKBOX_NAME: [1],
            'action' : 'redirect_to',
            'index': 0,
        }
        response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
        self.assertEqual(response.status_code, 302)

    def test_default_redirect(self):
        """
        Test that actions which don't return an HttpResponse are redirected to
        the same page, retaining the querystring (which may contain changelist
        information).
        """
        action_data = {
            ACTION_CHECKBOX_NAME: [1],
            'action' : 'external_mail',
            'index': 0,
        }
        url = '/test_admin/admin/admin_views/externalsubscriber/?ot=asc&o=1'
        response = self.client.post(url, action_data)
        self.assertRedirects(response, url)

    def test_model_without_action(self):
        "Tests a ModelAdmin without any action"
        response = self.client.get('/test_admin/admin/admin_views/oldsubscriber/')
        self.assertEqual(response.context["action_form"], None)
        self.assertTrue(
            '<input type="checkbox" class="action-select"' not in response.content,
            "Found an unexpected action toggle checkboxbox in response"
        )
        self.assertTrue('action-checkbox-column' not in response.content,
            "Found unexpected action-checkbox-column class in response")

    def test_model_without_action_still_has_jquery(self):
        "Tests that a ModelAdmin without any actions still gets jQuery included in page"
        response = self.client.get('/test_admin/admin/admin_views/oldsubscriber/')
        self.assertEqual(response.context["action_form"], None)
        self.assertTrue('jquery.min.js' in response.content,
            "jQuery missing from admin pages for model with no admin actions"
        )

    def test_action_column_class(self):
        "Tests that the checkbox column class is present in the response"
        response = self.client.get('/test_admin/admin/admin_views/subscriber/')
        self.assertNotEqual(response.context["action_form"], None)
        self.assertTrue('action-checkbox-column' in response.content,
            "Expected an action-checkbox-column in response")

    def test_multiple_actions_form(self):
        """
        Test that actions come from the form whose submit button was pressed (#10618).
        """
        action_data = {
            ACTION_CHECKBOX_NAME: [1],
            # Two different actions selected on the two forms...
            'action': ['external_mail', 'delete_selected'],
            # ...but we clicked "go" on the top form.
            'index': 0
        }
        response = self.client.post('/test_admin/admin/admin_views/externalsubscriber/', action_data)
        # Send mail, don't delete.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Greetings from a function action')

    def test_user_message_on_none_selected(self):
        """
        User should see a warning when 'Go' is pressed and no items are selected.
        """
        action_data = {
            ACTION_CHECKBOX_NAME: [],
            'action' : 'delete_selected',
            'index': 0,
        }
        response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
        msg = """Items must be selected in order to perform actions on them. No items have been changed."""
        self.assertContains(response, msg)
        self.assertEqual(Subscriber.objects.count(), 2)

    def test_user_message_on_no_action(self):
        """
        User should see a warning when 'Go' is pressed and no action is selected.
        """
        action_data = {
            ACTION_CHECKBOX_NAME: [1, 2],
            'action' : '',
            'index': 0,
        }
        response = self.client.post('/test_admin/admin/admin_views/subscriber/', action_data)
        msg = """No action selected."""
        self.assertContains(response, msg)
        self.assertEqual(Subscriber.objects.count(), 2)

    def test_selection_counter(self):
        """
        Check if the selection counter is there.
        """
        response = self.client.get('/test_admin/admin/admin_views/subscriber/')
        self.assertContains(response, '0 of 2 selected')

    def test_popup_actions(self):
        """ Actions should not be shown in popups. """
        response = self.client.get('/test_admin/admin/admin_views/subscriber/')
        # assertNotEqual (not the deprecated assertNotEquals alias), matching
        # the rest of this suite.
        self.assertNotEqual(response.context["action_form"], None)
        response = self.client.get(
            '/test_admin/admin/admin_views/subscriber/?%s' % IS_POPUP_VAR)
        self.assertEqual(response.context["action_form"], None)
class TestCustomChangeList(TestCase):
    """Tests for a ModelAdmin using a custom ChangeList subclass."""
    fixtures = ['admin-views-users.xml']
    urlbit = 'admin'

    def setUp(self):
        logged_in = self.client.login(username='super', password='secret')
        self.assertEqual(logged_in, True)

    def tearDown(self):
        self.client.logout()

    def test_custom_changelist(self):
        """
        Validate that a custom ChangeList class can be used (#9749)
        """
        # Create one gadget through the admin add view.
        add_url = '/test_admin/%s/admin_views/gadget/add/' % self.urlbit
        response = self.client.post(add_url, {"name": u"First Gadget"})
        self.assertEqual(response.status_code, 302) # redirect somewhere
        # Hit the page once to get messages out of the queue message list
        changelist_url = '/test_admin/%s/admin_views/gadget/' % self.urlbit
        self.client.get(changelist_url)
        # The custom ChangeList hides everything, so the new gadget must
        # not appear on the page.
        response = self.client.get(changelist_url)
        self.assertEqual(response.status_code, 200)
        self.assertNotContains(response, 'First Gadget')
class TestInlineNotEditable(TestCase):
    """Smoke test for rendering an add view containing inlines."""
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        logged_in = self.client.login(username='super', password='secret')
        self.assertEqual(logged_in, True)

    def tearDown(self):
        self.client.logout()

    def test(self):
        """
        InlineModelAdmin broken?
        """
        # The add view for a parent with inlines must render without errors.
        page = self.client.get('/test_admin/admin/admin_views/parent/add/')
        self.assertEqual(page.status_code, 200)
class AdminCustomQuerysetTest(TestCase):
    """
    Tests for ModelAdmins that customize queryset() (filtered querysets,
    only(), defer()).
    """
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')
        self.pks = [EmptyModel.objects.create().id for i in range(3)]

    def tearDown(self):
        # Log out between tests, consistent with the other admin test classes.
        self.client.logout()

    def test_changelist_view(self):
        response = self.client.get('/test_admin/admin/admin_views/emptymodel/')
        for i in self.pks:
            # The changelist only shows objects with pk > 1 (the ModelAdmin's
            # queryset filters the rest out).
            if i > 1:
                self.assertContains(response, 'Primary key = %s' % i)
            else:
                self.assertNotContains(response, 'Primary key = %s' % i)

    def test_change_view(self):
        for i in self.pks:
            response = self.client.get('/test_admin/admin/admin_views/emptymodel/%s/' % i)
            # Objects excluded by the custom queryset 404 in the change view.
            if i > 1:
                self.assertEqual(response.status_code, 200)
            else:
                self.assertEqual(response.status_code, 404)

    def test_add_model_modeladmin_only_qs(self):
        # only() is used in ModelAdmin.queryset()
        p = Paper.objects.create(title=u"My Paper Title")
        self.assertEqual(Paper.objects.count(), 1)
        response = self.client.get('/test_admin/admin/admin_views/paper/%s/' % p.pk)
        self.assertEqual(response.status_code, 200)
        post_data = {
            "title": u"My Modified Paper Title",
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/paper/%s/' % p.pk,
                post_data, follow=True)
        self.assertEqual(response.status_code, 200)
        # Message should contain non-ugly model name. Instance representation is set by unicode() (ugly)
        self.assertContains(response, '<li class="info">The paper "Paper_Deferred_author object" was changed successfully.</li>')
        # defer() is used in ModelAdmin.queryset()
        cl = CoverLetter.objects.create(author=u"John Doe")
        self.assertEqual(CoverLetter.objects.count(), 1)
        response = self.client.get('/test_admin/admin/admin_views/coverletter/%s/' % cl.pk)
        self.assertEqual(response.status_code, 200)
        post_data = {
            "author": u"John Doe II",
            "_save": "Save",
        }
        response = self.client.post('/test_admin/admin/admin_views/coverletter/%s/' % cl.pk,
                post_data, follow=True)
        self.assertEqual(response.status_code, 200)
        # Message should contain non-ugly model name. Instance representation is set by model's __unicode__()
        self.assertContains(response, '<li class="info">The cover letter "John Doe II" was changed successfully.</li>')
class AdminInlineFileUploadTest(TestCase):
    """Tests for FileField handling in admin inlines."""
    fixtures = ['admin-views-users.xml', 'admin-views-actions.xml']
    urlbit = 'admin'

    def setUp(self):
        self.client.login(username='super', password='secret')
        # Set up test Picture and Gallery.
        # These must be set up here instead of in fixtures in order to allow Picture
        # to use a NamedTemporaryFile.
        tdir = tempfile.gettempdir()
        file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir)
        file1.write('a' * (2 ** 21))
        filename = file1.name
        file1.close()
        self.gallery = Gallery(name="Test Gallery")
        self.gallery.save()
        self.picture = Picture(name="Test Picture", image=filename, gallery=self.gallery)
        self.picture.save()

    def tearDown(self):
        self.client.logout()

    def test_inline_file_upload_edit_validation_error_post(self):
        """
        Test that inline file uploads correctly display prior data (#10002).
        """
        post_data = {
            "name": u"Test Gallery",
            "pictures-TOTAL_FORMS": u"2",
            "pictures-INITIAL_FORMS": u"1",
            "pictures-MAX_NUM_FORMS": u"0",
            "pictures-0-id": unicode(self.picture.id),
            "pictures-0-gallery": unicode(self.gallery.id),
            "pictures-0-name": "Test Picture",
            "pictures-0-image": "",
            "pictures-1-id": "",
            # unicode() for consistency with the form-0 values above.
            "pictures-1-gallery": unicode(self.gallery.id),
            "pictures-1-name": "Test Picture 2",
            "pictures-1-image": "",
        }
        response = self.client.post('/test_admin/%s/admin_views/gallery/%d/' % (self.urlbit, self.gallery.id), post_data)
        # Use the public response API rather than poking at the private
        # _container attribute (which also only inspected the first chunk):
        # the re-rendered form must still show the "Currently:" link for the
        # picture's existing file.
        self.assertContains(response, "Currently:")
class AdminInlineTests(TestCase):
    """Tests saving various kinds of inline models through the Collector admin."""
    fixtures = ['admin-views-users.xml']
    def setUp(self):
        # One empty inline formset for each inline attached to Collector.
        # Individual tests fill in the fields of one formset and post the
        # whole dict to the collector change view.
        self.post_data = {
            "name": u"Test Name",
            "widget_set-TOTAL_FORMS": "3",
            "widget_set-INITIAL_FORMS": u"0",
            "widget_set-MAX_NUM_FORMS": u"0",
            "widget_set-0-id": "",
            "widget_set-0-owner": "1",
            "widget_set-0-name": "",
            "widget_set-1-id": "",
            "widget_set-1-owner": "1",
            "widget_set-1-name": "",
            "widget_set-2-id": "",
            "widget_set-2-owner": "1",
            "widget_set-2-name": "",
            "doohickey_set-TOTAL_FORMS": "3",
            "doohickey_set-INITIAL_FORMS": u"0",
            "doohickey_set-MAX_NUM_FORMS": u"0",
            "doohickey_set-0-owner": "1",
            "doohickey_set-0-code": "",
            "doohickey_set-0-name": "",
            "doohickey_set-1-owner": "1",
            "doohickey_set-1-code": "",
            "doohickey_set-1-name": "",
            "doohickey_set-2-owner": "1",
            "doohickey_set-2-code": "",
            "doohickey_set-2-name": "",
            "grommet_set-TOTAL_FORMS": "3",
            "grommet_set-INITIAL_FORMS": u"0",
            "grommet_set-MAX_NUM_FORMS": u"0",
            "grommet_set-0-code": "",
            "grommet_set-0-owner": "1",
            "grommet_set-0-name": "",
            "grommet_set-1-code": "",
            "grommet_set-1-owner": "1",
            "grommet_set-1-name": "",
            "grommet_set-2-code": "",
            "grommet_set-2-owner": "1",
            "grommet_set-2-name": "",
            "whatsit_set-TOTAL_FORMS": "3",
            "whatsit_set-INITIAL_FORMS": u"0",
            "whatsit_set-MAX_NUM_FORMS": u"0",
            "whatsit_set-0-owner": "1",
            "whatsit_set-0-index": "",
            "whatsit_set-0-name": "",
            "whatsit_set-1-owner": "1",
            "whatsit_set-1-index": "",
            "whatsit_set-1-name": "",
            "whatsit_set-2-owner": "1",
            "whatsit_set-2-index": "",
            "whatsit_set-2-name": "",
            "fancydoodad_set-TOTAL_FORMS": "3",
            "fancydoodad_set-INITIAL_FORMS": u"0",
            "fancydoodad_set-MAX_NUM_FORMS": u"0",
            "fancydoodad_set-0-doodad_ptr": "",
            "fancydoodad_set-0-owner": "1",
            "fancydoodad_set-0-name": "",
            "fancydoodad_set-0-expensive": "on",
            "fancydoodad_set-1-doodad_ptr": "",
            "fancydoodad_set-1-owner": "1",
            "fancydoodad_set-1-name": "",
            "fancydoodad_set-1-expensive": "on",
            "fancydoodad_set-2-doodad_ptr": "",
            "fancydoodad_set-2-owner": "1",
            "fancydoodad_set-2-name": "",
            "fancydoodad_set-2-expensive": "on",
            "category_set-TOTAL_FORMS": "3",
            "category_set-INITIAL_FORMS": "0",
            "category_set-MAX_NUM_FORMS": "0",
            "category_set-0-order": "",
            "category_set-0-id": "",
            "category_set-0-collector": "1",
            "category_set-1-order": "",
            "category_set-1-id": "",
            "category_set-1-collector": "1",
            "category_set-2-order": "",
            "category_set-2-id": "",
            "category_set-2-collector": "1",
        }
        result = self.client.login(username='super', password='secret')
        self.assertEqual(result, True)
        # The owner all the inline data above points at (pk=1).
        self.collector = Collector(pk=1,name='John Fowles')
        self.collector.save()
def tearDown(self):
    # Log the test client out so authentication state does not leak
    # into subsequent tests.
    self.client.logout()
def test_simple_inline(self):
    "A simple model can be saved as inlines"
    collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk

    def post_and_verify(expected_name):
        # Submit the change form and confirm exactly one Widget with the
        # expected name survives the round trip (302 == successful save).
        response = self.client.post(collector_url, self.post_data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Widget.objects.count(), 1)
        self.assertEqual(Widget.objects.all()[0].name, expected_name)

    # First add a new inline
    self.post_data['widget_set-0-name'] = "Widget 1"
    post_and_verify("Widget 1")
    widget_id = Widget.objects.all()[0].id
    # Check that the PK link exists on the rendered form
    response = self.client.get(collector_url)
    self.assertContains(response, 'name="widget_set-0-id"')
    # Now resave that inline
    self.post_data['widget_set-INITIAL_FORMS'] = "1"
    self.post_data['widget_set-0-id'] = str(widget_id)
    self.post_data['widget_set-0-name'] = "Widget 1"
    post_and_verify("Widget 1")
    # Now modify that inline
    self.post_data['widget_set-INITIAL_FORMS'] = "1"
    self.post_data['widget_set-0-id'] = str(widget_id)
    self.post_data['widget_set-0-name'] = "Widget 1 Updated"
    post_and_verify("Widget 1 Updated")
def test_explicit_autofield_inline(self):
    "A model with an explicit autofield primary key can be saved as inlines. Regression for #8093"
    # First add a new inline
    self.post_data['grommet_set-0-name'] = "Grommet 1"
    collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
    response = self.client.post(collector_url, self.post_data)
    # A successful admin save responds with a redirect.
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Grommet.objects.count(), 1)
    self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
    # Check that the PK link exists on the rendered form
    response = self.client.get(collector_url)
    self.assertContains(response, 'name="grommet_set-0-code"')
    # Now resave that inline
    self.post_data['grommet_set-INITIAL_FORMS'] = "1"
    self.post_data['grommet_set-0-code'] = str(Grommet.objects.all()[0].code)
    self.post_data['grommet_set-0-name'] = "Grommet 1"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Grommet.objects.count(), 1)
    self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1")
    # Now modify that inline
    self.post_data['grommet_set-INITIAL_FORMS'] = "1"
    self.post_data['grommet_set-0-code'] = str(Grommet.objects.all()[0].code)
    self.post_data['grommet_set-0-name'] = "Grommet 1 Updated"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Grommet.objects.count(), 1)
    self.assertEqual(Grommet.objects.all()[0].name, "Grommet 1 Updated")
def test_char_pk_inline(self):
    "A model with a character PK can be saved as inlines. Regression for #10992"
    # First add a new inline
    self.post_data['doohickey_set-0-code'] = "DH1"
    self.post_data['doohickey_set-0-name'] = "Doohickey 1"
    collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
    response = self.client.post(collector_url, self.post_data)
    # A successful admin save responds with a redirect.
    self.assertEqual(response.status_code, 302)
    self.assertEqual(DooHickey.objects.count(), 1)
    self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
    # Check that the PK link exists on the rendered form
    response = self.client.get(collector_url)
    self.assertContains(response, 'name="doohickey_set-0-code"')
    # Now resave that inline
    self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
    self.post_data['doohickey_set-0-code'] = "DH1"
    self.post_data['doohickey_set-0-name'] = "Doohickey 1"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(DooHickey.objects.count(), 1)
    self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1")
    # Now modify that inline
    self.post_data['doohickey_set-INITIAL_FORMS'] = "1"
    self.post_data['doohickey_set-0-code'] = "DH1"
    self.post_data['doohickey_set-0-name'] = "Doohickey 1 Updated"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(DooHickey.objects.count(), 1)
    self.assertEqual(DooHickey.objects.all()[0].name, "Doohickey 1 Updated")
def test_integer_pk_inline(self):
    "A model with an integer PK can be saved as inlines. Regression for #10992"
    # Derive the URL from the fixture's actual pk, like the sibling tests,
    # instead of hard-coding "/collector/1/" and silently depending on the
    # pk assigned in setUp.
    collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
    # First add a new inline
    self.post_data['whatsit_set-0-index'] = "42"
    self.post_data['whatsit_set-0-name'] = "Whatsit 1"
    response = self.client.post(collector_url, self.post_data)
    # A successful admin save responds with a redirect.
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Whatsit.objects.count(), 1)
    self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
    # Check that the PK link exists on the rendered form
    response = self.client.get(collector_url)
    self.assertContains(response, 'name="whatsit_set-0-index"')
    # Now resave that inline
    self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
    self.post_data['whatsit_set-0-index'] = "42"
    self.post_data['whatsit_set-0-name'] = "Whatsit 1"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Whatsit.objects.count(), 1)
    self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1")
    # Now modify that inline
    self.post_data['whatsit_set-INITIAL_FORMS'] = "1"
    self.post_data['whatsit_set-0-index'] = "42"
    self.post_data['whatsit_set-0-name'] = "Whatsit 1 Updated"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(Whatsit.objects.count(), 1)
    self.assertEqual(Whatsit.objects.all()[0].name, "Whatsit 1 Updated")
def test_inherited_inline(self):
    "An inherited model can be saved as inlines. Regression for #11042"
    # First add a new inline
    self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
    collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
    response = self.client.post(collector_url, self.post_data)
    # A successful admin save responds with a redirect.
    self.assertEqual(response.status_code, 302)
    self.assertEqual(FancyDoodad.objects.count(), 1)
    self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
    doodad_pk = FancyDoodad.objects.all()[0].pk
    # Check that the PK link exists on the rendered form; for an inherited
    # model the PK field is the parent-link pointer, not "id".
    response = self.client.get(collector_url)
    self.assertContains(response, 'name="fancydoodad_set-0-doodad_ptr"')
    # Now resave that inline
    self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
    self.post_data['fancydoodad_set-0-doodad_ptr'] = str(doodad_pk)
    self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(FancyDoodad.objects.count(), 1)
    self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1")
    # Now modify that inline
    self.post_data['fancydoodad_set-INITIAL_FORMS'] = "1"
    self.post_data['fancydoodad_set-0-doodad_ptr'] = str(doodad_pk)
    self.post_data['fancydoodad_set-0-name'] = "Fancy Doodad 1 Updated"
    response = self.client.post(collector_url, self.post_data)
    self.assertEqual(response.status_code, 302)
    self.assertEqual(FancyDoodad.objects.count(), 1)
    self.assertEqual(FancyDoodad.objects.all()[0].name, "Fancy Doodad 1 Updated")
def test_ordered_inline(self):
    """Check that an inline with an editable ordering fields is
    updated correctly. Regression for #10922"""
    # Create some objects with an initial ordering
    Category.objects.create(id=1, order=1, collector=self.collector)
    Category.objects.create(id=2, order=2, collector=self.collector)
    Category.objects.create(id=3, order=0, collector=self.collector)
    Category.objects.create(id=4, order=0, collector=self.collector)
    # NB: The order values must be changed so that the items are reordered.
    self.post_data.update({
        "name": "Frederick Clegg",

        "category_set-TOTAL_FORMS": "7",
        "category_set-INITIAL_FORMS": "4",
        "category_set-MAX_NUM_FORMS": "0",

        "category_set-0-order": "14",
        "category_set-0-id": "1",
        "category_set-0-collector": "1",

        "category_set-1-order": "13",
        "category_set-1-id": "2",
        "category_set-1-collector": "1",

        "category_set-2-order": "1",
        "category_set-2-id": "3",
        "category_set-2-collector": "1",

        "category_set-3-order": "0",
        "category_set-3-id": "4",
        "category_set-3-collector": "1",

        "category_set-4-order": "",
        "category_set-4-id": "",
        "category_set-4-collector": "1",

        "category_set-5-order": "",
        "category_set-5-id": "",
        "category_set-5-collector": "1",

        "category_set-6-order": "",
        "category_set-6-id": "",
        "category_set-6-collector": "1",
    })
    # Derive the URL from the fixture's actual pk, like the other inline
    # tests in this class, instead of hard-coding "/collector/1/".
    collector_url = '/test_admin/admin/admin_views/collector/%d/' % self.collector.pk
    response = self.client.post(collector_url, self.post_data)
    # Successful post will redirect
    self.assertEqual(response.status_code, 302)
    # Check that the order values have been applied to the right objects
    self.assertEqual(self.collector.category_set.count(), 4)
    self.assertEqual(Category.objects.get(id=1).order, 14)
    self.assertEqual(Category.objects.get(id=2).order, 13)
    self.assertEqual(Category.objects.get(id=3).order, 1)
    self.assertEqual(Category.objects.get(id=4).order, 0)
class NeverCacheTests(TestCase):
    """Verify that admin views emit never-cache headers (max-age 0) while
    non-admin contrib views (password change, jsi18n) remain uncached-managed
    (max-age None)."""
    fixtures = ['admin-views-users.xml', 'admin-views-colors.xml', 'admin-views-fabrics.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def testAdminIndex(self):
        "Check the never-cache status of the main index"
        response = self.client.get('/test_admin/admin/')
        self.assertEqual(get_max_age(response), 0)

    def testAppIndex(self):
        "Check the never-cache status of an application index"
        response = self.client.get('/test_admin/admin/admin_views/')
        self.assertEqual(get_max_age(response), 0)

    def testModelIndex(self):
        "Check the never-cache status of a model index"
        response = self.client.get('/test_admin/admin/admin_views/fabric/')
        self.assertEqual(get_max_age(response), 0)

    def testModelAdd(self):
        "Check the never-cache status of a model add page"
        response = self.client.get('/test_admin/admin/admin_views/fabric/add/')
        self.assertEqual(get_max_age(response), 0)

    def testModelView(self):
        "Check the never-cache status of a model edit page"
        response = self.client.get('/test_admin/admin/admin_views/section/1/')
        self.assertEqual(get_max_age(response), 0)

    def testModelHistory(self):
        "Check the never-cache status of a model history page"
        response = self.client.get('/test_admin/admin/admin_views/section/1/history/')
        self.assertEqual(get_max_age(response), 0)

    def testModelDelete(self):
        "Check the never-cache status of a model delete page"
        response = self.client.get('/test_admin/admin/admin_views/section/1/delete/')
        self.assertEqual(get_max_age(response), 0)

    def testLogin(self):
        "Check the never-cache status of login views"
        # Logging out first means the GET below renders the login form.
        self.client.logout()
        response = self.client.get('/test_admin/admin/')
        self.assertEqual(get_max_age(response), 0)

    def testLogout(self):
        "Check the never-cache status of logout view"
        response = self.client.get('/test_admin/admin/logout/')
        self.assertEqual(get_max_age(response), 0)

    def testPasswordChange(self):
        "Check the never-cache status of the password change view"
        self.client.logout()
        response = self.client.get('/test_admin/password_change/')
        # No max-age header expected here at all.
        self.assertEqual(get_max_age(response), None)

    def testPasswordChangeDone(self):
        "Check the never-cache status of the password change done view"
        response = self.client.get('/test_admin/admin/password_change/done/')
        self.assertEqual(get_max_age(response), None)

    def testJsi18n(self):
        "Check the never-cache status of the Javascript i18n view"
        response = self.client.get('/test_admin/admin/jsi18n/')
        self.assertEqual(get_max_age(response), None)
class ReadonlyTest(TestCase):
    """Tests for ModelAdmin.readonly_fields: readonly values are rendered as
    text (not inputs) and submitted values for readonly fields are ignored."""
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_readonly_get(self):
        response = self.client.get('/test_admin/admin/admin_views/post/add/')
        self.assertEqual(response.status_code, 200)
        # The readonly "posted" field must not be rendered as a form input.
        self.assertNotContains(response, 'name="posted"')
        # 3 fields + 2 submit buttons + 4 inline management form fields, + 2
        # hidden fields for inlines + 1 field for the inline + 2 empty form
        self.assertEqual(response.content.count("<input"), 14)
        self.assertContains(response, formats.localize(datetime.date.today()))
        self.assertContains(response,
            "<label>Awesomeness level:</label>")
        self.assertContains(response, "Very awesome.")
        # NOTE(review): "Unkown" is the exact text the page renders (it comes
        # from outside this block) — do not "correct" the spelling here.
        self.assertContains(response, "Unkown coolness.")
        self.assertContains(response, "foo")
        self.assertContains(response,
            formats.localize(datetime.date.today() - datetime.timedelta(days=7))
        )
        self.assertContains(response, '<div class="form-row coolness">')
        self.assertContains(response, '<div class="form-row awesomeness_level">')
        self.assertContains(response, '<div class="form-row posted">')
        self.assertContains(response, '<div class="form-row value">')
        self.assertContains(response, '<div class="form-row ">')
        self.assertContains(response, '<p class="help">', 3)
        self.assertContains(response, '<p class="help">Some help text for the title (with unicode ŠĐĆŽćžšđ)</p>')
        self.assertContains(response, '<p class="help">Some help text for the content (with unicode ŠĐĆŽćžšđ)</p>')
        self.assertContains(response, '<p class="help">Some help text for the date (with unicode ŠĐĆŽćžšđ)</p>')
        p = Post.objects.create(title="I worked on readonly_fields", content="Its good stuff")
        response = self.client.get('/test_admin/admin/admin_views/post/%d/' % p.pk)
        self.assertContains(response, "%d amount of cool" % p.pk)

    def test_readonly_post(self):
        data = {
            "title": "Django Got Readonly Fields",
            "content": "This is an incredible development.",
            "link_set-TOTAL_FORMS": "1",
            "link_set-INITIAL_FORMS": "0",
            "link_set-MAX_NUM_FORMS": "0",
        }
        response = self.client.post('/test_admin/admin/admin_views/post/add/', data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Post.objects.count(), 1)
        p = Post.objects.get()
        self.assertEqual(p.posted, datetime.date.today())

        # Submitting a value for the readonly "posted" field must not change
        # the stored value.
        data["posted"] = "10-8-1990" # some date that's not today
        response = self.client.post('/test_admin/admin/admin_views/post/add/', data)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(Post.objects.count(), 2)
        p = Post.objects.order_by('-id')[0]
        self.assertEqual(p.posted, datetime.date.today())

    def test_readonly_manytomany(self):
        "Regression test for #13004"
        response = self.client.get('/test_admin/admin/admin_views/pizza/add/')
        self.assertEqual(response.status_code, 200)
class RawIdFieldsTest(TestCase):
    """Tests for raw_id_fields lookups, in particular that limit_choices_to
    filters carry through to the raw-id popup."""
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_limit_choices_to(self):
        """Regression test for 14880"""
        # This includes tests integers, strings and booleans in the lookup query string
        actor = Actor.objects.create(name="Palin", age=27)
        inquisition1 = Inquisition.objects.create(expected=True,
                                                  leader=actor,
                                                  country="England")
        inquisition2 = Inquisition.objects.create(expected=False,
                                                  leader=actor,
                                                  country="Spain")
        response = self.client.get('/test_admin/admin/admin_views/sketch/add/')
        # Find the link
        m = re.search(r'<a href="([^"]*)"[^>]* id="lookup_id_inquisition"', response.content)
        self.assertTrue(m)  # Got a match
        # BUG FIX: the original `replace("&", "&")` was a no-op. The href is
        # HTML-escaped in the rendered page, so the "&amp;" entity must be
        # unescaped back to "&" before the URL is requested, otherwise the
        # query-string filters are lost.
        popup_url = m.groups()[0].replace("&amp;", "&")
        # Handle relative links
        popup_url = urlparse.urljoin(response.request['PATH_INFO'], popup_url)
        # Get the popup
        response2 = self.client.get(popup_url)
        self.assertContains(response2, "Spain")
        self.assertNotContains(response2, "England")
class UserAdminTest(TestCase):
    """
    Tests user CRUD functionality.
    """
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_save_button(self):
        user_count = User.objects.count()
        response = self.client.post('/test_admin/admin/auth/user/add/', {
            'username': 'newuser',
            'password1': 'newpassword',
            'password2': 'newpassword',
        })
        new_user = User.objects.order_by('-id')[0]
        # "Save" redirects to the new user's change page.
        self.assertRedirects(response, '/test_admin/admin/auth/user/%s/' % new_user.pk)
        self.assertEqual(User.objects.count(), user_count + 1)
        # The password must have been hashed, not left unusable.
        self.assertNotEqual(new_user.password, UNUSABLE_PASSWORD)

    def test_save_continue_editing_button(self):
        user_count = User.objects.count()
        response = self.client.post('/test_admin/admin/auth/user/add/', {
            'username': 'newuser',
            'password1': 'newpassword',
            'password2': 'newpassword',
            '_continue': '1',
        })
        new_user = User.objects.order_by('-id')[0]
        self.assertRedirects(response, '/test_admin/admin/auth/user/%s/' % new_user.pk)
        self.assertEqual(User.objects.count(), user_count + 1)
        self.assertNotEqual(new_user.password, UNUSABLE_PASSWORD)

    def test_password_mismatch(self):
        response = self.client.post('/test_admin/admin/auth/user/add/', {
            'username': 'newuser',
            'password1': 'newpassword',
            'password2': 'mismatch',
        })
        # The form re-renders (200) with the error attached to password2.
        self.assertEqual(response.status_code, 200)
        adminform = response.context['adminform']
        self.assertTrue('password' not in adminform.form.errors)
        self.assertEqual(adminform.form.errors['password2'],
                          [u"The two password fields didn't match."])

    def test_user_fk_popup(self):
        """Quick user addition in a FK popup shouldn't invoke view for further user customization"""
        response = self.client.get('/test_admin/admin/admin_views/album/add/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '/test_admin/admin/auth/user/add')
        self.assertContains(response, 'class="add-another" id="add_id_owner" onclick="return showAddAnotherPopup(this);"')
        response = self.client.get('/test_admin/admin/auth/user/add/?_popup=1')
        self.assertEqual(response.status_code, 200)
        # Popup add form must not offer the continue/add-another buttons.
        self.assertNotContains(response, 'name="_continue"')
        self.assertNotContains(response, 'name="_addanother"')
        data = {
            'username': 'newuser',
            'password1': 'newpassword',
            'password2': 'newpassword',
            '_popup': '1',
            '_save': '1',
        }
        response = self.client.post('/test_admin/admin/auth/user/add/?_popup=1', data, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'dismissAddAnotherPopup')

    def test_save_add_another_button(self):
        user_count = User.objects.count()
        response = self.client.post('/test_admin/admin/auth/user/add/', {
            'username': 'newuser',
            'password1': 'newpassword',
            'password2': 'newpassword',
            '_addanother': '1',
        })
        new_user = User.objects.order_by('-id')[0]
        # "Save and add another" redirects back to the add form.
        self.assertRedirects(response, '/test_admin/admin/auth/user/add/')
        self.assertEqual(User.objects.count(), user_count + 1)
        self.assertNotEqual(new_user.password, UNUSABLE_PASSWORD)
try:
    # docutils is an optional dependency; when it is missing the admindocs
    # tests below are skipped (see the skipUnless wrapping of AdminDocsTest).
    import docutils
except ImportError:
    docutils = None
#@unittest.skipUnless(docutils, "no docutils installed.")
class AdminDocsTest(TestCase):
    """Smoke tests for the admindocs tag/filter reference pages."""
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()

    def test_tags(self):
        response = self.client.get('/test_admin/admin/doc/tags/')

        # The builtin tag group exists
        self.assertContains(response, "<h2>Built-in tags</h2>", count=2)

        # A builtin tag exists in both the index and detail
        self.assertContains(response, '<h3 id="built_in-autoescape">autoescape</h3>')
        self.assertContains(response, '<li><a href="#built_in-autoescape">autoescape</a></li>')

        # An app tag exists in both the index and detail
        self.assertContains(response, '<h3 id="flatpages-get_flatpages">get_flatpages</h3>')
        self.assertContains(response, '<li><a href="#flatpages-get_flatpages">get_flatpages</a></li>')

        # The admin list tag group exists
        self.assertContains(response, "<h2>admin_list</h2>", count=2)

        # An admin list tag exists in both the index and detail
        self.assertContains(response, '<h3 id="admin_list-admin_actions">admin_actions</h3>')
        self.assertContains(response, '<li><a href="#admin_list-admin_actions">admin_actions</a></li>')

    def test_filters(self):
        response = self.client.get('/test_admin/admin/doc/filters/')

        # The builtin filter group exists
        self.assertContains(response, "<h2>Built-in filters</h2>", count=2)

        # A builtin filter exists in both the index and detail
        self.assertContains(response, '<h3 id="built_in-add">add</h3>')
        self.assertContains(response, '<li><a href="#built_in-add">add</a></li>')

# Applied post-hoc because class decorators are unavailable on the oldest
# Python this file supports (the commented decorator above is the intent).
AdminDocsTest = unittest.skipUnless(docutils, "no docutils installed.")(AdminDocsTest)
class ValidXHTMLTests(TestCase):
    """With USE_I18N off and the i18n context processor removed, the admin
    must not render empty lang attributes (invalid XHTML)."""
    fixtures = ['admin-views-users.xml']
    urlbit = 'admin'

    def setUp(self):
        self._context_processors = None
        self._use_i18n, settings.USE_I18N = settings.USE_I18N, False
        if 'django.core.context_processors.i18n' in settings.TEMPLATE_CONTEXT_PROCESSORS:
            self._context_processors = settings.TEMPLATE_CONTEXT_PROCESSORS
            cp = list(settings.TEMPLATE_CONTEXT_PROCESSORS)
            cp.remove('django.core.context_processors.i18n')
            settings.TEMPLATE_CONTEXT_PROCESSORS = tuple(cp)
            # Force re-evaluation of the context processor list
            django.template.context._standard_context_processors = None
        self.client.login(username='super', password='secret')

    def tearDown(self):
        self.client.logout()
        # Restore the settings mutated in setUp.
        if self._context_processors is not None:
            settings.TEMPLATE_CONTEXT_PROCESSORS = self._context_processors
            # Force re-evaluation of the context processor list
            django.template.context._standard_context_processors = None
        settings.USE_I18N = self._use_i18n

    def testLangNamePresent(self):
        response = self.client.get('/test_admin/%s/admin_views/' % self.urlbit)
        self.assertFalse(' lang=""' in response.content)
        self.assertFalse(' xml:lang=""' in response.content)
class DateHierarchyTests(TestCase):
    """Tests for the admin changelist date_hierarchy links: which drill-down
    level (year/month/day) is shown, and that years are never rendered with a
    thousand separator even when USE_THOUSAND_SEPARATOR is on (refs #15234)."""
    fixtures = ['admin-views-users.xml']

    def setUp(self):
        self.client.login(username='super', password='secret')
        # Turn on localized number formatting so the non-localized-year
        # assertions below are meaningful; restored in tearDown.
        self.old_USE_THOUSAND_SEPARATOR = settings.USE_THOUSAND_SEPARATOR
        self.old_USE_L10N = settings.USE_L10N
        settings.USE_THOUSAND_SEPARATOR = True
        settings.USE_L10N = True

    def tearDown(self):
        settings.USE_THOUSAND_SEPARATOR = self.old_USE_THOUSAND_SEPARATOR
        settings.USE_L10N = self.old_USE_L10N
        # Drop cached format strings computed under the modified settings.
        formats.reset_format_cache()

    def assert_non_localized_year(self, response, year):
        """Ensure that the year is not localized with
        USE_THOUSAND_SEPARATOR. Refs #15234.
        """
        self.assertNotContains(response, formats.number_format(year))

    def assert_contains_year_link(self, response, date):
        self.assertContains(response, '?release_date__year=%d"' % (date.year,))

    def assert_contains_month_link(self, response, date):
        self.assertContains(
            response, '?release_date__year=%d&release_date__month=%d"' % (
                date.year, date.month))

    def assert_contains_day_link(self, response, date):
        self.assertContains(
            response, '?release_date__year=%d&'
            'release_date__month=%d&release_date__day=%d"' % (
                date.year, date.month, date.day))

    def test_empty(self):
        """
        Ensure that no date hierarchy links display with empty changelist.
        """
        response = self.client.get(
            reverse('admin:admin_views_podcast_changelist'))
        self.assertNotContains(response, 'release_date__year=')
        self.assertNotContains(response, 'release_date__month=')
        self.assertNotContains(response, 'release_date__day=')

    def test_single(self):
        """
        Ensure that single day-level date hierarchy appears for single object.
        """
        DATE = datetime.date(2000, 6, 30)
        Podcast.objects.create(release_date=DATE)
        url = reverse('admin:admin_views_podcast_changelist')
        response = self.client.get(url)
        self.assert_contains_day_link(response, DATE)
        self.assert_non_localized_year(response, 2000)

    def test_within_month(self):
        """
        Ensure that day-level links appear for changelist within single month.
        """
        DATES = (datetime.date(2000, 6, 30),
                 datetime.date(2000, 6, 15),
                 datetime.date(2000, 6, 3))
        for date in DATES:
            Podcast.objects.create(release_date=date)
        url = reverse('admin:admin_views_podcast_changelist')
        response = self.client.get(url)
        for date in DATES:
            self.assert_contains_day_link(response, date)
        self.assert_non_localized_year(response, 2000)

    def test_within_year(self):
        """
        Ensure that month-level links appear for changelist within single year.
        """
        DATES = (datetime.date(2000, 1, 30),
                 datetime.date(2000, 3, 15),
                 datetime.date(2000, 5, 3))
        for date in DATES:
            Podcast.objects.create(release_date=date)
        url = reverse('admin:admin_views_podcast_changelist')
        response = self.client.get(url)
        # no day-level links
        self.assertNotContains(response, 'release_date__day=')
        for date in DATES:
            self.assert_contains_month_link(response, date)
        self.assert_non_localized_year(response, 2000)

    def test_multiple_years(self):
        """
        Ensure that year-level links appear for year-spanning changelist.
        """
        DATES = (datetime.date(2001, 1, 30),
                 datetime.date(2003, 3, 15),
                 datetime.date(2005, 5, 3))
        for date in DATES:
            Podcast.objects.create(release_date=date)
        response = self.client.get(
            reverse('admin:admin_views_podcast_changelist'))
        # no day/month-level links
        self.assertNotContains(response, 'release_date__day=')
        self.assertNotContains(response, 'release_date__month=')
        for date in DATES:
            self.assert_contains_year_link(response, date)

        # and make sure GET parameters still behave correctly
        for date in DATES:
            url = '%s?release_date__year=%d' % (
                  reverse('admin:admin_views_podcast_changelist'),
                  date.year)
            response = self.client.get(url)
            self.assert_contains_month_link(response, date)
            self.assert_non_localized_year(response, 2000)
            self.assert_non_localized_year(response, 2003)
            self.assert_non_localized_year(response, 2005)

            url = '%s?release_date__year=%d&release_date__month=%d' % (
                  reverse('admin:admin_views_podcast_changelist'),
                  date.year, date.month)
            response = self.client.get(url)
            self.assert_contains_day_link(response, date)
            self.assert_non_localized_year(response, 2000)
            self.assert_non_localized_year(response, 2003)
            self.assert_non_localized_year(response, 2005)
| mit |
dcroc16/skunk_works | google_appengine/lib/django-1.4/django/contrib/localflavor/us/forms.py | 87 | 4651 | """
USA-specific Form helpers
"""
from __future__ import absolute_import
import re
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select, CharField
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
# Matches a US phone number: optional leading "1" (optionally followed by a
# dash), then three digit groups separated by optional "-" or ".".
phone_digits_re = re.compile(r'^(?:1-?)?(\d{3})[-\.]?(\d{3})[-\.]?(\d{4})$')
# Matches a Social Security number; the area/group/serial parts may be
# separated by an optional "-" or space.
ssn_re = re.compile(r"^(?P<area>\d{3})[-\ ]?(?P<group>\d{2})[-\ ]?(?P<serial>\d{4})$")
class USZipCodeField(RegexField):
    """Form field that validates a US ZIP code (XXXXX or XXXXX-XXXX)."""
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXXX or XXXXX-XXXX.'),
    }

    def __init__(self, max_length=None, min_length=None, *args, **kwargs):
        # Five digits, optionally followed by "-" and four more (ZIP+4).
        super(USZipCodeField, self).__init__(r'^\d{5}(?:-\d{4})?$',
            max_length, min_length, *args, **kwargs)
class USPhoneNumberField(CharField):
    """
    A form field that validates a US phone number and normalizes it to
    XXX-XXX-XXXX format. Parentheses, whitespace, dots and dashes in the
    input are tolerated.
    """
    default_error_messages = {
        'invalid': _('Phone numbers must be in XXX-XXX-XXXX format.'),
    }

    def clean(self, value):
        """Return the number as u'XXX-XXX-XXXX', u'' for empty values, or
        raise ValidationError."""
        super(USPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Strip parentheses and whitespace before matching. Using a raw
        # string here: the original non-raw '(\(|\)|\s+)' relied on the
        # invalid escape "\(" surviving, which raises warnings on newer
        # Pythons; the pattern itself is unchanged.
        value = re.sub(r'(\(|\)|\s+)', '', smart_unicode(value))
        m = phone_digits_re.search(value)
        if m:
            return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
        raise ValidationError(self.error_messages['invalid'])
class USSocialSecurityNumberField(Field):
    """
    A United States Social Security number.

    Checks the following rules to determine whether the number is valid:

        * Conforms to the XXX-XX-XXXX format.
        * No group consists entirely of zeroes.
        * The leading group is not "666" (block "666" will never be allocated).
        * The number is not in the promotional block 987-65-4320 through
          987-65-4329, which are permanently invalid.
        * The number is not one known to be invalid due to otherwise widespread
          promotional use or distribution (e.g., the Woolworth's number or the
          1962 promotional number).
    """
    default_error_messages = {
        'invalid': _('Enter a valid U.S. Social Security number in XXX-XX-XXXX format.'),
    }

    def clean(self, value):
        """Return the number normalized to u'XXX-XX-XXXX', u'' for empty
        values, or raise ValidationError."""
        super(USSocialSecurityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        match = re.match(ssn_re, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        # Fetch the three named groups in a single call instead of building
        # three throwaway groupdict() dictionaries as the original did.
        area, group, serial = match.group('area', 'group', 'serial')

        # First pass: no blocks of all zeroes.
        if area == '000' or \
           group == '00' or \
           serial == '0000':
            raise ValidationError(self.error_messages['invalid'])

        # Second pass: promotional and otherwise permanently invalid numbers.
        if area == '666' or \
           (area == '987' and group == '65' and 4320 <= int(serial) <= 4329) or \
           value == '078-05-1120' or \
           value == '219-09-9999':
            raise ValidationError(self.error_messages['invalid'])
        return u'%s-%s-%s' % (area, group, serial)
class USStateField(Field):
    """
    A form field that validates its input is a U.S. state name or abbreviation.
    It normalizes the input to the standard two-letter postal service
    abbreviation for the given state.
    """
    default_error_messages = {
        'invalid': _('Enter a U.S. state or territory.'),
    }

    def clean(self, value):
        """Return the two-letter abbreviation, u'' for empty values, or
        raise ValidationError."""
        from django.contrib.localflavor.us.us_states import STATES_NORMALIZED
        super(USStateField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        try:
            value = value.strip().lower()
        except AttributeError:
            # Non-string input: fall through to the invalid error below.
            pass
        else:
            try:
                # `value` is already stripped and lowercased above; the
                # original redundantly normalized it a second time here.
                return STATES_NORMALIZED[value].decode('ascii')
            except KeyError:
                pass
        raise ValidationError(self.error_messages['invalid'])
class USStateSelect(Select):
    """
    A Select widget that uses a list of U.S. states/territories as its choices.
    """
    def __init__(self, attrs=None):
        from django.contrib.localflavor.us.us_states import STATE_CHOICES
        super(USStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class USPSSelect(Select):
    """
    A Select widget that uses a list of US Postal Service codes as its
    choices.
    """
    def __init__(self, attrs=None):
        from django.contrib.localflavor.us.us_states import USPS_CHOICES
        super(USPSSelect, self).__init__(attrs, choices=USPS_CHOICES)
| mit |
xflows/clowdflows-backend | workflows/management/commands/export_package_old.py | 8 | 6287 | from unicodedata import category
from django.core.management.base import BaseCommand, CommandError
from workflows.models import Category, AbstractWidget, AbstractInput, AbstractOutput, AbstractOption
from django.core import serializers
from optparse import make_option
import uuid
class Command(BaseCommand):
    """Management command that serializes a widget package to a JSON file."""
    args = 'file_name package_name <package_name2, package_name3, ...>'
    help = 'Exports all AbstractWidgets from a package "package_name" to a file named "file_name". Exports also all needed connected models Category, AbstractInput, AbstractOutput, and AbstractOption.'
    option_list = BaseCommand.option_list + (
        make_option('-n', '--newuid',
            action="store_true",
            dest='newuid',
            default=False,
            help='UID field of all exported models will be overwritten with new random values.'),
        make_option('-u', '--updateuid',
            action="store_true",
            dest='updateuid',
            default=True,
            help='Models without UIDs will be assigned new ones. Use this option when adding new parameters, widgets, etc, to an existing package.'),
        make_option('-a', '--all',
            action="store_true",
            dest='all',
            default=False,
            help='Export all widgets regardless of the specified package'),
        )

    def handle(self, *args, **options):
        """Validate arguments, export the requested packages and write the
        resulting JSON to the output file given as the first argument.

        Raises CommandError on bad arguments or file I/O failures.
        """
        if (len(args) < 2):
            raise CommandError('Arguments "file_name" and "package_name" are required!')

        if options['newuid'] and options['updateuid']:
            raise CommandError('--newuid and --updateuid flags are mutually exclusive.')

        # Open the output file first (as the original did) so an unwritable
        # path fails before any uid mutation happens in the export step.
        # Catch environment errors specifically instead of a bare except.
        try:
            f = open(args[0], 'w')
        except EnvironmentError:
            raise CommandError('There was a problem with creating/overwriting given output file')

        # Ensure the handle is closed on every path; the original leaked it
        # whenever the export or the write raised.
        try:
            result = export_package_string(self.stdout.write, args[1:], options['newuid'], options['updateuid'], options['all'], int(options['verbosity']))
            try:
                f.write(result.encode('utf-8'))
            except (EnvironmentError, UnicodeError):
                raise CommandError('There was a problem with writing to the given output file')
        finally:
            f.close()

        verbosity = int(options['verbosity'])
        if verbosity > 0 and verbosity < 3:
            self.stdout.write('Tip: use higher "verbosity" option numbers to se more detailed output of what is being exported.\n')
        self.stdout.write('Export procedure successfully finished. Results written to the file.\n')
def export_package_string(writeFunc, packages, newuid, updateuid, all, verbosity):
    """
    Serialize every exportable object belonging to `packages` (or to every
    package, when `all` is True) into a JSON string.

    writeFunc is a callable used for progress/statistics output.
    Raises CommandError when the selection is empty.
    """
    assert isinstance(packages, tuple)
    assert isinstance(newuid, bool)
    assert isinstance(updateuid, bool)
    assert isinstance(all, bool)
    selection = []
    for root_category in Category.objects.filter(parent=None):
        selection.extend(get_package_objs_in_category(root_category, packages, all))
    if not selection:
        raise CommandError('Selected package(s) were not found!')
    # NOTE(review): an earlier comment here claimed uids are "not written to
    # the database", yet save() below does persist them -- confirm intent.
    for exported_obj in selection:
        if newuid or (updateuid and not exported_obj.uid):
            exported_obj.uid = str(uuid.uuid4())
            exported_obj.save()
    print_stataistics(selection, verbosity, writeFunc)
    return serializers.serialize("json", selection, indent=2, ensure_ascii=False)
def get_package_objs_in_category(cat, packages, all):
    """
    Recursively collect the category `cat` together with all matching widgets
    (and their inputs/outputs/options) from `cat` and its descendants.

    Returns an empty list when nothing in the subtree matches, so empty
    categories never appear in the export.
    """
    assert isinstance(cat, Category)
    assert isinstance(packages, tuple)
    assert isinstance(all, bool)
    collected = list(get_package_wids_in_category(cat, packages, all))
    for child_category in cat.children.all():
        collected.extend(get_package_objs_in_category(child_category, packages, all))
    # Prepend the category itself only when it actually contributes content.
    return [cat] + collected if collected else collected
def get_package_wids_in_category(cat, packages, all):
    """
    Collect every widget of `cat` belonging to `packages` (or every
    user-independent widget when `all` is True). Each widget is immediately
    followed by its inputs (each with its options) and then its outputs, so
    serialization order stays stable.
    """
    assert isinstance(cat, Category)
    assert isinstance(packages, tuple)
    assert isinstance(all, bool)
    widgets = cat.widgets.filter(user=None) if all else cat.widgets.filter(package__in=packages)
    collected = []
    for widget in widgets:
        collected.append(widget)
        for widget_input in widget.inputs.all():
            collected.append(widget_input)
            collected.extend(widget_input.options.all())
        collected.extend(widget.outputs.all())
    return collected
def print_stataistics(objs, verbosity, writeFunc):
    """
    Report, via writeFunc, how many objects of each model are being exported;
    at higher verbosity levels also list them hierarchically.

    (The function name -- including its historical typo -- is kept because
    callers reference it.)
    """
    if verbosity > 0:
        writeFunc('Selection contains:\n')
        writeFunc(' % 4i AbstractWidget(s)\n' % sum(1 for obj in objs if isinstance(obj, AbstractWidget)))
        writeFunc(' % 4i AbstractInput(s)\n' % sum(1 for obj in objs if isinstance(obj, AbstractInput)))
        writeFunc(' % 4i AbstractOutput(s)\n' % sum(1 for obj in objs if isinstance(obj, AbstractOutput)))
        writeFunc(' % 4i AbstractOption(s)\n' % sum(1 for obj in objs if isinstance(obj, AbstractOption)))
        writeFunc(' % 4i Category(s)\n' % sum(1 for obj in objs if isinstance(obj, Category)))
    if verbosity == 1:
        writeFunc('Exported categories:\n')
    if verbosity == 2:
        writeFunc('Exported categories and widgets:\n')
    if verbosity == 3:
        writeFunc('Exported categories, widgets, inputs, outputs and options:\n')
    indent = 0
    for obj in objs:
        line = ''
        if isinstance(obj, Category):
            # Nesting depth comes from the '::'-separated category path.
            indent = str(obj).count('::')
            line = '% 3i. Category ===== %s =====' % (obj.order, obj)
        if isinstance(obj, AbstractWidget):
            line = ' % 3i. AbstractWidget: %s [%s]' % (obj.order, obj.name, obj.action)
        if isinstance(obj, AbstractInput):
            line = ' % 3i. AbstractInput: (%s) %s' % (obj.order, obj.short_name, obj.name)
        if isinstance(obj, AbstractOutput):
            line = ' % 3i. AbstractOutput: (%s) %s' % (obj.order, obj.short_name, obj.name)
        if isinstance(obj, AbstractOption):
            line = ' AbstractOption: %s | %s' % (obj.name, obj.value)
        show = isinstance(obj, Category) or (isinstance(obj, AbstractWidget) and verbosity > 1) or verbosity > 2
        if show:
            writeFunc(' ' * indent + line + '\n')
| mit |
tobbad/micropython | tests/misc/rge_sm.py | 15 | 4422 | # evolve the RGEs of the standard model from electroweak scale up
# by dpgeorge
import math
class RungeKutta(object):
    """
    Fixed-step classic RK4 integrator for a first-order system dy/dt = f(t, y, ...).

    Time is treated as component 0 of the state vector (its derivative is the
    constant 1.0), so Trajectory rows are [t, y1, y2, ...].
    """

    def __init__(self, functions, initConditions, t0, dh, save=True):
        # Prepend dt/dt = 1 so time advances like any other state component.
        self.functions = [lambda *args: 1.0] + list(functions)
        self.N = len(self.functions)
        self.dh = dh
        self.save = save
        self.Trajectory = [[t0] + initConditions]
        # Classic RK4 weights and the stage offsets for the intermediate evaluations.
        self.coeff = [1.0 / 6.0, 2.0 / 6.0, 2.0 / 6.0, 1.0 / 6.0]
        self.InArgCoeff = [0.0, 0.5, 0.5, 1.0]

    def iterate(self):
        """Advance one RK4 step; return False if the solution diverges or overflows."""
        step = self.Trajectory[-1][:]
        base = step[:]
        offsets = self.InArgCoeff
        k = [0.0] * self.N
        for stage, weight in enumerate(self.coeff):
            k_next = [0.0] * self.N
            for comp, func in enumerate(self.functions):
                args = [(x + k[i] * offsets[stage]) for i, x in enumerate(base)]
                try:
                    slope = func(*args)
                except OverflowError:
                    return False
                if abs(slope) > 1e2:  # stop integrating
                    return False
                k_next[comp] = self.dh * slope
            k = k_next
            step = [s + weight * k[j] for j, s in enumerate(step)]
        if self.save:
            self.Trajectory.append(step)
        else:
            self.Trajectory = [step]
        return True

    def solve(self, finishtime):
        """Integrate until time reaches finishtime, or until a step fails."""
        while self.Trajectory[-1][0] < finishtime:
            if not self.iterate():
                break

    def solveNSteps(self, nSteps):
        """Take at most nSteps integration steps."""
        for _ in range(nSteps):
            if not self.iterate():
                break

    def series(self):
        """Transpose the trajectory: one sequence per component, time first."""
        return zip(*self.Trajectory)
# 1-loop RGES for the main parameters of the SM
# couplings are: g1, g2, g3 of U(1), SU(2), SU(3); yt (top Yukawa), lambda (Higgs quartic)
# see arxiv.org/abs/0812.4950, eqs 10-15
# Each beta function receives the full state (t, g1, g2, g3, yt, lambda) as *a.
def _beta_g1(*a):
    """One-loop running of the U(1) coupling g1."""
    return 41.0 / 96.0 / math.pi ** 2 * a[1] ** 3


def _beta_g2(*a):
    """One-loop running of the SU(2) coupling g2."""
    return -19.0 / 96.0 / math.pi ** 2 * a[2] ** 3


def _beta_g3(*a):
    """One-loop running of the SU(3) coupling g3."""
    return -42.0 / 96.0 / math.pi ** 2 * a[3] ** 3


def _beta_yt(*a):
    """One-loop running of the top Yukawa coupling yt."""
    return 1.0 / 16.0 / math.pi ** 2 * (
        9.0 / 2.0 * a[4] ** 3
        - 8.0 * a[3] ** 2 * a[4]
        - 9.0 / 4.0 * a[2] ** 2 * a[4]
        - 17.0 / 12.0 * a[1] ** 2 * a[4]
    )


def _beta_lambda(*a):
    """One-loop running of the Higgs quartic coupling lambda."""
    return 1.0 / 16.0 / math.pi ** 2 * (
        24.0 * a[5] ** 2
        + 12.0 * a[4] ** 2 * a[5]
        - 9.0 * a[5] * (a[2] ** 2 + 1.0 / 3.0 * a[1] ** 2)
        - 6.0 * a[4] ** 4
        + 9.0 / 8.0 * a[2] ** 4
        + 3.0 / 8.0 * a[1] ** 4
        + 3.0 / 4.0 * a[2] ** 2 * a[1] ** 2
    )


sysSM = (_beta_g1, _beta_g2, _beta_g3, _beta_yt, _beta_lambda)
def drange(start, stop, step):
    """
    Yield values from start (inclusive) up to stop (exclusive) in increments
    of step. Values are produced by repeated addition, so float rounding
    accumulates exactly as in the original implementation.
    """
    current = start
    while current < stop:
        yield current
        current += step
def phaseDiagram(system, trajStart, trajPlot, h=0.1, tend=1.0, range=1.0):
    """
    Print a 10x10 grid of trajectories of `system` as (x, y) pairs, one blank
    line between trajectories (gnuplot-style output).

    trajStart(i, j) supplies the initial conditions for grid cell (i, j);
    trajPlot(point) projects a trajectory point onto the 2 plotted coordinates.
    Note: the parameter named `range` shadows the builtin; the name is kept
    for interface compatibility.
    """
    tstart = 0.0
    for i in drange(0, range, 0.1 * range):
        for j in drange(0, range, 0.1 * range):
            rk = RungeKutta(system, trajStart(i, j), tstart, h)
            rk.solve(tend)
            # draw the line
            for tr in rk.Trajectory:
                x, y = trajPlot(tr)
                print(x, y)
            print()
            # draw the arrow -- deliberately disabled by this `continue`;
            # the code below is kept for reference.
            continue
            # Bug fix in the disabled code: use integer division -- with `/`
            # the index `l` is a float and rk.Trajectory[l] would raise
            # TypeError on Python 3 if the arrow drawing were re-enabled.
            l = (len(rk.Trajectory) - 1) // 3
            if l > 0 and 2 * l < len(rk.Trajectory):
                p1 = rk.Trajectory[l]
                p2 = rk.Trajectory[2 * l]
                x1, y1 = trajPlot(p1)
                x2, y2 = trajPlot(p2)
                dx = -0.5 * (y2 - y1)  # orthogonal to line
                dy = 0.5 * (x2 - x1)  # orthogonal to line
                # l = math.sqrt(dx*dx + dy*dy)
                # if abs(l) > 1e-3:
                # l = 0.1 / l
                # dx *= l
                # dy *= l
                print(x1 + dx, y1 + dy)
                print(x2, y2)
                print(x1 - dx, y1 - dy)
                print()
def singleTraj(system, trajStart, h=0.02, tend=1.0):
    """
    Integrate a single trajectory of `system` from initial conditions
    `trajStart` and print every trajectory row, one line per step, with each
    component formatted to 4 decimal places.
    """
    # compute the trajectory from t = 0
    integrator = RungeKutta(system, trajStart, 0.0, h)
    integrator.solve(tend)
    # print out trajectory
    for row in integrator.Trajectory:
        print(" ".join("{:.4f}".format(value) for value in row))
# Alternative driver: full phase diagram over a grid of initial conditions (disabled).
# phaseDiagram(sysSM, (lambda i, j: [0.354, 0.654, 1.278, 0.8 + 0.2 * i, 0.1 + 0.1 * j]), (lambda a: (a[4], a[5])), h=0.1, tend=math.log(10**17))
# initial conditions at M_Z
# tend = log(10**17) -- presumably the integration variable is the log of the
# energy scale, so this runs the couplings up to 1e17 (GeV); confirm with the
# reference in the sysSM comment.
singleTraj(
    sysSM, [0.354, 0.654, 1.278, 0.983, 0.131], h=0.5, tend=math.log(10 ** 17)
) # true values
| mit |
waheedahmed/edx-platform | common/lib/xmodule/xmodule/modulestore/split_mongo/split.py | 2 | 158417 | """
Provides full versioning CRUD and representation for collections of xblocks (e.g., courses, modules, etc).
Representation:
* course_index: a dictionary:
** '_id': a unique id which cannot change,
** 'org': the org's id. Only used for searching not identity,
** 'course': the course's catalog number
** 'run': the course's run id,
** 'edited_by': user_id of user who created the original entry,
** 'edited_on': the datetime of the original creation,
** 'versions': versions_dict: {branch_id: structure_id, ...}
** 'search_targets': a dict of search key and value. For example, wiki_slug. Add any fields whose edits
should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict
* structure:
** '_id': an ObjectId (guid),
** 'root': BlockKey (the block_type and block_id of the root block in the 'blocks' dictionary)
** 'previous_version': the structure from which this one was derived. For published courses, this
points to the previously published version of the structure not the draft published to this.
** 'original_version': the original structure id in the previous_version relation. Is a pseudo object
identifier enabling quick determination if 2 structures have any shared history,
** 'edited_by': user_id of the user whose change caused the creation of this structure version,
** 'edited_on': the datetime for the change causing this creation of this structure version,
** 'blocks': dictionary of xblocks in this structure:
*** BlockKey: key mapping to each BlockData:
*** BlockData: object containing the following attributes:
**** 'block_type': the xblock type id
**** 'definition': the db id of the record containing the content payload for this xblock
**** 'fields': the Scope.settings and children field values
***** 'children': This is stored as a list of (block_type, block_id) pairs
**** 'defaults': Scope.settings default values copied from a template block (used e.g. when
blocks are copied from a library to a course)
**** 'edit_info': EditInfo object:
***** 'edited_on': when was this xblock's fields last changed (will be edited_on value of
update_version structure)
***** 'edited_by': user_id for who changed this xblock last (will be edited_by value of
update_version structure)
***** 'update_version': the guid for the structure where this xblock got its current field
values. This may point to a structure not in this structure's history (e.g., to a draft
branch from which this version was published.)
***** 'previous_version': the guid for the structure which previously changed this xblock
(will be the previous value of update_version; so, may point to a structure not in this
structure's history.)
***** 'source_version': the guid for the structure was copied/published into this block
* definition: shared content with revision history for xblock content fields
** '_id': definition_id (guid),
** 'block_type': xblock type id
** 'fields': scope.content (and possibly other) field values.
** 'edit_info': dictionary:
*** 'edited_by': user_id whose edit caused this version of the definition,
*** 'edited_on': datetime of the change causing this version
*** 'previous_version': the definition_id of the previous version of this definition
*** 'original_version': definition_id of the root of the previous version relation on this
definition. Acts as a pseudo-object identifier.
"""
import copy
import datetime
import hashlib
import logging
from contracts import contract, new_contract
from importlib import import_module
from mongodb_proxy import autoretry_read
from path import Path as path
from pytz import UTC
from bson.objectid import ObjectId
from xblock.core import XBlock
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.course_module import CourseSummary
from xmodule.errortracker import null_error_tracker
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import (
BlockUsageLocator, DefinitionLocator, CourseLocator, LibraryLocator, VersionTree, LocalId,
)
from ccx_keys.locator import CCXLocator, CCXBlockUsageLocator
from xmodule.modulestore.exceptions import InsufficientSpecificationError, VersionConflictError, DuplicateItemError, \
DuplicateCourseError, MultipleCourseBlocksFound
from xmodule.modulestore import (
inheritance, ModuleStoreWriteBase, ModuleStoreEnum,
BulkOpsRecord, BulkOperationsMixin, SortedAssetList, BlockData
)
from ..exceptions import ItemNotFoundError
from .caching_descriptor_system import CachingDescriptorSystem
from xmodule.modulestore.split_mongo.mongo_connection import MongoConnection, DuplicateKeyError
from xmodule.modulestore.split_mongo import BlockKey, CourseEnvelope
from xmodule.modulestore.store_utilities import DETACHED_XBLOCK_TYPES
from xmodule.error_module import ErrorDescriptor
from collections import defaultdict
from types import NoneType
from xmodule.assetstore import AssetMetadata
log = logging.getLogger(__name__)
# ==============================================================================
#
# Known issue:
# Inheritance for cached kvs doesn't work on edits. Use case.
# 1) attribute foo is inheritable
# 2) g.children = [p], p.children = [a]
# 3) g.foo = 1 on load
# 4) if g.foo > 0, if p.foo > 0, if a.foo > 0 all eval True
# 5) p.foo = -1
# 6) g.foo > 0, p.foo <= 0 all eval True BUT
# 7) BUG: a.foo > 0 still evals True but should be False
# 8) reread and everything works right
# 9) p.del(foo), p.foo > 0 is True! works
# 10) BUG: a.foo < 0!
# Local fix won't permanently work b/c xblock may cache a.foo...
#
# ==============================================================================
# When a blacklist is set to this value, all of the block's children are excluded
EXCLUDE_ALL = '*'
new_contract('BlockUsageLocator', BlockUsageLocator)
new_contract('BlockKey', BlockKey)
new_contract('XBlock', XBlock)
class SplitBulkWriteRecord(BulkOpsRecord):
    """
    Per-course record of everything accumulated during an active Split bulk
    operation: the working course index, new structures/definitions not yet
    persisted, and a cache of instantiated modules.
    """
    def __init__(self):
        super(SplitBulkWriteRecord, self).__init__()
        # Snapshot of the course index as read when the bulk op began (None for a new course).
        self.initial_index = None
        # Working copy of the course index; edits land here until the bulk op ends.
        self.index = None
        # structure _id -> structure document (working copies and cached reads).
        self.structures = {}
        # ids of structures known to already exist in the db (no re-insert needed on flush).
        self.structures_in_db = set()
        # dict(version_guid, dict(BlockKey, module))
        self.modules = defaultdict(dict)
        # definition _id -> definition document, with the same in-db tracking as structures.
        self.definitions = {}
        self.definitions_in_db = set()
        self.course_key = None
    # TODO: This needs to track which branches have actually been modified/versioned,
    # so that copying one branch to another doesn't update the original branch.
    @property
    def dirty_branches(self):
        """
        Return a list of which branch version ids differ from what was stored
        in the database at the beginning of this bulk operation.
        """
        # If no course index has been set, then no branches have changed
        if self.index is None:
            return []
        # If there was no index in the database to start with, then all branches
        # are dirty by definition
        if self.initial_index is None:
            return self.index.get('versions', {}).keys()
        # Return branches whose ids differ between self.index and self.initial_index
        return [
            branch
            for branch, _id
            in self.index.get('versions', {}).items()
            if self.initial_index.get('versions', {}).get(branch) != _id
        ]
    def structure_for_branch(self, branch):
        # Resolve branch -> version id via the working index, then to the cached structure
        # (None when either lookup misses).
        return self.structures.get(self.index.get('versions', {}).get(branch))
    def set_structure_for_branch(self, branch, structure):
        # Point the working index's branch at this structure and cache the structure itself.
        if self.index is not None:
            self.index.setdefault('versions', {})[branch] = structure['_id']
        self.structures[structure['_id']] = structure
    def __repr__(self):
        return u"SplitBulkWriteRecord<{!r}, {!r}, {!r}, {!r}, {!r}>".format(
            self._active_count,
            self.initial_index,
            self.index,
            self.structures,
            self.structures_in_db,
        )
class SplitBulkWriteMixin(BulkOperationsMixin):
    """
    This implements the :meth:`bulk_operations` modulestore semantics for the :class:`SplitMongoModuleStore`.

    In particular, it implements :meth:`_begin_bulk_operation` and
    :meth:`_end_bulk_operation` to provide the external interface, and then exposes a set of methods
    for interacting with course_indexes and structures that can be used by :class:`SplitMongoModuleStore`.

    Internally, this mixin records the set of all active bulk operations (keyed on the active course),
    and only writes those values to ``self.mongo_connection`` when :meth:`_end_bulk_operation` is called.
    If a bulk write operation isn't active, then the changes are immediately written to the underlying
    mongo_connection.
    """
    # Record type instantiated per course while a bulk operation is active.
    _bulk_ops_record_type = SplitBulkWriteRecord
def _get_bulk_ops_record(self, course_key, ignore_case=False):
"""
Return the :class:`.SplitBulkWriteRecord` for this course.
"""
# handle split specific things and defer to super otherwise
if course_key is None:
return self._bulk_ops_record_type()
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError(u'{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
# handle version_guid based retrieval locally
if course_key.org is None or course_key.course is None or course_key.run is None:
return self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
# handle ignore case and general use
return super(SplitBulkWriteMixin, self)._get_bulk_ops_record(
course_key.replace(branch=None, version_guid=None), ignore_case
)
def _clear_bulk_ops_record(self, course_key):
"""
Clear the record for this course
"""
if not isinstance(course_key, (CourseLocator, LibraryLocator)):
raise TypeError('{!r} is not a CourseLocator or LibraryLocator'.format(course_key))
if course_key.org and course_key.course and course_key.run:
del self._active_bulk_ops.records[course_key.replace(branch=None, version_guid=None)]
else:
del self._active_bulk_ops.records[
course_key.replace(org=None, course=None, run=None, branch=None)
]
def _start_outermost_bulk_operation(self, bulk_write_record, course_key):
"""
Begin a bulk write operation on course_key.
"""
bulk_write_record.initial_index = self.db_connection.get_course_index(course_key)
# Ensure that any edits to the index don't pollute the initial_index
bulk_write_record.index = copy.deepcopy(bulk_write_record.initial_index)
bulk_write_record.course_key = course_key
    def _end_outermost_bulk_operation(self, bulk_write_record, structure_key):
        """
        End the active bulk write operation on structure_key (course or library key).

        Flushes, in this order: structures not yet in the db, definitions not
        yet in the db, then the course index (insert or update). Returns True
        when anything was actually written.
        NOTE(review): the index is flushed last -- presumably so a persisted
        index never points at structures/definitions that have not landed yet;
        confirm before reordering.
        """
        dirty = False
        # If the content is dirty, then update the database
        for _id in bulk_write_record.structures.viewkeys() - bulk_write_record.structures_in_db:
            dirty = True
            try:
                self.db_connection.insert_structure(bulk_write_record.structures[_id], bulk_write_record.course_key)
            except DuplicateKeyError:
                # We may not have looked up this structure inside this bulk operation, and thus
                # didn't realize that it was already in the database. That's OK, the store is
                # append only, so if it's already been written, we can just keep going.
                log.debug("Attempted to insert duplicate structure %s", _id)
        for _id in bulk_write_record.definitions.viewkeys() - bulk_write_record.definitions_in_db:
            dirty = True
            try:
                self.db_connection.insert_definition(bulk_write_record.definitions[_id], bulk_write_record.course_key)
            except DuplicateKeyError:
                # We may not have looked up this definition inside this bulk operation, and thus
                # didn't realize that it was already in the database. That's OK, the store is
                # append only, so if it's already been written, we can just keep going.
                log.debug("Attempted to insert duplicate definition %s", _id)
        # Only touch the index when it actually changed during the bulk op.
        if bulk_write_record.index is not None and bulk_write_record.index != bulk_write_record.initial_index:
            dirty = True
            if bulk_write_record.initial_index is None:
                # No index existed when the bulk op started: this is a brand-new course/library.
                self.db_connection.insert_course_index(bulk_write_record.index, bulk_write_record.course_key)
            else:
                self.db_connection.update_course_index(
                    bulk_write_record.index,
                    from_index=bulk_write_record.initial_index,
                    course_context=bulk_write_record.course_key
                )
        return dirty
def get_course_index(self, course_key, ignore_case=False):
"""
Return the index for course_key.
"""
if self._is_in_bulk_operation(course_key, ignore_case):
return self._get_bulk_ops_record(course_key, ignore_case).index
else:
return self.db_connection.get_course_index(course_key, ignore_case)
def delete_course_index(self, course_key):
"""
Delete the course index from cache and the db
"""
if self._is_in_bulk_operation(course_key, False):
self._clear_bulk_ops_record(course_key)
self.db_connection.delete_course_index(course_key)
def insert_course_index(self, course_key, index_entry):
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = index_entry
else:
self.db_connection.insert_course_index(index_entry, course_key)
def update_course_index(self, course_key, updated_index_entry):
"""
Change the given course's index entry.
Note, this operation can be dangerous and break running courses.
Does not return anything useful.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.index = updated_index_entry
else:
self.db_connection.update_course_index(updated_index_entry, course_context=course_key)
    def get_structure(self, course_key, version_guid):
        """
        Return the structure document for version_guid, preferring (and
        populating) the active bulk operation's cache when one is open on
        course_key. Returns None when the structure does not exist.
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            structure = bulk_write_record.structures.get(version_guid)
            # The structure hasn't been loaded from the db yet, so load it
            if structure is None:
                # NOTE: a db miss is cached as None here, which means a miss
                # gets re-queried on every subsequent call.
                structure = self.db_connection.get_structure(version_guid, course_key)
                bulk_write_record.structures[version_guid] = structure
                if structure is not None:
                    # Remember the structure already exists in the db so the
                    # end-of-bulk flush won't attempt to insert it again.
                    bulk_write_record.structures_in_db.add(version_guid)
            return structure
        else:
            # cast string to ObjectId if necessary
            version_guid = course_key.as_object_id(version_guid)
            return self.db_connection.get_structure(version_guid, course_key)
def update_structure(self, course_key, structure):
"""
Update a course structure, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
self._clear_cache(structure['_id'])
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.structures[structure['_id']] = structure
else:
self.db_connection.insert_structure(structure, course_key)
def get_cached_block(self, course_key, version_guid, block_id):
"""
If there's an active bulk_operation, see if it's cached this module and just return it
Don't do any extra work to get the ones which are not cached. Make the caller do the work & cache them.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
return bulk_write_record.modules[version_guid].get(block_id, None)
else:
return None
def cache_block(self, course_key, version_guid, block_key, block):
"""
The counterpart to :method `get_cached_block` which caches a block.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.modules[version_guid][block_key] = block
def decache_block(self, course_key, version_guid, block_key):
"""
Write operations which don't write from blocks must remove the target blocks from the cache.
Returns nothing.
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
try:
del bulk_write_record.modules[version_guid][block_key]
except KeyError:
pass
    def get_definition(self, course_key, definition_guid):
        """
        Retrieve a single definition by id, respecting the active bulk operation
        on course_key.

        Args:
            course_key (:class:`.CourseKey`): The course being operated on
            definition_guid (str or ObjectID): The id of the definition to load
        """
        bulk_write_record = self._get_bulk_ops_record(course_key)
        if bulk_write_record.active:
            definition = bulk_write_record.definitions.get(definition_guid)
            # The definition hasn't been loaded from the db yet, so load it
            if definition is None:
                # NOTE(review): a db miss is cached as None here -- consumers
                # that iterate bulk_write_record.definitions must tolerate
                # None values.
                definition = self.db_connection.get_definition(definition_guid, course_key)
                bulk_write_record.definitions[definition_guid] = definition
                if definition is not None:
                    # Already persisted: the end-of-bulk flush must not re-insert it.
                    bulk_write_record.definitions_in_db.add(definition_guid)
            return definition
        else:
            # cast string to ObjectId if necessary
            definition_guid = course_key.as_object_id(definition_guid)
            return self.db_connection.get_definition(definition_guid, course_key)
def get_definitions(self, course_key, ids):
"""
Return all definitions that specified in ``ids``.
If a definition with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
course_key (:class:`.CourseKey`): The course that these definitions are being loaded
for (to respect bulk operations).
ids (list): A list of definition ids
"""
definitions = []
ids = set(ids)
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
# Only query for the definitions that aren't already cached.
for definition in bulk_write_record.definitions.values():
definition_id = definition.get('_id')
if definition_id in ids:
ids.remove(definition_id)
definitions.append(definition)
if len(ids):
# Query the db for the definitions.
defs_from_db = self.db_connection.get_definitions(list(ids), course_key)
# Add the retrieved definitions to the cache.
bulk_write_record.definitions.update({d.get('_id'): d for d in defs_from_db})
definitions.extend(defs_from_db)
return definitions
def update_definition(self, course_key, definition):
"""
Update a definition, respecting the current bulk operation status
(no data will be written to the database if a bulk operation is active.)
"""
bulk_write_record = self._get_bulk_ops_record(course_key)
if bulk_write_record.active:
bulk_write_record.definitions[definition['_id']] = definition
else:
self.db_connection.insert_definition(definition, course_key)
    def version_structure(self, course_key, structure, user_id):
        """
        Copy the structure and update the history info (edited_by, edited_on, previous_version)

        Raises InsufficientSpecificationError when course_key has no branch.
        """
        if course_key.branch is None:
            raise InsufficientSpecificationError(course_key)
        bulk_write_record = self._get_bulk_ops_record(course_key)
        # If we have an active bulk write, and it's already been edited, then just use that structure
        if bulk_write_record.active and course_key.branch in bulk_write_record.dirty_branches:
            return bulk_write_record.structure_for_branch(course_key.branch)
        # Otherwise, make a new structure
        new_structure = copy.deepcopy(structure)
        new_structure['_id'] = ObjectId()
        # Record lineage: the new version points back at the one it was copied from.
        new_structure['previous_version'] = structure['_id']
        new_structure['edited_by'] = user_id
        new_structure['edited_on'] = datetime.datetime.now(UTC)
        # SCHEMA_VERSION is defined on the concrete store class (e.g. SplitMongoModuleStore).
        new_structure['schema_version'] = self.SCHEMA_VERSION
        # If we're in a bulk write, update the structure used there, and mark it as dirty
        if bulk_write_record.active:
            bulk_write_record.set_structure_for_branch(course_key.branch, new_structure)
        return new_structure
    def version_block(self, block_data, user_id, update_version):
        """
        Update the block_data object based on it having been edited.

        No-op when the block already carries ``update_version`` (i.e. it was
        already versioned for this structure version).
        """
        if block_data.edit_info.update_version == update_version:
            return
        # NOTE(review): original_usage/original_usage_version are read here and
        # written back unchanged below; presumably this guards against those
        # edit_info fields being reset elsewhere -- confirm before simplifying.
        original_usage = block_data.edit_info.original_usage
        original_usage_version = block_data.edit_info.original_usage_version
        block_data.edit_info.edited_on = datetime.datetime.now(UTC)
        block_data.edit_info.edited_by = user_id
        # The current update_version becomes this edit's previous_version.
        block_data.edit_info.previous_version = block_data.edit_info.update_version
        block_data.edit_info.update_version = update_version
        if original_usage:
            block_data.edit_info.original_usage = original_usage
            block_data.edit_info.original_usage_version = original_usage_version
    def find_matching_course_indexes(self, branch=None, search_targets=None, org_target=None):
        """
        Find the course_indexes which have the specified branch and search_targets. An optional org_target
        can be specified to apply an ORG filter to return only the courses that are part of
        that ORG.

        Returns:
            a Cursor if there are no changes in flight or a list if some have changed in current bulk op
        """
        indexes = self.db_connection.find_matching_course_indexes(branch, search_targets, org_target)

        def _replace_or_append_index(altered_index):
            """
            If the index is already in indexes, replace it. Otherwise, append it.
            """
            # Matching is by (org, course, run) identity, not by _id.
            for index, existing in enumerate(indexes):
                if all(existing[attr] == altered_index[attr] for attr in ['org', 'course', 'run']):
                    indexes[index] = altered_index
                    return
            indexes.append(altered_index)

        # add any being built but not yet persisted or in the process of being updated
        for _, record in self._active_records:
            if branch and branch not in record.index.get('versions', {}):
                continue
            if search_targets:
                # Skip in-flight records that fail any one of the requested search targets.
                if any(
                    'search_targets' not in record.index or
                    field not in record.index['search_targets'] or
                    record.index['search_targets'][field] != value
                    for field, value in search_targets.iteritems()
                ):
                    continue
            # if we've specified a filter by org,
            # make sure we've honored that filter when
            # integrating in-transit records
            if org_target:
                if record.index['org'] != org_target:
                    continue
            if not hasattr(indexes, 'append'):  # Just in time conversion to list from cursor
                indexes = list(indexes)
            _replace_or_append_index(record.index)

        return indexes
def find_course_blocks_by_id(self, ids):
"""
Find all structures that specified in `ids`. Filter the course blocks to only return whose
`block_type` is `course`
Arguments:
ids (list): A list of structure ids
"""
ids = set(ids)
return self.db_connection.find_course_blocks_by_id(list(ids))
def find_structures_by_id(self, ids):
"""
Return all structures that specified in ``ids``.
If a structure with the same id is in both the cache and the database,
the cached version will be preferred.
Arguments:
ids (list): A list of structure ids
"""
structures = []
ids = set(ids)
for _, record in self._active_records:
for structure in record.structures.values():
structure_id = structure.get('_id')
if structure_id in ids:
ids.remove(structure_id)
structures.append(structure)
structures.extend(self.db_connection.find_structures_by_id(list(ids)))
return structures
def find_structures_derived_from(self, ids):
"""
Return all structures that were immediately derived from a structure listed in ``ids``.
Arguments:
ids (list): A list of structure ids
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if structure.get('previous_version') in ids:
structures.append(structure)
if '_id' in structure:
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_structures_derived_from(ids)
if structure['_id'] not in found_structure_ids
)
return structures
def find_ancestor_structures(self, original_version, block_key):
"""
Find all structures that originated from ``original_version`` that contain ``block_key``.
Any structure found in the cache will be preferred to a structure with the same id from the database.
Arguments:
original_version (str or ObjectID): The id of a structure
block_key (BlockKey): The id of the block in question
"""
found_structure_ids = set()
structures = []
for _, record in self._active_records:
for structure in record.structures.values():
if 'original_version' not in structure:
continue
if structure['original_version'] != original_version:
continue
if block_key not in structure.get('blocks', {}):
continue
if 'update_version' not in structure['blocks'][block_key].get('edit_info', {}):
continue
structures.append(structure)
found_structure_ids.add(structure['_id'])
structures.extend(
structure
for structure in self.db_connection.find_ancestor_structures(original_version, block_key)
if structure['_id'] not in found_structure_ids
)
return structures
class SplitMongoModuleStore(SplitBulkWriteMixin, ModuleStoreWriteBase):
    """
    A Mongodb backed ModuleStore supporting versions, inheritance,
    and sharing.
    """
    # Version stamped onto structure documents (see version_structure in the mixin).
    SCHEMA_VERSION = 1
    # a list of field names to store in course index search_targets. Note, this will
    # only record one value per key. If branches disagree, the last one set wins.
    # It won't recompute the value on operations such as update_course_index (e.g., to revert to a prev
    # version) but those functions will have an optional arg for setting these.
    SEARCH_TARGET_DICT = ['wiki_slug']
    def __init__(self, contentstore, doc_store_config, fs_root, render_template,
            default_class=None,
            error_tracker=null_error_tracker,
            i18n_service=None, fs_service=None, user_service=None,
            services=None, signal_handler=None, **kwargs):
        """
        :param doc_store_config: must have a host, db, and collection entries. Other common entries: port, tz_aware.
        """
        super(SplitMongoModuleStore, self).__init__(contentstore, **kwargs)
        self.db_connection = MongoConnection(**doc_store_config)
        if default_class is not None:
            # Resolve the dotted path of the default xblock class once, up front.
            module_path, __, class_name = default_class.rpartition('.')
            class_ = getattr(import_module(module_path), class_name)
            self.default_class = class_
        else:
            self.default_class = None
        self.fs_root = path(fs_root)
        self.error_tracker = error_tracker
        self.render_template = render_template
        # NOTE: when `services` is supplied it is used directly (not copied),
        # so the i18n/fs/user entries added below mutate the caller's dict.
        self.services = services or {}
        if i18n_service is not None:
            self.services["i18n"] = i18n_service
        if fs_service is not None:
            self.services["fs"] = fs_service
        if user_service is not None:
            self.services["user"] = user_service
        # request_cache is presumably set by a base class -- confirm; expose it
        # as a service when present.
        if self.request_cache is not None:
            self.services["request_cache"] = self.request_cache
        self.signal_handler = signal_handler
    def close_connections(self):
        """
        Closes any open connections to the underlying databases
        """
        # Delegates entirely to the MongoConnection wrapper.
        self.db_connection.close_connections()
    def mongo_wire_version(self):
        """
        Returns the wire version for mongo. Only used to unit tests which instrument the connection.
        """
        # Note: returns the connection wrapper's attribute as-is (no call).
        return self.db_connection.mongo_wire_version
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
# drop the assets
super(SplitMongoModuleStore, self)._drop_database(database, collections, connections)
self.db_connection._drop_database(database, collections, connections) # pylint: disable=protected-access
    def cache_items(self, system, base_block_ids, course_key, depth=0, lazy=True):
        """
        Handles caching of items once inheritance and any other one time
        per course per fetch operations are done.

        Arguments:
            system: a CachingDescriptorSystem
            base_block_ids: list of BlockIds to fetch
            course_key: the destination course providing the context
            depth: how deep below these to prefetch
            lazy: whether to load definitions now or later
        """
        # emit_signals=False: this is a read-side prefetch, not a content change.
        with self.bulk_operations(course_key, emit_signals=False):
            new_module_data = {}
            # Accumulate each requested block plus its descendants (to `depth`).
            for block_id in base_block_ids:
                new_module_data = self.descendants(
                    system.course_entry.structure['blocks'],
                    block_id,
                    depth,
                    new_module_data
                )
            # This method supports lazy loading, where the descendent definitions aren't loaded
            # until they're actually needed.
            if not lazy:
                # Non-lazy loading: Load all descendants by id.
                descendent_definitions = self.get_definitions(
                    course_key,
                    [
                        block.definition
                        for block in new_module_data.itervalues()
                    ]
                )
                # Turn definitions into a map.
                definitions = {definition['_id']: definition
                               for definition in descendent_definitions}
                # Merge each fetched definition's fields into its block.
                for block in new_module_data.itervalues():
                    if block.definition in definitions:
                        definition = definitions[block.definition]
                        # convert_fields gets done later in the runtime's xblock_from_json
                        block.fields.update(definition.get('fields'))
                        block.definition_loaded = True
            system.module_data.update(new_module_data)
            return system.module_data
    @contract(course_entry=CourseEnvelope, block_keys="list(BlockKey)", depth="int | None")
    def _load_items(self, course_entry, block_keys, depth=0, **kwargs):
        """
        Load & cache the given blocks from the course. May return the blocks in any order.

        Load the definitions into each block if lazy is in kwargs and is False;
        otherwise, do not load the definitions - they'll be loaded later when needed.
        """
        # One runtime (descriptor cache) is shared per structure version id.
        runtime = self._get_cache(course_entry.structure['_id'])
        if runtime is None:
            # `lazy` only matters when we build the runtime; it is popped so it
            # doesn't leak into load_item's kwargs below.
            lazy = kwargs.pop('lazy', True)
            runtime = self.create_runtime(course_entry, lazy)
            self._add_cache(course_entry.structure['_id'], runtime)
            # Prefetch the requested blocks (and descendants) into the new runtime.
            self.cache_items(runtime, block_keys, course_entry.course_key, depth, lazy)
        return [runtime.load_item(block_key, course_entry, **kwargs) for block_key in block_keys]
def _get_cache(self, course_version_guid):
"""
Find the descriptor cache for this course if it exists
:param course_version_guid:
"""
if self.request_cache is None:
return None
return self.request_cache.data.setdefault('course_cache', {}).get(course_version_guid)
def _add_cache(self, course_version_guid, system):
"""
Save this cache for subsequent access
:param course_version_guid:
:param system:
"""
if self.request_cache is not None:
self.request_cache.data.setdefault('course_cache', {})[course_version_guid] = system
return system
def _clear_cache(self, course_version_guid=None):
"""
Should only be used by testing or something which implements transactional boundary semantics.
:param course_version_guid: if provided, clear only this entry
"""
if self.request_cache is None:
return
if course_version_guid:
try:
del self.request_cache.data.setdefault('course_cache', {})[course_version_guid]
except KeyError:
pass
else:
self.request_cache.data['course_cache'] = {}
    def _lookup_course(self, course_key, head_validation=True):
        """
        Decode the locator into the right series of db access. Does not
        return the CourseDescriptor! It returns the actual db json from
        structures.

        Semantics: if course id and branch given, then it will get that branch. If
        also give a version_guid, it will see if the current head of that branch == that guid. If not
        it raises VersionConflictError (the version now differs from what it was when you got your
        reference) unless you specify head_validation = False, in which case it will return the
        revision (if specified) by the course_key.

        :param course_key: any subclass of CourseLocator
        :raises InsufficientSpecificationError: no branch (when resolving by id)
            and no version_guid to fall back on
        :raises ItemNotFoundError: index, branch, or structure does not exist
        :raises VersionConflictError: branch head moved past the given version_guid
        """
        # Without a version_guid there is nothing to validate against, so the
        # branch head must be used regardless of the caller's preference.
        if not course_key.version_guid:
            head_validation = True
        if head_validation and course_key.org and course_key.course and course_key.run:
            if course_key.branch is None:
                raise InsufficientSpecificationError(course_key)
            # use the course id
            index = self.get_course_index(course_key)
            if index is None:
                raise ItemNotFoundError(course_key)
            if course_key.branch not in index['versions']:
                raise ItemNotFoundError(course_key)
            # Resolve the branch to its current head version.
            version_guid = index['versions'][course_key.branch]
            if course_key.version_guid is not None and version_guid != course_key.version_guid:
                # This may be a bit too touchy but it's hard to infer intent
                raise VersionConflictError(course_key, version_guid)
        elif course_key.version_guid is None:
            raise InsufficientSpecificationError(course_key)
        else:
            # TODO should this raise an exception if branch was provided?
            version_guid = course_key.version_guid
        entry = self.get_structure(course_key, version_guid)
        if entry is None:
            raise ItemNotFoundError('Structure: {}'.format(version_guid))
        # b/c more than one course can use same structure, the 'org', 'course',
        # 'run', and 'branch' are not intrinsic to structure
        # and the one assoc'd w/ it by another fetch may not be the one relevant to this fetch; so,
        # add it in the envelope for the structure.
        return CourseEnvelope(course_key.replace(version_guid=version_guid), entry)
def _get_course_blocks_for_branch(self, branch, **kwargs):
"""
Internal generator for fetching lists of courses without loading them.
"""
version_guids, id_version_map = self.collect_ids_from_matching_indexes(branch, **kwargs)
if not version_guids:
return
for entry in self.find_course_blocks_by_id(version_guids):
for course_index in id_version_map[entry['_id']]:
yield entry, course_index
def _get_structures_for_branch(self, branch, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
"""
version_guids, id_version_map = self.collect_ids_from_matching_indexes(branch, **kwargs)
if not version_guids:
return
for entry in self.find_structures_by_id(version_guids):
for course_index in id_version_map[entry['_id']]:
yield entry, course_index
def collect_ids_from_matching_indexes(self, branch, **kwargs):
"""
Find the course_indexes which have the specified branch. if `kwargs` contains `org`
to apply an ORG filter to return only the courses that are part of that ORG. Extract `version_guids`
from the course_indexes.
"""
matching_indexes = self.find_matching_course_indexes(
branch,
search_targets=None,
org_target=kwargs.get('org')
)
# collect ids and then query for those
version_guids = []
id_version_map = defaultdict(list)
for course_index in matching_indexes:
version_guid = course_index['versions'][branch]
version_guids.append(version_guid)
id_version_map[version_guid].append(course_index)
return version_guids, id_version_map
def _get_structures_for_branch_and_locator(self, branch, locator_factory, **kwargs):
"""
Internal generator for fetching lists of courses, libraries, etc.
:param str branch: Branch to fetch structures from
:param type locator_factory: Factory to create locator from structure info and branch
"""
result = []
for entry, structure_info in self._get_structures_for_branch(branch, **kwargs):
locator = locator_factory(structure_info, branch)
envelope = CourseEnvelope(locator, entry)
root = entry['root']
structures_list = self._load_items(envelope, [root], depth=0, **kwargs)
if not isinstance(structures_list[0], ErrorDescriptor):
result.append(structures_list[0])
return result
def _create_course_locator(self, course_info, branch):
"""
Creates course locator using course_info dict and branch
"""
return CourseLocator(
org=course_info['org'],
course=course_info['course'],
run=course_info['run'],
branch=branch,
)
def _create_library_locator(self, library_info, branch):
"""
Creates library locator using library_info dict and branch
"""
return LibraryLocator(
org=library_info['org'],
library=library_info['course'],
branch=branch,
)
@autoretry_read()
def get_courses(self, branch, **kwargs):
"""
Returns a list of course descriptors matching any given qualifiers.
qualifiers should be a dict of keywords matching the db fields or any
legal query for mongo to use against the active_versions collection.
Note, this is to find the current head of the named branch type.
To get specific versions via guid use get_course.
:param branch: the branch for which to return courses.
"""
# get the blocks for each course index (s/b the root)
return self._get_structures_for_branch_and_locator(branch, self._create_course_locator, **kwargs)
    @autoretry_read()
    def get_course_summaries(self, branch, **kwargs):
        """
        Returns a list of `CourseSummary` which matching any given qualifiers.

        qualifiers should be a dict of keywords matching the db fields or any
        legal query for mongo to use against the active_versions collection.
        Note, this is to find the current head of the named branch type.
        To get specific versions via guid use get_course.

        :param branch: the branch for which to return courses.
        :raises ItemNotFoundError: a structure has no 'course'-typed block
        :raises MultipleCourseBlocksFound: a structure has more than one
        """
        def extract_course_summary(course):
            """
            Extract course information from the course block for split.
            """
            # Only the summary fields that actually exist on this block.
            return {
                field: course.fields[field]
                for field in CourseSummary.course_info_fields
                if field in course.fields
            }
        courses_summaries = []
        for entry, structure_info in self._get_course_blocks_for_branch(branch, **kwargs):
            # NOTE(review): the branch is deliberately dropped from the summary's locator.
            course_locator = self._create_course_locator(structure_info, branch=None)
            # Each structure is expected to contain exactly one 'course' block.
            course_block = [
                block_data
                for block_key, block_data in entry['blocks'].items()
                if block_key.type == "course"
            ]
            if not course_block:
                raise ItemNotFoundError
            if len(course_block) > 1:
                raise MultipleCourseBlocksFound(
                    "Expected 1 course block to be found in the course, but found {0}".format(len(course_block))
                )
            course_summary = extract_course_summary(course_block[0])
            courses_summaries.append(
                CourseSummary(course_locator, **course_summary)
            )
        return courses_summaries
def get_libraries(self, branch="library", **kwargs):
"""
Returns a list of "library" root blocks matching any given qualifiers.
TODO: better way of identifying library index entry vs. course index entry.
"""
return self._get_structures_for_branch_and_locator(branch, self._create_library_locator, **kwargs)
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
return CourseLocator(org, course, run)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for this modulestore
that matches the supplied course_key.
"""
locator_cls = CCXBlockUsageLocator if isinstance(course_key, CCXLocator) else BlockUsageLocator
return locator_cls(course_key, 'course', 'course')
def _get_structure(self, structure_id, depth, head_validation=True, **kwargs):
"""
Gets Course or Library by locator
"""
structure_entry = self._lookup_course(structure_id, head_validation=head_validation)
root = structure_entry.structure['root']
result = self._load_items(structure_entry, [root], depth, **kwargs)
return result[0]
def get_course(self, course_id, depth=0, **kwargs):
"""
Gets the course descriptor for the course identified by the locator
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_id)
return self._get_structure(course_id, depth, **kwargs)
def get_library(self, library_id, depth=0, head_validation=True, **kwargs):
"""
Gets the 'library' root block for the library identified by the locator
"""
if not isinstance(library_id, LibraryLocator):
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(library_id)
return self._get_structure(library_id, depth, head_validation=head_validation, **kwargs)
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
Does this course exist in this modulestore. This method does not verify that the branch &/or
version in the course_id exists. Use get_course_index_info to check that.
Returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
"""
if not isinstance(course_id, CourseLocator) or course_id.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
course_index = self.get_course_index(course_id, ignore_case)
return CourseLocator(course_index['org'], course_index['course'], course_index['run'], course_id.branch) if course_index else None
def has_library(self, library_id, ignore_case=False, **kwargs):
"""
Does this library exist in this modulestore. This method does not verify that the branch &/or
version in the library_id exists.
Returns the library_id of the course if it was found, else None.
"""
if not isinstance(library_id, LibraryLocator):
return None
index = self.get_course_index(library_id, ignore_case)
if index:
return LibraryLocator(index['org'], index['course'], library_id.branch)
return None
def has_item(self, usage_key):
"""
Returns True if usage_key exists in its course. Returns false if
the course or the block w/in the course do not exist for the given version.
raises InsufficientSpecificationError if the usage_key does not id a block
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
return False
if usage_key.block_id is None:
raise InsufficientSpecificationError(usage_key)
try:
course_structure = self._lookup_course(usage_key.course_key).structure
except ItemNotFoundError:
# this error only occurs if the course does not exist
return False
return self._get_block_from_structure(course_structure, BlockKey.from_usage_key(usage_key)) is not None
@contract(returns='XBlock')
def get_item(self, usage_key, depth=0, **kwargs):
"""
depth (int): An argument that some module stores may use to prefetch
descendants of the queried modules for more efficient results later
in the request. The depth is counted in the number of
calls to get_children() to cache. None indicates to cache all
descendants.
raises InsufficientSpecificationError or ItemNotFoundError
"""
if not isinstance(usage_key, BlockUsageLocator) or usage_key.deprecated:
# The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(usage_key)
with self.bulk_operations(usage_key.course_key):
course = self._lookup_course(usage_key.course_key)
items = self._load_items(course, [BlockKey.from_usage_key(usage_key)], depth, **kwargs)
if len(items) == 0:
raise ItemNotFoundError(usage_key)
elif len(items) > 1:
log.debug("Found more than one item for '{}'".format(usage_key))
return items[0]
    def get_items(self, course_locator, settings=None, content=None, qualifiers=None, include_orphans=True, **kwargs):
        """
        Returns:
            list of XModuleDescriptor instances for the matching items within the course with
            the given course_locator

        NOTE: don't use this to look for courses as the course_locator is required. Use get_courses.

        Args:
            course_locator (CourseLocator): the course identifier
            settings (dict): fields to look for which have settings scope. Follows same syntax
                and rules as qualifiers below
            content (dict): fields to look for which have content scope. Follows same syntax and
                rules as qualifiers below.
            qualifiers (dict): what to look for within the course.
                Common qualifiers are ``category`` or any field name. if the target field is a list,
                then it searches for the given value in the list not list equivalence.
                For substring matching pass a regex object.
                For split,
                you can search by ``edited_by``, ``edited_on`` providing a function testing limits.
            include_orphans (boolean): Returns all items in a course, including orphans if present.
                True - This would return all items irrespective of course in tree checking. It may fetch orphans
                if present in the course.
                False - if we want only those items which are in the course tree. This would ensure no orphans are
                fetched.
        """
        if not isinstance(course_locator, CourseKey) or course_locator.deprecated:
            # The supplied courselike key is of the wrong type, so it can't possibly be stored in this modulestore.
            return []
        course = self._lookup_course(course_locator)
        items = []
        qualifiers = qualifiers.copy() if qualifiers else {}  # copy the qualifiers (destructively manipulated here)
        def _block_matches_all(block_data):
            """
            Check that the block matches all the criteria
            """
            # do the checks which don't require loading any additional data
            # NOTE: returns None (falsy) rather than False when the cheap checks fail.
            if (  # pylint: disable=bad-continuation
                self._block_matches(block_data, qualifiers) and
                self._block_matches(block_data.fields, settings)
            ):
                if content:
                    # Content-scope checks need the definition document loaded.
                    definition_block = self.get_definition(course_locator, block_data.definition)
                    return self._block_matches(definition_block['fields'], content)
                else:
                    return True
        # NOTE: `settings` is rebound here *after* _block_matches_all is defined;
        # the closure sees the rebound value because it is only called below.
        if settings is None:
            settings = {}
        if 'name' in qualifiers:
            # odd case where we don't search just confirm
            block_name = qualifiers.pop('name')
            block_ids = []
            for block_id, block in course.structure['blocks'].iteritems():
                # Do an in comparison on the name qualifier
                # so that a list can be used to filter on block_id
                if block_id.id in block_name and _block_matches_all(block):
                    block_ids.append(block_id)
            return self._load_items(course, block_ids, **kwargs)
        if 'category' in qualifiers:
            # Callers say 'category'; the db stores 'block_type'.
            qualifiers['block_type'] = qualifiers.pop('category')
        # don't expect caller to know that children are in fields
        if 'children' in qualifiers:
            settings['children'] = qualifiers.pop('children')
        # No need of these caches unless include_orphans is set to False
        path_cache = None
        parents_cache = None
        if not include_orphans:
            path_cache = {}
            parents_cache = self.build_block_key_to_parents_mapping(course.structure)
        for block_id, value in course.structure['blocks'].iteritems():
            if _block_matches_all(value):
                if not include_orphans:
                    # Keep only detached types or blocks reachable from the root.
                    if (  # pylint: disable=bad-continuation
                        block_id.type in DETACHED_XBLOCK_TYPES or
                        self.has_path_to_root(block_id, course, path_cache, parents_cache)
                    ):
                        items.append(block_id)
                else:
                    items.append(block_id)
        if len(items) > 0:
            return self._load_items(course, items, depth=0, **kwargs)
        else:
            return []
def build_block_key_to_parents_mapping(self, structure):
"""
Given a structure, builds block_key to parents mapping for all block keys in structure
and returns it
:param structure: db json of course structure
:return dict: a dictionary containing mapping of block_keys against their parents.
"""
children_to_parents = defaultdict(list)
for parent_key, value in structure['blocks'].iteritems():
for child_key in value.fields.get('children', []):
children_to_parents[child_key].append(parent_key)
return children_to_parents
    def has_path_to_root(self, block_key, course, path_cache=None, parents_cache=None):
        """
        Check recursively if an xblock has a path to the course root.

        :param block_key: BlockKey of the component whose path is to be checked
        :param course: actual db json of course from structures
        :param path_cache: a dictionary that records which modules have a path to the root so that we don't have to
            double count modules if we're computing this for a list of modules in a course.
        :param parents_cache: a dictionary containing mapping of block_key to list of its parents. Optionally, this
            should be built for course structure to make this method faster.
        :return Bool: whether or not component has path to the root
        """
        # Reuse a memoized answer from an earlier traversal when available.
        if path_cache and block_key in path_cache:
            return path_cache[block_key]
        if parents_cache is None:
            xblock_parents = self._get_parents_from_structure(block_key, course.structure)
        else:
            xblock_parents = parents_cache[block_key]
        if len(xblock_parents) == 0 and block_key.type in ["course", "library"]:
            # Found, xblock has the path to the root
            if path_cache is not None:
                path_cache[block_key] = True
            return True
        # Recurse upward: this block is rooted iff any of its parents is rooted.
        has_path = any(
            self.has_path_to_root(xblock_parent, course, path_cache, parents_cache)
            for xblock_parent in xblock_parents
        )
        if path_cache is not None:
            path_cache[block_key] = has_path
        return has_path
def get_parent_location(self, locator, **kwargs):
"""
Return the location (Locators w/ block_ids) for the parent of this location in this
course. Could use get_items(location, {'children': block_id}) but this is slightly faster.
NOTE: the locator must contain the block_id, and this code does not actually ensure block_id exists
:param locator: BlockUsageLocator restricting search scope
"""
if not isinstance(locator, BlockUsageLocator) or locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(locator)
course = self._lookup_course(locator.course_key)
all_parent_ids = self._get_parents_from_structure(BlockKey.from_usage_key(locator), course.structure)
# Check and verify the found parent_ids are not orphans; Remove parent which has no valid path
# to the course root
parent_ids = [
valid_parent
for valid_parent in all_parent_ids
if self.has_path_to_root(valid_parent, course)
]
if len(parent_ids) == 0:
return None
# find alphabetically least
parent_ids.sort(key=lambda parent: (parent.type, parent.id))
return BlockUsageLocator.make_relative(
locator,
block_type=parent_ids[0].type,
block_id=parent_ids[0].id,
)
def get_orphans(self, course_key, **kwargs):
"""
Return an array of all of the orphans in the course.
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
detached_categories = [name for name, __ in XBlock.load_tagged_classes("detached")]
course = self._lookup_course(course_key)
items = set(course.structure['blocks'].keys())
items.remove(course.structure['root'])
blocks = course.structure['blocks']
for block_id, block_data in blocks.iteritems():
items.difference_update(BlockKey(*child) for child in block_data.fields.get('children', []))
if block_data.block_type in detached_categories:
items.discard(block_id)
return [
course_key.make_usage_key(block_type=block_id.type, block_id=block_id.id)
for block_id in items
]
def get_course_index_info(self, course_key):
"""
The index records the initial creation of the indexed course and tracks the current version
heads. This function is primarily for test verification but may serve some
more general purpose.
:param course_key: must have a org, course, and run set
:return {'org': string,
versions: {'draft': the head draft version id,
'published': the head published version id if any,
},
'edited_by': who created the course originally (named edited for consistency),
'edited_on': when the course was originally created
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
if not (course_key.course and course_key.run and course_key.org):
return None
index = self.get_course_index(course_key)
return index
# TODO figure out a way to make this info accessible from the course descriptor
def get_course_history_info(self, course_key):
"""
Because xblocks doesn't give a means to separate the course structure's meta information from
the course xblock's, this method will get that info for the structure as a whole.
:param course_key:
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(course_key, CourseLocator) or course_key.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_key)
course = self._lookup_course(course_key).structure
return {
'original_version': course['original_version'],
'previous_version': course['previous_version'],
'edited_by': course['edited_by'],
'edited_on': course['edited_on']
}
def get_definition_history_info(self, definition_locator, course_context=None):
"""
Because xblocks doesn't give a means to separate the definition's meta information from
the usage xblock's, this method will get that info for the definition
:return {'original_version': the version guid of the original version of this course,
'previous_version': the version guid of the previous version,
'edited_by': who made the last change,
'edited_on': when the change was made
}
"""
if not isinstance(definition_locator, DefinitionLocator) or definition_locator.deprecated:
# The supplied locator is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(definition_locator)
definition = self.db_connection.get_definition(definition_locator.definition_id, course_context)
if definition is None:
return None
return definition['edit_info']
def get_course_successors(self, course_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this course. Return as a VersionTree
Mostly makes sense when course_locator uses a version_guid, but because it finds all relevant
next versions, these do include those created for other courses.
:param course_locator:
"""
if not isinstance(course_locator, CourseLocator) or course_locator.deprecated:
# The supplied CourseKey is of the wrong type, so it can't possibly be stored in this modulestore.
raise ItemNotFoundError(course_locator)
if version_history_depth < 1:
return None
if course_locator.version_guid is None:
course = self._lookup_course(course_locator)
version_guid = course.structure['_id']
course_locator = course_locator.for_version(version_guid)
else:
version_guid = course_locator.version_guid
# TODO if depth is significant, it may make sense to get all that have the same original_version
# and reconstruct the subtree from version_guid
next_entries = self.find_structures_derived_from([version_guid])
# must only scan cursor's once
next_versions = [struct for struct in next_entries]
result = {version_guid: [CourseLocator(version_guid=struct['_id']) for struct in next_versions]}
depth = 1
while depth < version_history_depth and len(next_versions) > 0:
depth += 1
next_entries = self.find_structures_derived_from([struct['_id'] for struct in next_versions])
next_versions = [struct for struct in next_entries]
for course_structure in next_versions:
result.setdefault(course_structure['previous_version'], []).append(
CourseLocator(version_guid=struct['_id']))
return VersionTree(course_locator, result)
    def get_block_generations(self, block_locator):
        """
        Find the history of this block. Return as a VersionTree of each place the block changed (except
        deletion).

        The block's history tracks its explicit changes but not the changes in its children starting
        from when the block was created.
        """
        # course_agnostic means we don't care if the head and version don't align, trust the version
        course_struct = self._lookup_course(block_locator.course_key.course_agnostic()).structure
        block_key = BlockKey.from_usage_key(block_locator)
        all_versions_with_block = self.find_ancestor_structures(
            original_version=course_struct['original_version'],
            block_key=block_key
        )
        # find (all) root versions and build map {previous: {successors}..}
        possible_roots = []
        result = {}
        for version in all_versions_with_block:
            block_payload = self._get_block_from_structure(version, block_key)
            # Only consider structures in which this block was actually edited.
            if version['_id'] == block_payload.edit_info.update_version:
                if block_payload.edit_info.previous_version is None:
                    # this was when this block was created
                    possible_roots.append(block_payload.edit_info.update_version)
                else:  # map previous to {update..}
                    result.setdefault(block_payload.edit_info.previous_version, set()).add(
                        block_payload.edit_info.update_version)
        # more than one possible_root means usage was added and deleted > 1x.
        if len(possible_roots) > 1:
            # find the history segment including block_locator's version
            element_to_find = self._get_block_from_structure(course_struct, block_key).edit_info.update_version
            if element_to_find in possible_roots:
                possible_roots = [element_to_find]
            for possibility in possible_roots:
                if self._find_local_root(element_to_find, possibility, result):
                    possible_roots = [possibility]
                    break
        elif len(possible_roots) == 0:
            return None
        # convert the results value sets to locators
        for k, versions in result.iteritems():
            result[k] = [
                block_locator.for_version(version)
                for version in versions
            ]
        return VersionTree(
            block_locator.for_version(possible_roots[0]),
            result
        )
def get_definition_successors(self, definition_locator, version_history_depth=1):
"""
Find the version_history_depth next versions of this definition. Return as a VersionTree
"""
# TODO implement
pass
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator and version from
which the copy was inherited.
Returns usage_key, version if the data is available, otherwise returns (None, None)
"""
blocks = self._lookup_course(usage_key.course_key).structure['blocks']
block = blocks.get(BlockKey.from_usage_key(usage_key))
if block and block.edit_info.original_usage is not None:
usage_key = BlockUsageLocator.from_string(block.edit_info.original_usage)
return usage_key, block.edit_info.original_usage_version
return None, None
def create_definition_from_data(self, course_key, new_def_data, category, user_id):
"""
Pull the definition fields out of descriptor and save to the db as a new definition
w/o a predecessor and return the new id.
:param user_id: request.user object
"""
new_def_data = self._serialize_fields(category, new_def_data)
new_id = ObjectId()
document = {
'_id': new_id,
"block_type": category,
"fields": new_def_data,
"edit_info": {
"edited_by": user_id,
"edited_on": datetime.datetime.now(UTC),
"previous_version": None,
"original_version": new_id,
},
'schema_version': self.SCHEMA_VERSION,
}
self.update_definition(course_key, document)
definition_locator = DefinitionLocator(category, new_id)
return definition_locator
    def update_definition_from_data(self, course_key, definition_locator, new_def_data, user_id):
        """
        See if new_def_data differs from the persisted version. If so, update
        the persisted version and return the new id.

        Returns (definition_locator, changed) where changed is True when a new
        version was persisted.

        :param user_id: request.user
        """
        def needs_saved():
            # Any key added or changed relative to the stored fields?
            for key, value in new_def_data.iteritems():
                if key not in old_definition['fields'] or value != old_definition['fields'][key]:
                    return True
            # Any stored key removed from the new data? (Returns None - falsy -
            # when nothing differs.)
            for key, value in old_definition.get('fields', {}).iteritems():
                if key not in new_def_data:
                    return True
        # if this looks in cache rather than fresh fetches, then it will probably not detect
        # actual change b/c the descriptor and cache probably point to the same objects
        # NOTE: `old_definition` is bound *after* needs_saved is defined; the
        # closure resolves it at call time below.
        old_definition = self.get_definition(course_key, definition_locator.definition_id)
        if old_definition is None:
            raise ItemNotFoundError(definition_locator)
        new_def_data = self._serialize_fields(old_definition['block_type'], new_def_data)
        if needs_saved():
            definition_locator = self._update_definition_from_data(course_key, old_definition, new_def_data, user_id)
            return definition_locator, True
        else:
            return definition_locator, False
def _update_definition_from_data(self, course_key, old_definition, new_def_data, user_id):
    """
    Write a successor version of ``old_definition`` carrying ``new_def_data``
    and return the new definition's locator. Does not check whether the data
    actually differs from the previous version — callers do that.
    """
    successor = copy.deepcopy(old_definition)
    successor['_id'] = ObjectId()
    successor['fields'] = new_def_data
    successor['schema_version'] = self.SCHEMA_VERSION
    edit_info = successor['edit_info']
    edit_info['edited_by'] = user_id
    edit_info['edited_on'] = datetime.datetime.now(UTC)
    # chain the successor back to the version it supersedes
    edit_info['previous_version'] = old_definition['_id']
    self.update_definition(course_key, successor)
    return DefinitionLocator(successor['block_type'], successor['_id'])
def _generate_block_key(self, course_blocks, category):
    """
    Produce a somewhat readable block id unique within this course by
    appending an increasing serial number to the category name.

    :param course_blocks: the current mapping of BlockKey -> block.
    :param category: block type used as the id prefix.
    """
    # NOTE: a potential bug is that a block is deleted and another created which gets the old
    # block's id. a possible fix is to cache the last serial in a dict in the structure
    # {category: last_serial...}
    # A potential confusion is if the name incorporates the parent's name, then if the child
    # moves, its id won't change and will be confusing
    serial = 0
    while True:
        serial += 1
        candidate = BlockKey(category, "{}{}".format(category, serial))
        if candidate not in course_blocks:
            return candidate
@contract(returns='XBlock')
def create_item(self, user_id, course_key, block_type, block_id=None, definition_locator=None, fields=None,
                asides=None, force=False, **kwargs):
    """
    Add a descriptor to persistence as an element of the course.
    Return the resulting post-saved version with populated locators.

    :param course_key: If it has a version_guid and a course org + course + run + branch, this
        method ensures that the version is the head of the given course branch before making the change.

    raises InsufficientSpecificationError if there is no course locator.
    raises VersionConflictError if the version_guid of the course_or_parent_locator is not the head
        of its course unless force is true.

    :param force: fork the structure and don't update the course draftVersion if the above
    :param continue_revision: for multistep transactions, continue revising the given version rather than creating
        a new version. Setting force to True conflicts with setting this to True and will cause a VersionConflictError
    :param definition_locator: should either be None to indicate this is a brand new definition or
        a pointer to the existing definition to which this block should point or from which this was derived
        or a LocalId to indicate that it's new.
        If fields does not contain any Scope.content, then definition_locator must have a value meaning that
        this block points to the existing definition. If fields contains Scope.content and definition_locator
        is not None, then the Scope.content fields are assumed to be a new payload for definition_locator.
    :param block_id: if provided, must not already exist in the structure. Provides the block id for the
        new item in this structure. Otherwise, one is computed using the category appended w/ a few digits.

    This method creates a new version of the course structure unless the course has a bulk_write operation
    active. It creates and inserts the new block, makes the block point
    to the definition which may be new or a new version of an existing or an existing.

    Rules for course locator:

    * If the course locator specifies a org and course and run and either it doesn't
      specify version_guid or the one it specifies == the current head of the branch,
      it progresses the course to point to the new head and sets the active version to point to the new head
    * If the locator has a org and course and run but its version_guid != current head, it raises
      VersionConflictError.

    NOTE: using a version_guid will end up creating a new version of the course. Your new item won't be in
    the course id'd by version_guid but instead in one w/ a new version_guid. Ensure in this case that you get
    the new version_guid from the locator in the returned object!
    """
    with self.bulk_operations(course_key):
        # split handles all the fields in one dict not separated by scope
        fields = fields or {}
        fields.update(kwargs.pop('metadata', {}) or {})
        definition_data = kwargs.pop('definition_data', {})
        if definition_data:
            if not isinstance(definition_data, dict):
                definition_data = {'data': definition_data}  # backward compatibility to mongo's hack
            fields.update(definition_data)

        # find course_index entry if applicable and structures entry
        index_entry = self._get_index_if_valid(course_key, force)
        structure = self._lookup_course(course_key).structure

        partitioned_fields = self.partition_fields_by_scope(block_type, fields)
        new_def_data = partitioned_fields.get(Scope.content, {})
        # persist the definition if persisted != passed:
        # a None or LocalId definition means "brand new"; otherwise only version
        # the existing definition when new content was actually supplied
        if definition_locator is None or isinstance(definition_locator.definition_id, LocalId):
            definition_locator = self.create_definition_from_data(course_key, new_def_data, block_type, user_id)
        elif new_def_data:
            definition_locator, _ = self.update_definition_from_data(course_key, definition_locator, new_def_data, user_id)

        # copy the structure and modify the new one (never mutate the fetched structure)
        new_structure = self.version_structure(course_key, structure, user_id)

        new_id = new_structure['_id']

        # generate usage id: honor an explicit block_id but reject collisions,
        # otherwise synthesize a category-based serial id
        if block_id is not None:
            block_key = BlockKey(block_type, block_id)
            if block_key in new_structure['blocks']:
                raise DuplicateItemError(block_id, self, 'structures')
        else:
            block_key = self._generate_block_key(new_structure['blocks'], block_type)

        block_fields = partitioned_fields.get(Scope.settings, {})
        if Scope.children in partitioned_fields:
            block_fields.update(partitioned_fields[Scope.children])
        self._update_block_in_structure(new_structure, block_key, self._new_block(
            user_id,
            block_type,
            block_fields,
            definition_locator.definition_id,
            new_id,
            asides=asides
        ))

        self.update_structure(course_key, new_structure)

        # update the index entry if appropriate
        if index_entry is not None:
            # see if any search targets changed
            if fields is not None:
                self._update_search_targets(index_entry, fields)
            self._update_head(course_key, index_entry, course_key.branch, new_id)
            item_loc = BlockUsageLocator(
                course_key.version_agnostic(),
                block_type=block_type,
                block_id=block_key.id,
            )
        else:
            # no index to move: address the item via the specific new version guid
            item_loc = BlockUsageLocator(
                CourseLocator(version_guid=new_id),
                block_type=block_type,
                block_id=block_key.id,
            )

        if isinstance(course_key, LibraryLocator):
            self._flag_library_updated_event(course_key)

        # reconstruct the new_item from the cache
        return self.get_item(item_loc)
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, asides=None, **kwargs):
    """
    Creates and saves a new xblock as a child of the specified block.
    Returns the newly created item.

    Args:
        user_id: ID of the user creating and saving the xmodule
        parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
            block that this item should be parented under
        block_type: The type of block to create
        block_id: a unique identifier for the new item. If not supplied,
            a new identifier will be generated
        fields (dict): A dictionary specifying initial values for some or all fields
            in the newly created block
        asides (dict): A dictionary specifying initial values for some or all aside fields
            in the newly created block
    """
    with self.bulk_operations(parent_usage_key.course_key):
        xblock = self.create_item(
            user_id, parent_usage_key.course_key, block_type, block_id=block_id, fields=fields, asides=asides,
            **kwargs)

        # skip attach to parent if xblock has 'detached' tag
        if 'detached' in xblock._class_tags:  # pylint: disable=protected-access
            return xblock

        # don't version the structure as create_item handled that already.
        new_structure = self._lookup_course(xblock.location.course_key).structure

        # add new block as child and update parent's version
        block_id = BlockKey.from_usage_key(parent_usage_key)
        if block_id not in new_structure['blocks']:
            raise ItemNotFoundError(parent_usage_key)

        parent = new_structure['blocks'][block_id]

        # Originally added to support entrance exams (settings.FEATURES.get('ENTRANCE_EXAMS')):
        # a 'position' kwarg inserts the child at that index; otherwise append at the end
        if kwargs.get('position') is None:
            parent.fields.setdefault('children', []).append(BlockKey.from_usage_key(xblock.location))
        else:
            parent.fields.setdefault('children', []).insert(
                kwargs.get('position'),
                BlockKey.from_usage_key(xblock.location)
            )

        if parent.edit_info.update_version != new_structure['_id']:
            # if the parent hadn't been previously changed in this bulk transaction, indicate that it's
            # part of the bulk transaction
            self.version_block(parent, user_id, new_structure['_id'])
        self.decache_block(parent_usage_key.course_key, new_structure['_id'], block_id)

        # db update
        self.update_structure(parent_usage_key.course_key, new_structure)

        # don't need to update the index b/c create_item did it for this version
        return xblock
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
    """
    Clone ``source_course_id`` into ``dest_course_id``.

    See :meth: `.ModuleStoreWrite.clone_course` for documentation.
    In split, other than copying the assets, this is cheap: the destination
    course is simply a new index entry reusing the source's structure versions.
    """
    src_index = self.get_course_index_info(source_course_id)
    if src_index is None:
        raise ItemNotFoundError("Cannot find a course at {0}. Aborting".format(source_course_id))

    with self.bulk_operations(dest_course_id):
        cloned_course = self.create_course(
            dest_course_id.org, dest_course_id.course, dest_course_id.run,
            user_id,
            fields=fields,
            versions_dict=src_index['versions'],
            search_targets=src_index['search_targets'],
            skip_auto_publish=True,
            **kwargs
        )
        # don't copy assets until we create the course in case something's awry
        super(SplitMongoModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
        return cloned_course
# Default block ids for the root block of a newly created course / library
# structure, used when the caller does not supply root_block_id.
DEFAULT_ROOT_COURSE_BLOCK_ID = 'course'
DEFAULT_ROOT_LIBRARY_BLOCK_ID = 'library'
def create_course(
    self, org, course, run, user_id, master_branch=None, fields=None,
    versions_dict=None, search_targets=None, root_category='course',
    root_block_id=None, **kwargs
):
    """
    Create a new entry in the active courses index which points to an existing or new structure. Returns
    the course root of the resulting entry (the location has the course id).

    Arguments:
        org (str): the organization that owns the course
        course (str): the course number of the course
        run (str): the particular run of the course (e.g. 2013_T1)
        user_id: id of the user creating the course
        fields (dict): Fields to set on the course at initialization
        kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation

    course + run: If there are duplicates, this method will raise DuplicateCourseError

    fields: if scope.settings fields provided, will set the fields of the root course object in the
        new course. If both settings fields and a starting version are provided (via versions_dict),
        it will generate a successor version to the given version, and update the settings fields with
        any provided values (via update not setting).

    fields (content): if scope.content fields provided, will update the fields of the new course
        xblock definition to this. Like settings fields, if provided, this will cause a new version of any
        given version as well as a new version of the definition (which will point to the existing one if
        given a version). If not provided and given a version_dict, it will reuse the same definition as
        that version's course (obvious since it's reusing the course). If not provided and no version_dict
        is given, it will be empty and get the field defaults when loaded.

    master_branch: the tag (key) for the version name in the dict which is the DRAFT version. Not the actual
        version guid, but what to call it.

    search_targets: a dict of search key and value. For example, wiki_slug. Add any fields whose edits
        should change the search targets to SplitMongoModuleStore.SEARCH_TARGET dict

    versions_dict: the starting version ids where the keys are the tags such as DRAFT and PUBLISHED
        and the values are structure guids. If provided, the new course will reuse this version (unless you also
        provide any fields overrides, see above). if not provided, will create a mostly empty course
        structure with just a category course root xblock.
    """
    # either need to assert this or have a default
    # NOTE(review): assert is stripped under ``python -O``; callers must always pass master_branch
    assert master_branch is not None
    # check course and run's uniqueness
    locator = CourseLocator(org=org, course=course, run=run, branch=master_branch)
    # delegate the actual creation to the shared course/library path
    return self._create_courselike(
        locator, user_id, master_branch, fields, versions_dict,
        search_targets, root_category, root_block_id, **kwargs
    )
def _create_courselike(
    self, locator, user_id, master_branch, fields=None,
    versions_dict=None, search_targets=None, root_category='course',
    root_block_id=None, **kwargs
):
    """
    Internal code for creating a course or library.

    Builds from the inside out — definition, then structure, then index entry —
    choosing one of three paths:
      * no starting version: create a fresh definition and structure;
      * starting version plus field overrides: version the existing structure
        (and definition if content fields were given);
      * starting version and no overrides: reuse the existing structure as-is.
    """
    index = self.get_course_index(locator)
    if index is not None:
        raise DuplicateCourseError(locator, index)

    partitioned_fields = self.partition_fields_by_scope(root_category, fields)
    block_fields = partitioned_fields[Scope.settings]
    if Scope.children in partitioned_fields:
        block_fields.update(partitioned_fields[Scope.children])
    definition_fields = self._serialize_fields(root_category, partitioned_fields.get(Scope.content, {}))

    # build from inside out: definition, structure, index entry
    # if building a wholly new structure
    if versions_dict is None or master_branch not in versions_dict:
        # create new definition and structure
        definition_id = self.create_definition_from_data(locator, definition_fields, root_category, user_id).definition_id

        draft_structure = self._new_structure(
            user_id,
            BlockKey(
                root_category,
                root_block_id or SplitMongoModuleStore.DEFAULT_ROOT_COURSE_BLOCK_ID,
            ),
            block_fields,
            definition_id
        )
        new_id = draft_structure['_id']

        if versions_dict is None:
            versions_dict = {master_branch: new_id}
        else:
            versions_dict[master_branch] = new_id

    elif block_fields or definition_fields:  # pointing to existing course w/ some overrides
        # just get the draft_version structure
        draft_version = CourseLocator(version_guid=versions_dict[master_branch])
        draft_structure = self._lookup_course(draft_version).structure
        draft_structure = self.version_structure(locator, draft_structure, user_id)
        new_id = draft_structure['_id']
        root_block = draft_structure['blocks'][draft_structure['root']]
        if block_fields is not None:
            root_block.fields.update(self._serialize_fields(root_category, block_fields))
        if definition_fields is not None:
            # merge content overrides into the existing definition and version it
            old_def = self.get_definition(locator, root_block.definition)
            new_fields = old_def['fields']
            new_fields.update(definition_fields)
            definition_id = self._update_definition_from_data(locator, old_def, new_fields, user_id).definition_id
            root_block.definition = definition_id
            root_block.edit_info.edited_on = datetime.datetime.now(UTC)
            root_block.edit_info.edited_by = user_id
            root_block.edit_info.previous_version = root_block.edit_info.update_version
            root_block.edit_info.update_version = new_id

        versions_dict[master_branch] = new_id

    else:  # Pointing to an existing course structure
        new_id = versions_dict[master_branch]
        draft_version = CourseLocator(version_guid=new_id)
        draft_structure = self._lookup_course(draft_version).structure

    locator = locator.replace(version_guid=new_id)
    with self.bulk_operations(locator):
        self.update_structure(locator, draft_structure)
        index_entry = {
            '_id': ObjectId(),
            'org': locator.org,
            'course': locator.course,
            'run': locator.run,
            'edited_by': user_id,
            'edited_on': datetime.datetime.now(UTC),
            'versions': versions_dict,
            'schema_version': self.SCHEMA_VERSION,
            'search_targets': search_targets or {},
        }
        if fields is not None:
            self._update_search_targets(index_entry, fields)
        self.insert_course_index(locator, index_entry)

        # expensive hack to persist default field values set in __init__ method (e.g., wiki_slug)
        if isinstance(locator, LibraryLocator):
            course = self.get_library(locator, **kwargs)
        else:
            course = self.get_course(locator, **kwargs)
        return self.update_item(course, user_id, **kwargs)
def create_library(self, org, library, user_id, fields, **kwargs):
    """
    Create a new library. Arguments are similar to create_course().
    """
    # fill in library-specific defaults (callers may override any of them),
    # then delegate to the shared course/library creation path
    kwargs["fields"] = fields
    kwargs.setdefault("master_branch", ModuleStoreEnum.BranchName.library)
    kwargs.setdefault("root_category", "library")
    kwargs.setdefault("root_block_id", "library")
    locator = LibraryLocator(org=org, library=library, branch=kwargs["master_branch"])
    return self._create_courselike(locator, user_id, **kwargs)
def update_item(self, descriptor, user_id, allow_not_found=False, force=False, **kwargs):
    """
    Save the descriptor's own fields. Does not descend the course DAG to save
    the children. Returns the new descriptor (with updated location), or the
    one passed in when nothing changed.

    raises ItemNotFoundError if the location does not exist.

    Creates a new course version. If the descriptor's location has an org, course
    and run, it moves the course head pointer. If the version_guid of the descriptor
    points to a non-head version and there's been an intervening change to this item,
    it raises a VersionConflictError unless force is True. In the force case, it forks
    the course but leaves the head pointer where it is (this change will not be in the
    course head).

    The implementation tries to detect which, if any changes, actually need to be
    saved and thus won't version the definition, structure, nor course if they
    didn't change.
    """
    partitioned = self.partition_xblock_fields_by_scope(descriptor)
    updated = self._update_item_from_fields(
        user_id, descriptor.location.course_key, BlockKey.from_usage_key(descriptor.location),
        partitioned, descriptor.definition_locator, allow_not_found, force, **kwargs
    )
    # _update_item_from_fields returns None when nothing needed saving;
    # hand the caller back the descriptor it gave us in that case
    return updated or descriptor
def _update_item_from_fields(self, user_id, course_key, block_key, partitioned_fields,  # pylint: disable=too-many-statements
                             definition_locator, allow_not_found, force, asides=None, **kwargs):
    """
    Broke out guts of update_item for short-circuited internal use only.

    Detects whether the definition, settings, children, or asides actually
    changed; only when something did does it version the structure (and move
    the branch head). Returns the re-fetched item on change, else None.
    """
    with self.bulk_operations(course_key):
        # a LocalId/None block id means the block was never persisted: create instead
        if allow_not_found and isinstance(block_key.id, (LocalId, NoneType)):
            fields = {}
            for subfields in partitioned_fields.itervalues():
                fields.update(subfields)
            return self.create_item(
                user_id, course_key, block_key.type, fields=fields, asides=asides, force=force
            )

        original_structure = self._lookup_course(course_key).structure
        index_entry = self._get_index_if_valid(course_key, force)

        original_entry = self._get_block_from_structure(original_structure, block_key)
        if original_entry is None:
            if allow_not_found:
                # block id was supplied but not yet in the structure: create it there
                fields = {}
                for subfields in partitioned_fields.itervalues():
                    fields.update(subfields)
                return self.create_item(user_id, course_key, block_key.type, block_id=block_key.id, fields=fields,
                                        asides=asides, force=force)
            else:
                raise ItemNotFoundError(course_key.make_usage_key(block_key.type, block_key.id))

        is_updated = False
        definition_fields = partitioned_fields[Scope.content]
        if definition_locator is None:
            definition_locator = DefinitionLocator(original_entry.block_type, original_entry.definition)
        if definition_fields:
            definition_locator, is_updated = self.update_definition_from_data(
                course_key, definition_locator, definition_fields, user_id
            )

        # check metadata
        settings = partitioned_fields[Scope.settings]
        settings = self._serialize_fields(block_key.type, settings)
        if not is_updated:
            is_updated = self._compare_settings(settings, original_entry.fields)

        # check children
        if partitioned_fields.get(Scope.children, {}):  # purposely not 'is not None'
            serialized_children = [BlockKey.from_usage_key(child) for child in partitioned_fields[Scope.children]['children']]
            is_updated = is_updated or original_entry.fields.get('children', []) != serialized_children
            if is_updated:
                settings['children'] = serialized_children

        asides_data_to_update = None
        if asides:
            asides_data_to_update, asides_updated = self._get_asides_to_update_from_structure(original_structure,
                                                                                              block_key, asides)
        else:
            asides_updated = False

        # if updated, rev the structure
        if is_updated or asides_updated:
            new_structure = self.version_structure(course_key, original_structure, user_id)
            block_data = self._get_block_from_structure(new_structure, block_key)

            block_data.definition = definition_locator.definition_id
            block_data.fields = settings
            if asides_updated:
                block_data.asides = asides_data_to_update

            new_id = new_structure['_id']

            # source_version records which revision a block was copied from. In this method, we're updating
            # the block, so it's no longer a direct copy, and we can remove the source_version reference.
            block_data.edit_info.source_version = None

            self.version_block(block_data, user_id, new_id)
            self.update_structure(course_key, new_structure)

            # update the index entry if appropriate
            if index_entry is not None:
                self._update_search_targets(index_entry, definition_fields)
                self._update_search_targets(index_entry, settings)
                # rebuild the course key so it carries the new version guid
                if isinstance(course_key, LibraryLocator):
                    course_key = LibraryLocator(
                        org=index_entry['org'],
                        library=index_entry['course'],
                        branch=course_key.branch,
                        version_guid=new_id
                    )
                else:
                    course_key = CourseLocator(
                        org=index_entry['org'],
                        course=index_entry['course'],
                        run=index_entry['run'],
                        branch=course_key.branch,
                        version_guid=new_id
                    )
                self._update_head(course_key, index_entry, course_key.branch, new_id)
            elif isinstance(course_key, LibraryLocator):
                course_key = LibraryLocator(version_guid=new_id)
            else:
                course_key = CourseLocator(version_guid=new_id)

            if isinstance(course_key, LibraryLocator):
                self._flag_library_updated_event(course_key)

            # fetch and return the new item--fetching is unnecessary but a good qc step
            new_locator = course_key.make_usage_key(block_key.type, block_key.id)
            return self.get_item(new_locator, **kwargs)
        else:
            return None
def create_xblock(
    self, runtime, course_key, block_type, block_id=None, fields=None,
    definition_id=None, parent_xblock=None, **kwargs
):
    """
    This method instantiates the correct subclass of XModuleDescriptor based
    on the contents of json_data. It does not persist it and can create one which
    has no usage id.

    parent_xblock is used to compute inherited metadata as well as to append the new xblock.

    json_data:
    - 'block_type': the xmodule block_type
    - 'fields': a dict of locally set fields (not inherited) in json format not pythonic typed format!
    - 'definition': the object id of the existing definition
    """
    # NOTE(review): assert is stripped under ``python -O``; runtime is required
    assert runtime is not None

    xblock_class = runtime.load_block_type(block_type)
    json_data = {
        'block_type': block_type,
        'fields': {},
    }
    if definition_id is not None:
        json_data['definition'] = definition_id
    if parent_xblock is None:
        # If no parent, then nothing to inherit.
        inherited_settings = {}
    else:
        inherited_settings = parent_xblock.xblock_kvs.inherited_settings.copy()
        if fields is not None:
            # overlay any explicitly supplied inheritable fields onto the inherited set
            for field_name in inheritance.InheritanceMixin.fields:
                if field_name in fields:
                    inherited_settings[field_name] = fields[field_name]
    # NOTE(review): inherited_settings is computed above but not passed to
    # xblock_from_json in this body — confirm whether that is intentional

    new_block = runtime.xblock_from_json(
        xblock_class,
        course_key,
        BlockKey(block_type, block_id) if block_id else None,
        BlockData(**json_data),
        **kwargs
    )
    for field_name, value in (fields or {}).iteritems():
        setattr(new_block, field_name, value)

    if parent_xblock is not None:
        parent_xblock.children.append(new_block.scope_ids.usage_id)
        # decache pending children field settings
        parent_xblock.save()
    return new_block
def persist_xblock_dag(self, xblock, user_id, force=False):
    """
    Create or update the xblock and all of its children. The xblock's location must specify a course.
    If it doesn't specify a usage_id, then it's presumed to be new and needs creation. This function
    descends the children performing the same operation for any that are xblocks. Any children which
    are block_ids just update the children pointer.

    All updates go into the same course version (bulk updater).

    Updates the objects which came in w/ updated location and definition_location info.

    returns the post-persisted version of the incoming xblock. Note that its children will be ids not
    objects.

    :param xblock: the head of the dag
    :param user_id: who's doing the change
    """
    # find course_index entry if applicable and structures entry
    course_key = xblock.location.course_key
    with self.bulk_operations(course_key):
        index_entry = self._get_index_if_valid(course_key, force)
        structure = self._lookup_course(course_key).structure
        new_structure = self.version_structure(course_key, structure, user_id)
        new_id = new_structure['_id']
        # recursively persist the whole subtree into the new structure's blocks
        is_updated = self._persist_subdag(course_key, xblock, user_id, new_structure['blocks'], new_id)

        if is_updated:
            self.update_structure(course_key, new_structure)

            # update the index entry if appropriate
            if index_entry is not None:
                self._update_head(course_key, index_entry, xblock.location.branch, new_id)

            # fetch and return the new item--fetching is unnecessary but a good qc step
            return self.get_item(xblock.location.for_version(new_id))
        else:
            # nothing changed anywhere in the dag: the new structure version is discarded
            return xblock
def _persist_subdag(self, course_key, xblock, user_id, structure_blocks, new_id):
    """
    Recursively persist ``xblock`` and any of its children that are still
    in-memory (LocalId) into ``structure_blocks``, mutating the xblock's
    definition_locator and scope_ids in place as ids get assigned.

    Returns True if this block (or any descendant) actually changed.
    """
    # persist the definition if persisted != passed
    partitioned_fields = self.partition_xblock_fields_by_scope(xblock)
    new_def_data = self._serialize_fields(xblock.category, partitioned_fields[Scope.content])
    is_updated = False
    if xblock.definition_locator is None or isinstance(xblock.definition_locator.definition_id, LocalId):
        xblock.definition_locator = self.create_definition_from_data(
            course_key, new_def_data, xblock.category, user_id
        )
        is_updated = True
    elif new_def_data:
        xblock.definition_locator, is_updated = self.update_definition_from_data(
            course_key, xblock.definition_locator, new_def_data, user_id
        )

    if isinstance(xblock.scope_ids.usage_id.block_id, LocalId):
        # generate an id
        is_new = True
        is_updated = True
        block_id = getattr(xblock.scope_ids.usage_id.block_id, 'block_id', None)
        if block_id is None:
            block_key = self._generate_block_key(structure_blocks, xblock.scope_ids.block_type)
        else:
            block_key = BlockKey(xblock.scope_ids.block_type, block_id)
        # rewrite the xblock's own ids so callers see the persisted identity
        new_usage_id = xblock.scope_ids.usage_id.replace(block_id=block_key.id)
        xblock.scope_ids = xblock.scope_ids._replace(usage_id=new_usage_id)
    else:
        is_new = False
        block_key = BlockKey(xblock.scope_ids.block_type, xblock.scope_ids.usage_id.block_id)

    children = []
    if xblock.has_children:
        for child in xblock.children:
            if isinstance(child.block_id, LocalId):
                child_block = xblock.system.get_block(child)
                is_updated = self._persist_subdag(course_key, child_block, user_id, structure_blocks, new_id) or is_updated
                children.append(BlockKey.from_usage_key(child_block.location))
            else:
                children.append(BlockKey.from_usage_key(child))
        # safe when is_new: short-circuit (is_updated already True) avoids the
        # structure_blocks lookup for a block that isn't there yet
        is_updated = is_updated or structure_blocks[block_key].fields['children'] != children

    block_fields = partitioned_fields[Scope.settings]
    block_fields = self._serialize_fields(xblock.category, block_fields)
    if not is_new and not is_updated:
        is_updated = self._compare_settings(block_fields, structure_blocks[block_key].fields)
    if children:
        block_fields['children'] = children

    if is_updated:
        if is_new:
            block_info = self._new_block(
                user_id,
                xblock.category,
                block_fields,
                xblock.definition_locator.definition_id,
                new_id,
                raw=True
            )
        else:
            block_info = structure_blocks[block_key]
            block_info.fields = block_fields
            block_info.definition = xblock.definition_locator.definition_id

        self.version_block(block_info, user_id, new_id)
        structure_blocks[block_key] = block_info

    return is_updated
def _compare_settings(self, settings, original_fields):
"""
Return True if the settings are not == to the original fields
:param settings:
:param original_fields:
"""
original_keys = original_fields.keys()
if 'children' in original_keys:
original_keys.remove('children')
if len(settings) != len(original_keys):
return True
else:
new_keys = settings.keys()
for key in original_keys:
if key not in new_keys or original_fields[key] != settings[key]:
return True
def copy(self, user_id, source_course, destination_course, subtree_list=None, blacklist=None):
    """
    Copies each xblock in subtree_list and those blocks descendants excluding blacklist
    from source_course to destination_course.

    To delete a block in the destination_course, copy its parent and blacklist the other
    sibs to keep them from being copies. You can also just call delete_item on the destination.

    Ensures that each subtree occurs in the same place in destination as it does in source. If any
    of the source's subtree parents are missing from destination, it raises ItemNotFound([parent_ids]).
    To determine the same relative order vis-a-vis published siblings,
    publishing may involve changing the order of previously published siblings. For example,
    if publishing `[c, d]` and source parent has children `[a, b, c, d, e]` and destination parent
    currently has children `[e, b]`, there's no obviously correct resulting order; thus, publish will
    reorder destination to `[b, c, d, e]` to make it conform with the source.

    :param source_course: a CourseLocator (can be a version or course w/ branch)
    :param destination_course: a CourseLocator which must be an existing course but branch doesn't have
        to exist yet. (The course must exist b/c Locator doesn't have everything necessary to create it).
        Note, if the branch doesn't exist, then the source_course structure's root must be in subtree_list;
        otherwise, the publish will violate the parents must exist rule.
    :param subtree_list: a list of usage keys whose subtrees to publish.
    :param blacklist: a list of usage keys to not change in the destination: i.e., don't add
        if not there, don't update if there.

    Raises:
        ItemNotFoundError: if it cannot find the course. if the request is to publish a
            subtree but the ancestors up to and including the course root are not published.
    """
    # get the destination's index, and source and destination structures.
    with self.bulk_operations(source_course):
        source_structure = self._lookup_course(source_course).structure

        with self.bulk_operations(destination_course):
            index_entry = self.get_course_index(destination_course)
            if index_entry is None:
                # brand new course
                raise ItemNotFoundError(destination_course)

            if destination_course.branch not in index_entry['versions']:
                # must be copying the dag root if there's no current dag
                root_block_key = source_structure['root']
                if not any(root_block_key == BlockKey.from_usage_key(subtree) for subtree in subtree_list):
                    raise ItemNotFoundError(u'Must publish course root {}'.format(root_block_key))
                root_source = source_structure['blocks'][root_block_key]
                # create branch
                destination_structure = self._new_structure(
                    user_id, root_block_key,
                    # leave off the fields b/c the children must be filtered
                    definition_id=root_source.definition,
                )
            else:
                # branch exists: work on a fresh version of its current structure
                destination_structure = self._lookup_course(destination_course).structure
                destination_structure = self.version_structure(destination_course, destination_structure, user_id)

            if blacklist != EXCLUDE_ALL:
                # normalize the blacklist into BlockKeys (EXCLUDE_ALL is a sentinel passed through as-is)
                blacklist = [BlockKey.from_usage_key(shunned) for shunned in blacklist or []]
            # iterate over subtree list filtering out blacklist.
            orphans = set()
            destination_blocks = destination_structure['blocks']
            for subtree_root in subtree_list:
                if BlockKey.from_usage_key(subtree_root) != source_structure['root']:
                    # find the parents and put root in the right sequence
                    parents = self._get_parents_from_structure(BlockKey.from_usage_key(subtree_root), source_structure)
                    parent_found = False
                    for parent in parents:
                        # If a parent isn't found in the destination_blocks, it's possible it was renamed
                        # in the course export. Continue and only throw an exception if *no* parents are found.
                        if parent in destination_blocks:
                            parent_found = True
                            orphans.update(
                                self._sync_children(
                                    source_structure['blocks'][parent],
                                    destination_blocks[parent],
                                    BlockKey.from_usage_key(subtree_root)
                                )
                            )
                    if len(parents) and not parent_found:
                        raise ItemNotFoundError(parents)
                # update/create the subtree and its children in destination (skipping blacklist)
                orphans.update(
                    self._copy_subdag(
                        user_id, destination_structure['_id'],
                        BlockKey.from_usage_key(subtree_root),
                        source_structure['blocks'],
                        destination_blocks,
                        blacklist
                    )
                )
            # remove any remaining orphans
            for orphan in orphans:
                # orphans will include moved as well as deleted xblocks. Only delete the deleted ones.
                self._delete_if_true_orphan(orphan, destination_structure)

            # update the db
            self.update_structure(destination_course, destination_structure)
            self._update_head(destination_course, index_entry, destination_course.branch, destination_structure['_id'])
    @contract(source_keys="list(BlockUsageLocator)", dest_usage=BlockUsageLocator)
    def copy_from_template(self, source_keys, dest_usage, user_id, head_validation=True):
        """
        Flexible mechanism for inheriting content from an external course/library/etc.

        Will copy all of the XBlocks whose keys are passed as `source_keys` so that they become
        children of the XBlock whose key is `dest_usage`. Any previously existing children of
        `dest_usage` that haven't been replaced/updated by this copy_from_template operation will
        be deleted.

        Unlike `copy()`, this does not care whether the resulting blocks are positioned similarly
        in their new course/library. However, the resulting blocks will be in the same relative
        order as `source_keys`.

        If any of the blocks specified already exist as children of the destination block, they
        will be updated rather than duplicated or replaced. If they have Scope.settings field values
        overriding inherited default values, those overrides will be preserved.

        IMPORTANT: This method does not preserve block_id - in other words, every block that is
        copied will be assigned a new block_id. This is because we assume that the same source block
        may be copied into one course in multiple places. However, it *is* guaranteed that every
        time this method is called for the same source block and dest_usage, the same resulting
        block id will be generated.

        :param source_keys: a list of BlockUsageLocators. Order is preserved.
        :param dest_usage: The BlockUsageLocator that will become the parent of an inherited copy
            of all the xblocks passed in `source_keys`.
        :param user_id: The user who will get credit for making this change.
        :param head_validation: passed through to the source-course lookup; when False, allows
            reading a source version that is not the branch head.
        """
        # Preload the block structures for all source courses/libraries/etc.
        # so that we can access descendant information quickly
        source_structures = {}
        for key in source_keys:
            course_key = key.course_key
            if course_key.branch is None:
                raise ItemNotFoundError("branch is required for all source keys when using copy_from_template")
            if course_key not in source_structures:
                with self.bulk_operations(course_key):
                    source_structures[course_key] = self._lookup_course(
                        course_key, head_validation=head_validation
                    ).structure
        destination_course = dest_usage.course_key
        with self.bulk_operations(destination_course):
            index_entry = self.get_course_index(destination_course)
            if index_entry is None:
                raise ItemNotFoundError(destination_course)
            # Fork the destination structure so all edits below land in a new version document.
            dest_structure = self._lookup_course(destination_course).structure
            old_dest_structure_version = dest_structure['_id']
            dest_structure = self.version_structure(destination_course, dest_structure, user_id)
            # Set of all descendent block IDs of dest_usage that are to be replaced:
            block_key = BlockKey(dest_usage.block_type, dest_usage.block_id)
            orig_descendants = set(self.descendants(dest_structure['blocks'], block_key, depth=None, descendent_map={}))
            # The descendants() method used above adds the block itself, which we don't consider a descendant.
            orig_descendants.remove(block_key)
            new_descendants = self._copy_from_template(
                source_structures, source_keys, dest_structure, block_key, user_id, head_validation
            )
            # Update the edit_info of the destination parent block:
            dest_info = dest_structure['blocks'][block_key]
            dest_info.edit_info.previous_version = dest_info.edit_info.update_version
            dest_info.edit_info.update_version = old_dest_structure_version
            dest_info.edit_info.edited_by = user_id
            dest_info.edit_info.edited_on = datetime.datetime.now(UTC)
            # Any original descendant not reproduced by the copy is now orphaned: drop it.
            orphans = orig_descendants - new_descendants
            for orphan in orphans:
                del dest_structure['blocks'][orphan]
            self.update_structure(destination_course, dest_structure)
            self._update_head(destination_course, index_entry, destination_course.branch, dest_structure['_id'])
        # Return usage locators for all the new children:
        return [
            destination_course.make_usage_key(*k)
            for k in dest_structure['blocks'][block_key].fields['children']
        ]
    def _copy_from_template(
        self, source_structures, source_keys, dest_structure, new_parent_block_key, user_id, head_validation
    ):
        """
        Internal recursive implementation of copy_from_template()

        Clones each block named in ``source_keys`` into ``dest_structure`` as a child of
        ``new_parent_block_key``, then recurses into each clone's children.

        Returns the new set of BlockKeys that are the new descendants of the block with key 'block_key'
        """
        new_blocks = set()
        new_children = list()  # ordered list of the new children of new_parent_block_key
        for usage_key in source_keys:
            src_course_key = usage_key.course_key
            hashable_source_id = src_course_key.for_version(None)
            block_key = BlockKey(usage_key.block_type, usage_key.block_id)
            source_structure = source_structures[src_course_key]
            if block_key not in source_structure['blocks']:
                raise ItemNotFoundError(usage_key)
            source_block_info = source_structure['blocks'][block_key]
            # Compute a new block ID. This new block ID must be consistent when this
            # method is called with the same (source_key, dest_structure) pair
            # (a SHA1 of source id + source block id + destination parent id).
            unique_data = "{}:{}:{}".format(
                unicode(hashable_source_id).encode("utf-8"),
                block_key.id,
                new_parent_block_key.id,
            )
            new_block_id = hashlib.sha1(unique_data).hexdigest()[:20]
            new_block_key = BlockKey(block_key.type, new_block_id)
            # Now clone block_key to new_block_key:
            new_block_info = copy.deepcopy(source_block_info)
            # Note that new_block_info now points to the same definition ID entry as source_block_info did
            existing_block_info = dest_structure['blocks'].get(new_block_key, BlockData())
            # Inherit the Scope.settings values from 'fields' to 'defaults'
            new_block_info.defaults = new_block_info.fields
            # <workaround>
            # CAPA modules store their 'markdown' value (an alternate representation of their content)
            # in Scope.settings rather than Scope.content :-/
            # markdown is a field that really should not be overridable - it fundamentally changes the content.
            # capa modules also use a custom editor that always saves their markdown field to the metadata,
            # even if it hasn't changed, which breaks our override system.
            # So until capa modules are fixed, we special-case them and remove their markdown fields,
            # forcing the inherited version to use XML only.
            if usage_key.block_type == 'problem' and 'markdown' in new_block_info.defaults:
                del new_block_info.defaults['markdown']
            # </workaround>
            new_block_info.fields = existing_block_info.fields  # Preserve any existing overrides
            if 'children' in new_block_info.defaults:
                del new_block_info.defaults['children']  # Will be set later
            new_block_info.edit_info = existing_block_info.edit_info
            new_block_info.edit_info.previous_version = new_block_info.edit_info.update_version
            new_block_info.edit_info.update_version = dest_structure['_id']
            # Note we do not set 'source_version' - it's only used for copying identical blocks
            # from draft to published as part of publishing workflow.
            # Setting it to the source_block_info structure version here breaks split_draft's has_changes() method.
            new_block_info.edit_info.edited_by = user_id
            new_block_info.edit_info.edited_on = datetime.datetime.now(UTC)
            new_block_info.edit_info.original_usage = unicode(usage_key.replace(branch=None, version_guid=None))
            new_block_info.edit_info.original_usage_version = source_block_info.edit_info.update_version
            dest_structure['blocks'][new_block_key] = new_block_info
            # Recurse to copy this source block's children (if any) under its clone.
            children = source_block_info.fields.get('children')
            if children:
                children = [src_course_key.make_usage_key(child.type, child.id) for child in children]
                new_blocks |= self._copy_from_template(
                    source_structures, children, dest_structure, new_block_key, user_id, head_validation
                )
            new_blocks.add(new_block_key)
            # And add new_block_key to the list of new_parent_block_key's new children:
            new_children.append(new_block_key)
        # Update the children of new_parent_block_key
        dest_structure['blocks'][new_parent_block_key].fields['children'] = new_children
        return new_blocks
    def delete_item(self, usage_locator, user_id, force=False):
        """
        Delete the block or tree rooted at block (if delete_children) and any references w/in the course to the block
        from a new version of the course structure.

        returns CourseLocator for new version

        raises ItemNotFoundError if the location does not exist.
        raises ValueError if usage_locator points to the structure root

        Creates a new course version. If the descriptor's location has a org, a course, and a run, it moves the course head
        pointer. If the version_guid of the descriptor points to a non-head version and there's been an intervening
        change to this item, it raises a VersionConflictError unless force is True. In the force case, it forks
        the course but leaves the head pointer where it is (this change will not be in the course head).
        """
        if not isinstance(usage_locator, BlockUsageLocator) or usage_locator.deprecated:
            # The supplied UsageKey is of the wrong type, so it can't possibly be stored in this modulestore.
            raise ItemNotFoundError(usage_locator)
        with self.bulk_operations(usage_locator.course_key):
            original_structure = self._lookup_course(usage_locator.course_key).structure
            block_key = BlockKey.from_usage_key(usage_locator)
            if original_structure['root'] == block_key:
                raise ValueError("Cannot delete the root of a course")
            if block_key not in original_structure['blocks']:
                raise ValueError("Cannot delete block_key {} from course {}, because that block does not exist.".format(
                    block_key,
                    usage_locator,
                ))
            index_entry = self._get_index_if_valid(usage_locator.course_key, force)
            # Fork the structure: all edits below land in a new version document.
            new_structure = self.version_structure(usage_locator.course_key, original_structure, user_id)
            new_blocks = new_structure['blocks']
            new_id = new_structure['_id']
            # Detach the block from every parent and record the edit on each parent.
            parent_block_keys = self._get_parents_from_structure(block_key, original_structure)
            for parent_block_key in parent_block_keys:
                parent_block = new_blocks[parent_block_key]
                parent_block.fields['children'].remove(block_key)
                parent_block.edit_info.edited_on = datetime.datetime.now(UTC)
                parent_block.edit_info.edited_by = user_id
                parent_block.edit_info.previous_version = parent_block.edit_info.update_version
                parent_block.edit_info.update_version = new_id
                # remove the source_version reference
                parent_block.edit_info.source_version = None
                self.decache_block(usage_locator.course_key, new_id, parent_block_key)
            # Remove the block itself plus descendants no longer reachable from elsewhere.
            self._remove_subtree(BlockKey.from_usage_key(usage_locator), new_blocks)
            # update index if appropriate and structures
            self.update_structure(usage_locator.course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(usage_locator.course_key, index_entry, usage_locator.branch, new_id)
                result = usage_locator.course_key.for_version(new_id)
            else:
                # non-head (forked) delete: return a version-only locator
                result = CourseLocator(version_guid=new_id)
            if isinstance(usage_locator.course_key, LibraryLocator):
                self._flag_library_updated_event(usage_locator.course_key)
            self._emit_item_deleted_signal(usage_locator, user_id)
        return result
@contract(root_block_key=BlockKey, blocks='dict(BlockKey: BlockData)')
def _remove_subtree(self, root_block_key, blocks):
"""
Remove the subtree rooted at root_block_key
We do this breadth-first to make sure that we don't remove
any children that may have parents that we don't want to delete.
"""
# create mapping from each child's key to its parents' keys
child_parent_map = defaultdict(set)
for block_key, block_data in blocks.iteritems():
for child in block_data.fields.get('children', []):
child_parent_map[BlockKey(*child)].add(block_key)
to_delete = {root_block_key}
tier = {root_block_key}
while tier:
next_tier = set()
for block_key in tier:
for child in blocks[block_key].fields.get('children', []):
child_block_key = BlockKey(*child)
parents = child_parent_map[child_block_key]
# Make sure we want to delete all of the child's parents
# before slating it for deletion
if parents.issubset(to_delete):
next_tier.add(child_block_key)
tier = next_tier
to_delete.update(tier)
for block_key in to_delete:
del blocks[block_key]
def delete_course(self, course_key, user_id):
"""
Remove the given course from the course index.
Only removes the course from the index. The data remains. You can use create_course
with a versions hash to restore the course; however, the edited_on and
edited_by won't reflect the originals, of course.
"""
# this is the only real delete in the system. should it do something else?
log.info(u"deleting course from split-mongo: %s", course_key)
self.delete_course_index(course_key)
# We do NOT call the super class here since we need to keep the assets
# in case the course is later restored.
# super(SplitMongoModuleStore, self).delete_course(course_key, user_id)
self._emit_course_deleted_signal(course_key)
    @contract(block_map="dict(BlockKey: dict)", block_key=BlockKey)
    def inherit_settings(
        self, block_map, block_key, inherited_settings_map, inheriting_settings=None, inherited_from=None
    ):
        """
        Updates block_data with any inheritable setting set by an ancestor and recurses to children.

        :param block_map: BlockKey -> block data for the whole structure
        :param block_key: the block to process before recursing into its children
        :param inherited_settings_map: out-param cache of BlockKey -> inherited field values
        :param inheriting_settings: values passed down from this block's ancestors
        :param inherited_from: chain of ancestor child-keys, used to detect inheritance cycles
        """
        if block_key not in block_map:
            return
        block_data = block_map[block_key]
        if inheriting_settings is None:
            inheriting_settings = {}
        if inherited_from is None:
            inherited_from = []
        # the currently passed down values take precedence over any previously cached ones
        # NOTE: this should show the values which all fields would have if inherited: i.e.,
        # not set to the locally defined value but to value set by nearest ancestor who sets it
        inherited_settings_map.setdefault(block_key, {}).update(inheriting_settings)
        # update the inheriting w/ what should pass to children
        inheriting_settings = inherited_settings_map[block_key].copy()
        block_fields = block_data.fields
        # a locally-set inheritable field becomes the value the children inherit
        for field_name in inheritance.InheritanceMixin.fields:
            if field_name in block_fields:
                inheriting_settings[field_name] = block_fields[field_name]
        for child in block_fields.get('children', []):
            try:
                if child in inherited_from:
                    raise Exception(u'Infinite loop detected when inheriting to {}, having already inherited from {}'.format(child, inherited_from))
                self.inherit_settings(
                    block_map,
                    BlockKey(*child),
                    inherited_settings_map,
                    inheriting_settings,
                    inherited_from + [child]
                )
            except KeyError:
                # here's where we need logic for looking up in other structures when we allow cross pointers
                # but it's also getting this during course creation if creating top down w/ children set or
                # migration where the old mongo published had pointers to privates
                pass
def descendants(self, block_map, block_id, depth, descendent_map):
"""
adds block and its descendants out to depth to descendent_map
Depth specifies the number of levels of descendants to return
(0 => this usage only, 1 => this usage and its children, etc...)
A depth of None returns all descendants
"""
if block_id not in block_map:
return descendent_map
if block_id not in descendent_map:
descendent_map[block_id] = block_map[block_id]
if depth is None or depth > 0:
depth = depth - 1 if depth is not None else None
for child in descendent_map[block_id].fields.get('children', []):
descendent_map = self.descendants(block_map, child, depth, descendent_map)
return descendent_map
    def get_modulestore_type(self, course_key=None):
        """
        Returns an enumeration-like type reflecting the type of this modulestore, per ModuleStoreEnum.Type.

        Args:
            course_key: just for signature compatibility
        """
        # Always 'split' regardless of course; this store has a single backend type.
        return ModuleStoreEnum.Type.split
def _find_course_assets(self, course_key):
"""
Split specific lookup
"""
try:
course_assets = self._lookup_course(course_key).structure.get('assets', {})
except (InsufficientSpecificationError, VersionConflictError) as err:
log.warning(u'Error finding assets for org "%s" course "%s" on asset '
u'request. Either version of course_key is None or invalid.',
course_key.org, course_key.course)
return {}
return course_assets
    def _update_course_assets(self, user_id, asset_key, update_function):
        """
        A wrapper for functions wanting to manipulate assets. Gets and versions the structure,
        passes the mutable array for either 'assets' or 'thumbnails' as well as the idx to the function for it to
        update, then persists the changed data back into the course.

        The update function can raise an exception if it doesn't want to actually do the commit. The
        surrounding method probably should catch that exception.
        """
        with self.bulk_operations(asset_key.course_key):
            original_structure = self._lookup_course(asset_key.course_key).structure
            index_entry = self._get_index_if_valid(asset_key.course_key)
            # Fork the structure so the asset change lands in a new version document.
            new_structure = self.version_structure(asset_key.course_key, original_structure, user_id)
            course_assets = new_structure.setdefault('assets', {})
            asset_type = asset_key.asset_type
            all_assets = SortedAssetList(iterable=[])
            # Assets should be pre-sorted, so add them efficiently without sorting.
            # extend() will raise a ValueError if the passed-in list is not sorted.
            all_assets.extend(course_assets.setdefault(asset_type, []))
            asset_idx = all_assets.find(asset_key)
            # Delegate the actual mutation (set attrs, delete, ...) to the caller's function.
            all_assets_updated = update_function(all_assets, asset_idx)
            new_structure['assets'][asset_type] = all_assets_updated.as_list()
            # update index if appropriate and structures
            self.update_structure(asset_key.course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(asset_key.course_key, index_entry, asset_key.branch, new_structure['_id'])
    def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
        """
        Saves a list of AssetMetadata to the modulestore. The list can be composed of multiple
        asset types. This method is optimized for multiple inserts at once - it only re-saves the structure
        at the end of all saves/updates.
        """
        # Determine course key to use in bulk operation. Use the first asset assuming that
        # all assets will be for the same course.
        asset_key = asset_metadata_list[0].asset_id
        course_key = asset_key.course_key
        with self.bulk_operations(course_key):
            original_structure = self._lookup_course(course_key).structure
            index_entry = self._get_index_if_valid(course_key)
            # Fork the structure so all asset changes land in a single new version document.
            new_structure = self.version_structure(course_key, original_structure, user_id)
            course_assets = new_structure.setdefault('assets', {})
            # Merge every asset into the per-type sorted lists, then write once below.
            assets_by_type = self._save_assets_by_type(
                course_key, asset_metadata_list, course_assets, user_id, import_only
            )
            for asset_type, assets in assets_by_type.iteritems():
                new_structure['assets'][asset_type] = assets.as_list()
            # update index if appropriate and structures
            self.update_structure(course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(course_key, index_entry, asset_key.branch, new_structure['_id'])
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves or updates a single asset. Simply makes it a list and calls the list save above.
"""
return self.save_asset_metadata_list([asset_metadata, ], user_id, import_only)
@contract(asset_key='AssetKey', attr_dict=dict)
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute: value pairs to set
Raises:
ItemNotFoundError if no such item exists
AttributeError is attr is one of the build in attrs.
"""
def _internal_method(all_assets, asset_idx):
"""
Update the found item
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
# Form an AssetMetadata.
mdata = AssetMetadata(asset_key, asset_key.path)
mdata.from_storable(all_assets[asset_idx])
mdata.update(attr_dict)
# Generate a Mongo doc from the metadata and update the course asset info.
all_assets[asset_idx] = mdata.to_storable()
return all_assets
self._update_course_assets(user_id, asset_key, _internal_method)
@contract(asset_key='AssetKey')
def delete_asset_metadata(self, asset_key, user_id):
"""
Internal; deletes a single asset's metadata.
Arguments:
asset_key (AssetKey): key containing original asset filename
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
def _internal_method(all_asset_info, asset_idx):
"""
Remove the item if it was found
"""
if asset_idx is None:
raise ItemNotFoundError(asset_key)
all_asset_info.pop(asset_idx)
return all_asset_info
try:
self._update_course_assets(user_id, asset_key, _internal_method)
return 1
except ItemNotFoundError:
return 0
    @contract(source_course_key='CourseKey', dest_course_key='CourseKey')
    def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
        """
        Copy all the course assets from source_course_key to dest_course_key.

        Arguments:
            source_course_key (CourseKey): identifier of course to copy from
            dest_course_key (CourseKey): identifier of course to copy to
        """
        source_structure = self._lookup_course(source_course_key).structure
        with self.bulk_operations(dest_course_key):
            original_structure = self._lookup_course(dest_course_key).structure
            index_entry = self._get_index_if_valid(dest_course_key)
            # Fork the destination structure; the asset replacement lands in a new version.
            new_structure = self.version_structure(dest_course_key, original_structure, user_id)
            # Wholesale replace the destination's asset/thumbnail info with the source's.
            new_structure['assets'] = source_structure.get('assets', {})
            new_structure['thumbnails'] = source_structure.get('thumbnails', [])
            # update index if appropriate and structures
            self.update_structure(dest_course_key, new_structure)
            if index_entry is not None:
                # update the index entry if appropriate
                self._update_head(dest_course_key, index_entry, dest_course_key.branch, new_structure['_id'])
def fix_not_found(self, course_locator, user_id):
"""
Only intended for rather low level methods to use. Goes through the children attrs of
each block removing any whose block_id is not a member of the course.
:param course_locator: the course to clean
"""
original_structure = self._lookup_course(course_locator).structure
index_entry = self._get_index_if_valid(course_locator)
new_structure = self.version_structure(course_locator, original_structure, user_id)
for block in new_structure['blocks'].itervalues():
if 'children' in block.fields:
block.fields['children'] = [
block_id for block_id in block.fields['children']
if block_id in new_structure['blocks']
]
self.update_structure(course_locator, new_structure)
if index_entry is not None:
# update the index entry if appropriate
self._update_head(course_locator, index_entry, course_locator.branch, new_structure['_id'])
    def convert_references_to_keys(self, course_key, xblock_class, jsonfields, blocks):
        """
        Convert the given serialized fields to the deserialized values by finding all references
        and converting them.

        :param jsonfields: the serialized copy of the xblock's fields
        """
        @contract(block_key="BlockUsageLocator | seq[2]")
        def robust_usage_key(block_key):
            """
            create a course_key relative usage key for the block_key. If the block_key is in blocks,
            use its correct category; otherwise, use 'unknown'.
            The purpose for this is that some operations add pointers as they build up the
            structure without worrying about order of creation. Because the category of the
            usage_key is for the most part inert, it's better to hack a value than to work
            out a dependency graph algorithm for those functions which may prereference blocks.
            """
            # if this was taken from cache, then its fields are already converted
            if isinstance(block_key, BlockUsageLocator):
                return block_key.map_into_course(course_key)
            elif not isinstance(block_key, BlockKey):
                block_key = BlockKey(*block_key)
            try:
                return course_key.make_usage_key(
                    block_key.type, block_key.id
                )
            except KeyError:
                # block not resolvable: fall back to a placeholder category
                return course_key.make_usage_key('unknown', block_key.id)
        xblock_class = self.mixologist.mix(xblock_class)
        # Make a shallow copy, so that we aren't manipulating a cached field dictionary
        output_fields = dict(jsonfields)
        for field_name, value in output_fields.iteritems():
            if value:
                field = xblock_class.fields.get(field_name)
                if field is None:
                    # not a declared field on this xblock class: pass through untouched
                    continue
                elif isinstance(field, Reference):
                    output_fields[field_name] = robust_usage_key(value)
                elif isinstance(field, ReferenceList):
                    output_fields[field_name] = [robust_usage_key(ele) for ele in value]
                elif isinstance(field, ReferenceValueDict):
                    # mutate the dict's values in place; keys are unchanged, so this is
                    # safe while iterating output_fields
                    for key, subvalue in value.iteritems():
                        value[key] = robust_usage_key(subvalue)
        return output_fields
def _get_index_if_valid(self, course_key, force=False):
"""
If the course_key identifies a course and points to its draft (or plausibly its draft),
then return the index entry.
raises VersionConflictError if not the right version
:param course_key: a CourseLocator
:param force: if false, raises VersionConflictError if the current head of the course != the one identified
by course_key
"""
if course_key.org is None or course_key.course is None or course_key.run is None or course_key.branch is None:
return None
else:
index_entry = self.get_course_index(course_key)
is_head = (
course_key.version_guid is None or
index_entry['versions'][course_key.branch] == course_key.version_guid
)
if is_head or force:
return index_entry
else:
raise VersionConflictError(
course_key,
index_entry['versions'][course_key.branch]
)
def _find_local_root(self, element_to_find, possibility, tree):
if possibility not in tree:
return False
if element_to_find in tree[possibility]:
return True
for subtree in tree[possibility]:
if self._find_local_root(element_to_find, subtree, tree):
return True
return False
def _update_search_targets(self, index_entry, fields):
"""
Update the index entry if any of the given fields are in SEARCH_TARGET_DICT. (doesn't save
the changes, just changes them in the entry dict)
:param index_entry:
:param fields: a dictionary of fields and values usually only those explicitly set and already
ready for persisting (e.g., references converted to block_ids)
"""
for field_name, field_value in fields.iteritems():
if field_name in self.SEARCH_TARGET_DICT:
index_entry.setdefault('search_targets', {})[field_name] = field_value
def _update_head(self, course_key, index_entry, branch, new_id):
"""
Update the active index for the given course's branch to point to new_id
:param index_entry:
:param course_locator:
:param new_id:
"""
if not isinstance(new_id, ObjectId):
raise TypeError('new_id must be an ObjectId, but is {!r}'.format(new_id))
index_entry['versions'][branch] = new_id
self.update_course_index(course_key, index_entry)
def partition_xblock_fields_by_scope(self, xblock):
"""
Return a dictionary of scopes mapped to this xblock's explicitly set fields w/o any conversions
"""
# explicitly_set_fields_by_scope converts to json; so, avoiding it
# the existing partition_fields_by_scope works on a dict not an xblock
result = defaultdict(dict)
for field in xblock.fields.itervalues():
if field.is_set_on(xblock):
result[field.scope][field.name] = field.read_from(xblock)
return result
    def _serialize_fields(self, category, fields):
        """
        Convert any references to their serialized form. Handle some references already being unicoded
        because the client passed them that way and nothing above this layer did the necessary deserialization.
        Remove any fields which split or its kvs computes or adds but does not want persisted.

        :param fields: a dict of fields
        """
        assert isinstance(fields, dict)
        # resolve the xblock class so its field declarations drive serialization
        xblock_class = XBlock.load_class(category, self.default_class)
        xblock_class = self.mixologist.mix(xblock_class)
        def reference_block_id(reference):
            """
            Handle client possibly setting field to strings rather than keys to get the block_id
            """
            # perhaps replace by fixing the views or Field Reference*.from_json to return a Key
            if isinstance(reference, basestring):
                reference = BlockUsageLocator.from_string(reference)
            elif isinstance(reference, BlockKey):
                return reference
            return BlockKey.from_usage_key(reference)
        # NOTE: values are replaced in place while iterating; keys never change, so
        # iteritems() remains safe here.
        for field_name, value in fields.iteritems():
            if value is not None:
                if isinstance(xblock_class.fields[field_name], Reference):
                    fields[field_name] = reference_block_id(value)
                elif isinstance(xblock_class.fields[field_name], ReferenceList):
                    fields[field_name] = [
                        reference_block_id(ele) for ele in value
                    ]
                elif isinstance(xblock_class.fields[field_name], ReferenceValueDict):
                    for key, subvalue in value.iteritems():
                        value[key] = reference_block_id(subvalue)
                # should this recurse down dicts and lists just in case they contain datetime?
                elif not isinstance(value, datetime.datetime):  # don't convert datetimes!
                    fields[field_name] = xblock_class.fields[field_name].to_json(value)
        return fields
def _new_structure(self, user_id, root_block_key, block_fields=None, definition_id=None):
"""
Internal function: create a structure element with no previous version. Must provide the root id
but not necessarily the info needed to create it (for the use case of publishing). If providing
root_category, must also provide block_fields and definition_id
"""
new_id = ObjectId()
if root_block_key is not None:
if block_fields is None:
block_fields = {}
blocks = {
root_block_key: self._new_block(
user_id, root_block_key.type, block_fields, definition_id, new_id
)
}
else:
blocks = {}
return {
'_id': new_id,
'root': root_block_key,
'previous_version': None,
'original_version': new_id,
'edited_by': user_id,
'edited_on': datetime.datetime.now(UTC),
'blocks': blocks,
'schema_version': self.SCHEMA_VERSION,
}
    @contract(block_key=BlockKey)
    def _get_parents_from_structure(self, block_key, structure):
        """
        Return the BlockKeys of every block in ``structure`` that lists ``block_key``
        among its children, i.e. all of the block's parents (possibly empty).

        Note: returns parents in the structure's encoded (BlockKey) format.
        """
        return [
            parent_block_key
            for parent_block_key, value in structure['blocks'].iteritems()
            if block_key in value.fields.get('children', [])
        ]
def _sync_children(self, source_parent, destination_parent, new_child):
"""
Reorder destination's children to the same as source's and remove any no longer in source.
Return the removed ones as orphans (a set).
"""
destination_reordered = []
destination_children = set(destination_parent.fields['children'])
source_children = source_parent.fields['children']
orphans = destination_children - set(source_children)
for child in source_children:
if child == new_child or child in destination_children:
destination_reordered.append(child)
destination_parent.fields['children'] = destination_reordered
return orphans
    @contract(
        block_key=BlockKey,
        source_blocks="dict(BlockKey: *)",
        destination_blocks="dict(BlockKey: *)",
        blacklist="list(BlockKey) | str",
    )
    def _copy_subdag(self, user_id, destination_version, block_key, source_blocks, destination_blocks, blacklist):
        """
        Update destination_blocks for the sub-dag rooted at block_key to be like the one in
        source_blocks excluding blacklist.

        Return any newly discovered orphans (as a set)
        """
        orphans = set()
        destination_block = destination_blocks.get(block_key)
        new_block = source_blocks[block_key]
        if destination_block:
            # reorder children to correspond to whatever order holds for source.
            # remove any which source no longer claims (put into orphans)
            # add any which are being copied
            source_children = new_block.fields.get('children', [])
            existing_children = destination_block.fields.get('children', [])
            # SparseList lets children be placed at their source index even when
            # earlier slots haven't been filled yet; compact_list() drops the gaps.
            destination_reordered = SparseList()
            for child in existing_children:
                try:
                    index = source_children.index(child)
                    destination_reordered[index] = child
                except ValueError:
                    # source no longer claims this child
                    orphans.add(BlockKey(*child))
            if blacklist != EXCLUDE_ALL:
                for index, child in enumerate(source_children):
                    if child not in blacklist:
                        destination_reordered[index] = child
            # the history of the published leaps between publications and only points to
            # previously published versions.
            previous_version = destination_block.edit_info.update_version
            destination_block = copy.deepcopy(new_block)
            destination_block.fields['children'] = destination_reordered.compact_list()
            destination_block.edit_info.previous_version = previous_version
            destination_block.edit_info.update_version = destination_version
            destination_block.edit_info.edited_by = user_id
            destination_block.edit_info.edited_on = datetime.datetime.now(UTC)
        else:
            # block does not exist in the destination yet: create it fresh
            destination_block = self._new_block(
                user_id, new_block.block_type,
                self._filter_blacklist(copy.copy(new_block.fields), blacklist),
                new_block.definition,
                destination_version,
                raw=True,
                block_defaults=new_block.defaults
            )
            # Extend the block's new edit_info with any extra edit_info fields from the source (e.g. original_usage):
            for key, val in new_block.edit_info.to_storable().iteritems():
                if getattr(destination_block.edit_info, key) is None:
                    setattr(destination_block.edit_info, key, val)
        # If the block we are copying from was itself a copy, then just
        # reference the original source, rather than the copy.
        destination_block.edit_info.source_version = (
            new_block.edit_info.source_version or new_block.edit_info.update_version
        )
        if blacklist != EXCLUDE_ALL:
            # recurse into each non-blacklisted child, accumulating their orphans
            for child in destination_block.fields.get('children', []):
                if child not in blacklist:
                    orphans.update(
                        self._copy_subdag(
                            user_id, destination_version, BlockKey(*child), source_blocks, destination_blocks, blacklist
                        )
                    )
        destination_blocks[block_key] = destination_block
        return orphans
@contract(blacklist='list(BlockKey) | str')
def _filter_blacklist(self, fields, blacklist):
    """
    Filter blacklisted children out of the 'children' entry of *fields*.

    A new list is built for 'children' (the caller's original children list
    is never mutated), but *fields* itself IS modified in place and then
    returned for convenience.

    :param fields: a block's fields dict; modified in place
    :param blacklist: list of BlockKeys to remove, or EXCLUDE_ALL to drop
        every child
    :return: the same *fields* dict
    """
    if blacklist == EXCLUDE_ALL:
        # Drop all children without inspecting them.
        fields['children'] = []
    else:
        fields['children'] = [child for child in fields.get('children', []) if BlockKey(*child) not in blacklist]
    return fields
@contract(orphan=BlockKey)
def _delete_if_true_orphan(self, orphan, structure):
    """
    Remove *orphan* from *structure* if nothing points to it any more, then
    recursively prune any of its children that are left parentless.
    """
    # Guard: a block that still has parents is not a true orphan.
    if self._get_parents_from_structure(orphan, structure):
        return
    removed = structure['blocks'].pop(orphan)
    for child_ref in removed.fields.get('children', []):
        self._delete_if_true_orphan(BlockKey(*child_ref), structure)
@contract(returns=BlockData)
def _new_block(self, user_id, category, block_fields, definition_id, new_id, raw=False,
               asides=None, block_defaults=None):
    """
    Build the core document structure for a block.

    :param user_id: id recorded as the block's editor
    :param category: the block type (e.g. 'course', 'chapter')
    :param block_fields: the settings and children scoped fields as a dict or son
    :param definition_id: the pointer to the content scoped fields
    :param new_id: the structure's version id
    :param raw: true if this block already has all references serialized
    :param asides: optional list of aside documents to attach
    :param block_defaults: optional dict of inherited defaults
    """
    # Serialize field references unless the caller says they already are.
    serialized_fields = block_fields if raw else self._serialize_fields(category, block_fields)
    document = {
        'block_type': category,
        'definition': definition_id,
        'fields': serialized_fields,
        'asides': asides or [],
        'edit_info': {
            'edited_on': datetime.datetime.now(UTC),
            'edited_by': user_id,
            'previous_version': None,
            'update_version': new_id,
        },
    }
    if block_defaults:
        document['defaults'] = block_defaults
    return BlockData(**document)
@contract(block_key=BlockKey, returns='BlockData | None')
def _get_block_from_structure(self, structure, block_key):
    """
    Return the BlockData stored for *block_key* in *structure*, or None if
    the block is not present.

    NOTE(review): the original docstring claimed the key is encoded before
    lookup, but no encoding happens here — the BlockKey itself is used as
    the dict key; presumably a leftover from an older storage scheme.
    """
    return structure['blocks'].get(block_key)
@contract(block_key=BlockKey)
def _get_asides_to_update_from_structure(self, structure, block_key, asides):
    """
    Merge the incoming *asides* into the asides stored for *block_key*.

    Stored asides whose 'aside_type' matches an incoming one are replaced
    in place (keeping their original position); incoming asides of a type
    not already stored are appended at the end.

    :param structure: the structure document holding the block
    :param block_key: key of the block whose asides are updated
    :param asides: list of aside dicts (each with an 'aside_type' key), or
        a falsy value to leave the stored asides untouched
    :return: (result_list, updated) — the merged aside list and a flag that
        is True iff anything was replaced or added
    """
    block = self._get_block_from_structure(structure, block_key)
    if not asides:
        # Nothing incoming: keep the stored asides as-is.
        return block.asides, False

    # Index the incoming asides by type for O(1) matching.
    new_asides_by_type = {asd['aside_type']: asd for asd in asides}
    updated = False
    result_list = []
    for aside in block.asides:
        replacement = new_asides_by_type.pop(aside['aside_type'], None)
        if replacement is not None:
            result_list.append(replacement)
            updated = True
        else:
            result_list.append(aside)
    # Whatever is left over is a brand-new aside type: append it.
    if new_asides_by_type:
        result_list.extend(new_asides_by_type.values())
        updated = True
    return result_list, updated
@contract(block_key=BlockKey, content=BlockData)
def _update_block_in_structure(self, structure, block_key, content):
    """
    Store *content* as the block document for *block_key* in *structure*.

    NOTE(review): the original docstring mentioned encoding the key for use
    as a JSON dict key, but no encoding happens here — the BlockKey itself
    is the dict key; confirm this wording is just historical.
    """
    structure['blocks'][block_key] = content
@autoretry_read()
def find_courses_by_search_target(self, field_name, field_value):
    """
    Find all the courses which cached that they have the given field with
    the given value.

    Returns: list of branch-agnostic course_keys
    """
    matching = self.find_matching_course_indexes(
        search_targets={field_name: field_value}
    )
    # Build branch-agnostic locators from the raw index documents.
    return [
        CourseLocator(index['org'], index['course'], index['run'])
        for index in matching
    ]
def get_courses_for_wiki(self, wiki_slug, **kwargs):
    """
    Return the list of courses which use this wiki_slug
    :param wiki_slug: the course wiki root slug
    :return: list of course keys

    NOTE(review): **kwargs is accepted but silently ignored; presumably it
    exists only for signature compatibility with other modulestores.
    """
    return self.find_courses_by_search_target('wiki_slug', wiki_slug)
def heartbeat(self):
    """
    Check that the db is reachable.

    :return: dict mapping this modulestore's type (split) to the result of
        the underlying db connection's heartbeat check
    """
    return {ModuleStoreEnum.Type.split: self.db_connection.heartbeat()}
def create_runtime(self, course_entry, lazy):
    """
    Create the proper runtime for this course

    :param course_entry: the course cache entry this runtime will serve
    :param lazy: passed through to the descriptor system; presumably loads
        module definitions on demand when True — confirm in
        CachingDescriptorSystem
    """
    return CachingDescriptorSystem(
        modulestore=self,
        course_entry=course_entry,
        module_data={},  # start with an empty per-course module cache
        lazy=lazy,
        default_class=self.default_class,
        error_tracker=self.error_tracker,
        render_template=self.render_template,
        mixins=self.xblock_mixins,
        select=self.xblock_select,
        disabled_xblock_types=self.disabled_xblock_types,
        services=self.services,
    )
def ensure_indexes(self):
    """
    Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
    an exception if unable to.

    This method is intended for use by tests and administrative commands, and not
    to be run during server startup.
    """
    # Delegates entirely to the db connection layer.
    self.db_connection.ensure_indexes()
class SparseList(list):
    """
    A list that accepts assignment at arbitrary indexes, auto-growing with
    None placeholders, and can later be compacted back to a dense list.
    """
    # taken from http://stackoverflow.com/questions/1857780/sparse-assignment-list-in-python
    def __setitem__(self, index, value):
        """
        Store *value* at *index*, padding the list with None entries first
        if it is not yet long enough.
        """
        shortfall = index - len(self) + 1
        if shortfall > 0:
            self.extend([None] * shortfall)
        list.__setitem__(self, index, value)

    def compact_list(self):
        """
        Return a plain list with every None placeholder removed.
        """
        return [item for item in self if item is not None]
| agpl-3.0 |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/test/test_pep3120.py | 49 | 1280 | # This file is marked as binary in the CVS, to prevent MacCVS from recoding it.
import unittest
from test import support
class PEP3120Test(unittest.TestCase):
    """Tests for PEP 3120: UTF-8 as the default Python source encoding."""

    def test_pep3120(self):
        # Non-ASCII literals in this UTF-8 source file must encode back to
        # the expected UTF-8 byte sequences.
        self.assertEqual(
            "Питон".encode("utf-8"),
            b'\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
        )
        # A backslash before a non-ASCII character is not an escape: the
        # backslash must be preserved literally.
        self.assertEqual(
            "\П".encode("utf-8"),
            b'\\\xd0\x9f'
        )

    def test_badsyntax(self):
        # Importing a module whose bytes are not valid UTF-8 must fail with
        # a SyntaxError whose message mentions the encoding.
        try:
            import test.badsyntax_pep3120
        except SyntaxError as msg:
            msg = str(msg)
            self.assertTrue('UTF-8' in msg or 'utf8' in msg)
        else:
            self.fail("expected exception didn't occur")
class BuiltinCompileTests(unittest.TestCase):
    # Issue 3574.
    def test_latin1(self):
        # Allow compile() to read Latin-1 source.
        source_code = '# coding: Latin-1\nu = "Ç"\n'.encode("Latin-1")
        try:
            code = compile(source_code, '<dummy>', 'exec')
        except SyntaxError:
            self.fail("compile() cannot handle Latin-1 source")
        ns = {}
        exec(code, ns)
        # The Latin-1 bytes must decode to the expected single character.
        self.assertEqual('Ç', ns['u'])
def test_main():
    """Run both test cases under the stdlib regrtest support harness."""
    support.run_unittest(PEP3120Test, BuiltinCompileTests)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/logic/action/test_delete.py | 1 | 20446 | # encoding: utf-8
import nose.tools
import ckan.tests.helpers as helpers
import ckan.tests.factories as factories
import ckan.logic as logic
import ckan.model as model
import ckan.plugins as p
import ckan.lib.search as search
assert_equals = nose.tools.assert_equals
assert_raises = nose.tools.assert_raises
class TestDelete:
    """Tests for the ``resource_delete`` action."""

    def setup(self):
        # Fresh database for every test (nose-style per-test setup).
        helpers.reset_db()

    def test_resource_delete(self):
        user = factories.User()
        sysadmin = factories.Sysadmin()
        resource = factories.Resource(user=user)
        context = {}
        params = {'id': resource['id']}

        helpers.call_action('resource_delete', context, **params)

        # Not even a sysadmin can see it now
        assert_raises(logic.NotFound, helpers.call_action, 'resource_show',
                      {'user': sysadmin['name']}, **params)
        # It is still there but with state=deleted
        res_obj = model.Resource.get(resource['id'])
        assert_equals(res_obj.state, 'deleted')
class TestDeleteResourceViews(object):
    """Tests for the ``resource_view_delete`` action."""

    @classmethod
    def setup_class(cls):
        # The image_view plugin provides the view type the factory creates.
        if not p.plugin_loaded('image_view'):
            p.load('image_view')
        helpers.reset_db()

    @classmethod
    def teardown_class(cls):
        p.unload('image_view')

    def test_resource_view_delete(self):
        resource_view = factories.ResourceView()
        params = {'id': resource_view['id']}

        helpers.call_action('resource_view_delete', context={}, **params)

        assert_raises(logic.NotFound, helpers.call_action,
                      'resource_view_show',
                      context={}, **params)
        # The model object is actually deleted
        resource_view_obj = model.ResourceView.get(resource_view['id'])
        assert_equals(resource_view_obj, None)

    def test_delete_no_id_raises_validation_error(self):
        params = {}
        assert_raises(logic.ValidationError, helpers.call_action,
                      'resource_view_delete',
                      context={}, **params)

    def test_delete_wrong_id_raises_not_found_error(self):
        params = {'id': 'does_not_exist'}
        assert_raises(logic.NotFound, helpers.call_action,
                      'resource_view_delete',
                      context={}, **params)
class TestClearResourceViews(object):
    """Tests for the ``resource_view_clear`` action."""

    @classmethod
    def setup_class(cls):
        # Load both view plugins so views of each type can be created.
        if not p.plugin_loaded('image_view'):
            p.load('image_view')
        if not p.plugin_loaded('recline_view'):
            p.load('recline_view')
        helpers.reset_db()

    @classmethod
    def teardown_class(cls):
        p.unload('image_view')
        p.unload('recline_view')

    def test_resource_view_clear(self):
        factories.ResourceView(view_type='image_view')
        factories.ResourceView(view_type='image_view')
        factories.ResourceView(view_type='recline_view')
        factories.ResourceView(view_type='recline_view')
        count = model.Session.query(model.ResourceView).count()
        assert_equals(count, 4)

        helpers.call_action('resource_view_clear', context={})

        # With no view_types given, every view is removed.
        count = model.Session.query(model.ResourceView).count()
        assert_equals(count, 0)

    def test_resource_view_clear_with_types(self):
        factories.ResourceView(view_type='image_view')
        factories.ResourceView(view_type='image_view')
        factories.ResourceView(view_type='recline_view')
        factories.ResourceView(view_type='recline_view')
        count = model.Session.query(model.ResourceView).count()
        assert_equals(count, 4)

        helpers.call_action('resource_view_clear', context={},
                            view_types=['image_view'])

        # Only the image views were removed; the recline views remain.
        view_types = model.Session.query(model.ResourceView.view_type).all()
        assert_equals(len(view_types), 2)
        for view_type in view_types:
            assert_equals(view_type[0], 'recline_view')
class TestDeleteTags(object):
    """Tests for the ``tag_delete`` action."""

    def test_tag_delete_with_unicode_returns_unicode_error(self):
        # There is not a lot of call for it, but in theory there could be
        # unicode in the ActionError error message, so ensure that comes
        # through in NotFound as unicode.
        try:
            helpers.call_action('tag_delete', id=u'Delta symbol: \u0394')
        # Use the `except X as e` binding form: the legacy `except X, e`
        # comma syntax is a syntax error on Python 3 and deprecated on 2.
        except logic.NotFound as e:
            assert u'Delta symbol: \u0394' in unicode(e)
        else:
            assert 0, 'Should have raised NotFound'
class TestGroupPurge(object):
    """Tests for the sysadmin-only ``group_purge`` action."""

    def setup(self):
        helpers.reset_db()

    def test_a_non_sysadmin_cant_purge_group(self):
        user = factories.User()
        group = factories.Group(user=user)

        assert_raises(logic.NotAuthorized,
                      helpers.call_action,
                      'group_purge',
                      context={'user': user['name'], 'ignore_auth': False},
                      id=group['name'])

    def test_purged_group_does_not_show(self):
        group = factories.Group()

        helpers.call_action('group_purge', id=group['name'])

        assert_raises(logic.NotFound, helpers.call_action, 'group_show',
                      context={}, id=group['name'])

    def test_purged_group_is_not_listed(self):
        group = factories.Group()

        helpers.call_action('group_purge', id=group['name'])

        assert_equals(helpers.call_action('group_list', context={}), [])

    def test_dataset_in_a_purged_group_no_longer_shows_that_group(self):
        group = factories.Group()
        dataset = factories.Dataset(groups=[{'name': group['name']}])

        helpers.call_action('group_purge', id=group['name'])

        dataset_shown = helpers.call_action('package_show', context={},
                                            id=dataset['id'])
        assert_equals(dataset_shown['groups'], [])

    def test_purged_group_is_not_in_search_results_for_its_ex_dataset(self):
        search.clear_all()
        group = factories.Group()
        dataset = factories.Dataset(groups=[{'name': group['name']}])

        def get_search_result_groups():
            # Helper: group names attached to the dataset's search result.
            results = helpers.call_action('package_search',
                                          q=dataset['title'])['results']
            return [g['name'] for g in results[0]['groups']]

        assert_equals(get_search_result_groups(), [group['name']])

        helpers.call_action('group_purge', id=group['name'])

        assert_equals(get_search_result_groups(), [])

    def test_purged_group_leaves_no_trace_in_the_model(self):
        # Build a small hierarchy: parent -> group1 -> child, with an extra,
        # a member user and a dataset attached to group1.
        factories.Group(name='parent')
        user = factories.User()
        group1 = factories.Group(name='group1',
                                 extras=[{'key': 'key1', 'value': 'val1'}],
                                 users=[{'name': user['name']}],
                                 groups=[{'name': 'parent'}])
        factories.Dataset(name='ds', groups=[{'name': 'group1'}])
        factories.Group(name='child', groups=[{'name': 'group1'}])
        num_revisions_before = model.Session.query(model.Revision).count()

        helpers.call_action('group_purge', id=group1['name'])

        num_revisions_after = model.Session.query(model.Revision).count()
        # the Group and related objects are gone
        assert_equals(sorted([g.name for g in
                              model.Session.query(model.Group).all()]),
                      ['child', 'parent'])
        assert_equals(model.Session.query(model.GroupExtra).all(), [])
        # the only members left are the users for the parent and child
        assert_equals(sorted([
            (m.table_name, m.group.name)
            for m in model.Session.query(model.Member).join(model.Group)]),
            [('user', 'child'), ('user', 'parent')])
        # the dataset is still there though
        assert_equals([p.name for p in model.Session.query(model.Package)],
                      ['ds'])
        # the group's object revisions were purged too
        assert_equals(sorted(
            [gr.name for gr in model.Session.query(model.GroupRevision)]),
            ['child', 'parent'])
        assert_equals(model.Session.query(model.GroupExtraRevision).all(),
                      [])
        # Member is not revisioned
        # No Revision objects were purged, in fact 1 is created for the purge
        assert_equals(num_revisions_after - num_revisions_before, 1)

    def test_missing_id_returns_error(self):
        assert_raises(logic.ValidationError,
                      helpers.call_action, 'group_purge')

    def test_bad_id_returns_404(self):
        assert_raises(logic.NotFound,
                      helpers.call_action, 'group_purge', id='123')
class TestOrganizationPurge(object):
    """Tests for the sysadmin-only ``organization_purge`` action."""

    def setup(self):
        helpers.reset_db()

    def test_a_non_sysadmin_cant_purge_org(self):
        user = factories.User()
        org = factories.Organization(user=user)

        assert_raises(logic.NotAuthorized,
                      helpers.call_action,
                      'organization_purge',
                      context={'user': user['name'], 'ignore_auth': False},
                      id=org['name'])

    def test_purged_org_does_not_show(self):
        org = factories.Organization()

        helpers.call_action('organization_purge', id=org['name'])

        assert_raises(logic.NotFound, helpers.call_action, 'organization_show',
                      context={}, id=org['name'])

    def test_purged_org_is_not_listed(self):
        org = factories.Organization()

        helpers.call_action('organization_purge', id=org['name'])

        assert_equals(helpers.call_action('organization_list', context={}), [])

    def test_dataset_in_a_purged_org_no_longer_shows_that_org(self):
        org = factories.Organization()
        dataset = factories.Dataset(owner_org=org['id'])

        helpers.call_action('organization_purge', id=org['name'])

        dataset_shown = helpers.call_action('package_show', context={},
                                            id=dataset['id'])
        assert_equals(dataset_shown['owner_org'], None)

    def test_purged_org_is_not_in_search_results_for_its_ex_dataset(self):
        search.clear_all()
        org = factories.Organization()
        dataset = factories.Dataset(owner_org=org['id'])

        def get_search_result_owner_org():
            # Helper: owner_org recorded on the dataset's search result.
            results = helpers.call_action('package_search',
                                          q=dataset['title'])['results']
            return results[0]['owner_org']

        assert_equals(get_search_result_owner_org(), org['id'])

        helpers.call_action('organization_purge', id=org['name'])

        assert_equals(get_search_result_owner_org(), None)

    def test_purged_organization_leaves_no_trace_in_the_model(self):
        # Build a hierarchy: parent -> org1 -> child, with an extra, a
        # member user and a dataset owned by org1.
        factories.Organization(name='parent')
        user = factories.User()
        org1 = factories.Organization(
            name='org1',
            extras=[{'key': 'key1', 'value': 'val1'}],
            users=[{'name': user['name']}],
            groups=[{'name': 'parent'}])
        factories.Dataset(name='ds', owner_org=org1['id'])
        factories.Organization(name='child', groups=[{'name': 'org1'}])
        num_revisions_before = model.Session.query(model.Revision).count()

        helpers.call_action('organization_purge', id=org1['name'])

        num_revisions_after = model.Session.query(model.Revision).count()
        # the Organization and related objects are gone
        assert_equals(sorted([o.name for o in
                              model.Session.query(model.Group).all()]),
                      ['child', 'parent'])
        assert_equals(model.Session.query(model.GroupExtra).all(), [])
        # the only members left are the users for the parent and child
        assert_equals(sorted([
            (m.table_name, m.group.name)
            for m in model.Session.query(model.Member).join(model.Group)]),
            [('user', 'child'), ('user', 'parent')])
        # the dataset is still there though
        assert_equals([p.name for p in model.Session.query(model.Package)],
                      ['ds'])
        # the organization's object revisions were purged too
        assert_equals(sorted(
            [gr.name for gr in model.Session.query(model.GroupRevision)]),
            ['child', 'parent'])
        assert_equals(model.Session.query(model.GroupExtraRevision).all(),
                      [])
        # Member is not revisioned
        # No Revision objects were purged, in fact 1 is created for the purge
        assert_equals(num_revisions_after - num_revisions_before, 1)

    def test_missing_id_returns_error(self):
        assert_raises(logic.ValidationError,
                      helpers.call_action, 'organization_purge')

    def test_bad_id_returns_404(self):
        assert_raises(logic.NotFound,
                      helpers.call_action, 'organization_purge', id='123')
class TestDatasetPurge(object):
    """Tests for the sysadmin-only ``dataset_purge`` action."""

    def setup(self):
        helpers.reset_db()

    def test_a_non_sysadmin_cant_purge_dataset(self):
        user = factories.User()
        dataset = factories.Dataset(user=user)

        assert_raises(logic.NotAuthorized,
                      helpers.call_action,
                      'dataset_purge',
                      context={'user': user['name'], 'ignore_auth': False},
                      id=dataset['name'])

    def test_purged_dataset_does_not_show(self):
        dataset = factories.Dataset()

        helpers.call_action('dataset_purge',
                            context={'ignore_auth': True},
                            id=dataset['name'])

        assert_raises(logic.NotFound, helpers.call_action, 'package_show',
                      context={}, id=dataset['name'])

    def test_purged_dataset_is_not_listed(self):
        dataset = factories.Dataset()

        helpers.call_action('dataset_purge', id=dataset['name'])

        assert_equals(helpers.call_action('package_list', context={}), [])

    def test_group_no_longer_shows_its_purged_dataset(self):
        group = factories.Group()
        dataset = factories.Dataset(groups=[{'name': group['name']}])

        helpers.call_action('dataset_purge', id=dataset['name'])

        dataset_shown = helpers.call_action('group_show', context={},
                                            id=group['id'],
                                            include_datasets=True)
        assert_equals(dataset_shown['packages'], [])

    def test_purged_dataset_is_not_in_search_results(self):
        search.clear_all()
        dataset = factories.Dataset()

        def get_search_results():
            # Helper: names of datasets matching the dataset's title.
            results = helpers.call_action('package_search',
                                          q=dataset['title'])['results']
            return [d['name'] for d in results]

        assert_equals(get_search_results(), [dataset['name']])

        helpers.call_action('dataset_purge', id=dataset['name'])

        assert_equals(get_search_results(), [])

    def test_purged_dataset_leaves_no_trace_in_the_model(self):
        # Dataset with a tag, a group, an owner org, an extra and a
        # resource — purging must remove all of its related rows.
        factories.Group(name='group1')
        org = factories.Organization()
        dataset = factories.Dataset(
            tags=[{'name': 'tag1'}],
            groups=[{'name': 'group1'}],
            owner_org=org['id'],
            extras=[{'key': 'testkey', 'value': 'testvalue'}])
        factories.Resource(package_id=dataset['id'])
        num_revisions_before = model.Session.query(model.Revision).count()

        helpers.call_action('dataset_purge',
                            context={'ignore_auth': True},
                            id=dataset['name'])

        num_revisions_after = model.Session.query(model.Revision).count()
        # the Package and related objects are gone
        assert_equals(model.Session.query(model.Package).all(), [])
        assert_equals(model.Session.query(model.Resource).all(), [])
        assert_equals(model.Session.query(model.PackageTag).all(), [])
        # there is no clean-up of the tag object itself, just the PackageTag.
        assert_equals([t.name for t in model.Session.query(model.Tag).all()],
                      ['tag1'])
        assert_equals(model.Session.query(model.PackageExtra).all(), [])
        # the only member left is for the user created in factories.Group() and
        # factories.Organization()
        assert_equals(sorted(
            [(m.table_name, m.group.name)
             for m in model.Session.query(model.Member).join(model.Group)]),
            [('user', 'group1'), ('user', org['name'])])
        # all the object revisions were purged too
        assert_equals(model.Session.query(model.PackageRevision).all(), [])
        assert_equals(model.Session.query(model.ResourceRevision).all(), [])
        assert_equals(model.Session.query(model.PackageTagRevision).all(), [])
        assert_equals(model.Session.query(model.PackageExtraRevision).all(),
                      [])
        # Member is not revisioned
        # No Revision objects were purged or created
        assert_equals(num_revisions_after - num_revisions_before, 0)

    def test_purged_dataset_removed_from_relationships(self):
        # Purging the middle dataset must drop both relationships it is in.
        child = factories.Dataset()
        parent = factories.Dataset()
        grandparent = factories.Dataset()

        helpers.call_action('package_relationship_create',
                            subject=child['id'],
                            type='child_of',
                            object=parent['id'])
        helpers.call_action('package_relationship_create',
                            subject=parent['id'],
                            type='child_of',
                            object=grandparent['id'])
        assert_equals(len(
            model.Session.query(model.PackageRelationship).all()), 2)

        helpers.call_action('dataset_purge',
                            context={'ignore_auth': True},
                            id=parent['name'])

        assert_equals(model.Session.query(model.PackageRelationship).all(), [])

    def test_missing_id_returns_error(self):
        assert_raises(logic.ValidationError,
                      helpers.call_action, 'dataset_purge')

    def test_bad_id_returns_404(self):
        assert_raises(logic.NotFound,
                      helpers.call_action, 'dataset_purge', id='123')
class TestUserDelete(object):
    """Tests for the ``user_delete`` action (soft delete)."""

    def setup(self):
        helpers.reset_db()

    def test_user_delete(self):
        user = factories.User()
        context = {}
        params = {u'id': user[u'id']}

        helpers.call_action(u'user_delete', context, **params)

        # It is still there but with state=deleted
        user_obj = model.User.get(user[u'id'])
        assert_equals(user_obj.state, u'deleted')

    def test_user_delete_removes_memberships(self):
        user = factories.User()
        factories.Organization(
            users=[{u'name': user[u'id'], u'capacity': u'admin'}])
        factories.Group(
            users=[{u'name': user[u'id'], u'capacity': u'admin'}])

        user_memberships = model.Session.query(model.Member).filter(
            model.Member.table_id == user[u'id']).all()
        assert_equals(len(user_memberships), 2)
        assert_equals([m.state for m in user_memberships],
                      [u'active', u'active'])

        context = {}
        params = {u'id': user[u'id']}
        helpers.call_action(u'user_delete', context, **params)

        user_memberships = model.Session.query(model.Member).filter(
            model.Member.table_id == user[u'id']).all()
        # Member objects are still there, but flagged as deleted
        assert_equals(len(user_memberships), 2)
        assert_equals([m.state for m in user_memberships],
                      [u'deleted', u'deleted'])

    def test_user_delete_removes_memberships_when_using_name(self):
        # Same as above but the action is called with the user *name*
        # rather than the id.
        user = factories.User()
        factories.Organization(
            users=[{u'name': user[u'id'], u'capacity': u'admin'}])
        factories.Group(
            users=[{u'name': user[u'id'], u'capacity': u'admin'}])

        context = {}
        params = {u'id': user[u'name']}
        helpers.call_action(u'user_delete', context, **params)

        user_memberships = model.Session.query(model.Member).filter(
            model.Member.table_id == user[u'id']).all()
        # Member objects are still there, but flagged as deleted
        assert_equals(len(user_memberships), 2)
        assert_equals([m.state for m in user_memberships],
                      [u'deleted', u'deleted'])
| gpl-3.0 |
abalckin/cwavenet | examples/WNvsPWN/show_snr.py | 2 | 2454 | #! /usr/bin/python3
import pylab as plb
import numpy as np
from matplotlib import rc

# Render text with LaTeX, with Russian (babel) support for the plot labels.
rc('text', usetex=True)
rc('text.latex', unicode=True)
rc('text.latex', preamble=r'\usepackage[russian]{babel}')
#rc('font',**{'family':'serif'})
rc('font',**{'size':'19'})

# First 7 rows of the results table. Columns used below (per the labels):
# 1: input SNR, 2: its error; 5/6: classic wavelet-net SNR/error;
# 11/12: polymorphic wavelet-net SNR/error — TODO confirm against result.txt.
res = np.loadtxt('result.txt', delimiter=', ')[0:7]
#import pdb; pdb.set_trace()
#plb.barh(y_pos, performance, xerr=error, align='center', alpha=0.4)
#plb.yscale('linear')
plb.errorbar(res[:, 1], res[:, 5], yerr=res[:, 6], label='Традиционная вейвлет-сеть', linestyle='--', marker='*', color='black')
plb.errorbar(res[:, 1], res[:, 11], yerr=res[:, 12], label='Полиморфная вейвлет-сеть', marker='o', color='green')
plb.errorbar(res[:, 1], res[:, 1], yerr=res[:, 2], label='Отношение сигнал/шум для временного ряда $d(t), S$', color='blue')
#import pdb; pdb.set_trace()
# NOTE(review): fill_between's 4th positional argument is `where`, which
# expects a boolean mask; passing res[:, 1] (floats) relies on truthiness —
# confirm this shades the intended region.
plb.fill_between(res[:, 1], res[:, 1], res[:, 1]-np.max(res[:, 1]), res[:, 1], alpha=0.1, color='blue')
plb.xscale('log')
plb.legend(loc=0)
plb.xlim(res[-1, 1]-0.1, res[0, 1]+20)
plb.ylim(0, 670)
plb.gca().set_xticks(res[:, 1])
#plb.gca().xaxis.set_major_locator(plb.LogLocator(numticks=50))
plb.gca().xaxis.set_major_formatter(plb.ScalarFormatter())
plb.ylabel('Отношение сигнал/шум для временного ряда $\hat{y}(t), M$')
plb.xlabel('Отношение сигнал/шум для временного ряда $d(t), S$')
plb.annotate('Область применения вейвлет-сетей', [7, 310])
plb.show()

# Summary statistics: rows where each network beats the input SNR, with the
# slice bounds trimming edge rows so both selections cover the same range.
polym_higest=res[:, 11]>res[:, 1]
polym_avg=res[polym_higest, 11][1:-2]
std_higest=res[:, 5]>res[:, 1]
std_avg=res[std_higest, 5][:-2]
inp_avg=res[std_higest, 1][:-2]
polym_min=res[polym_higest, 11][1:-2]-res[polym_higest, 12][1:-2]
polym_max=res[polym_higest, 11][1:-2]+res[polym_higest, 12][1:-2]
std_min=res[std_higest, 5][:-2]-res[std_higest, 6][:-2]
std_max=res[std_higest, 5][:-2]+res[std_higest, 6][:-2]
print('Улучшение в среднем на {}%'.format(np.average((polym_avg-std_avg)/std_avg*100)))
print('Улучшение в по диапазону на {0}-{1}%'.format(np.average((polym_min-std_min)/std_min*100),
                                                    np.average((polym_max-std_max)/std_max*100)))
polym_avg_db=10*np.log10(polym_avg-inp_avg)
std_avg_db=10*np.log10(std_avg-inp_avg)
print('Улучшение в среднем на {}дб'.format(np.average(polym_avg_db-std_avg_db)))
117111302/PyGithub | github/IssueComment.py | 72 | 5924 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Michael Stead <michael.stead@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import github.GithubObject
import github.NamedUser
class IssueComment(github.GithubObject.CompletableGithubObject):
    """
    This class represents IssueComments as returned for example by http://developer.github.com/v3/todo
    """
    # NOTE(review): each property calls _completeIfNotSet, which presumably
    # triggers a follow-up API request when the attribute was absent from the
    # summary payload — confirm in CompletableGithubObject.

    @property
    def body(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._body)
        return self._body.value

    @property
    def created_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._created_at)
        return self._created_at.value

    @property
    def id(self):
        """
        :type: integer
        """
        self._completeIfNotSet(self._id)
        return self._id.value

    @property
    def issue_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._issue_url)
        return self._issue_url.value

    @property
    def updated_at(self):
        """
        :type: datetime.datetime
        """
        self._completeIfNotSet(self._updated_at)
        return self._updated_at.value

    @property
    def url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._url)
        return self._url.value

    @property
    def html_url(self):
        """
        :type: string
        """
        self._completeIfNotSet(self._html_url)
        return self._html_url.value

    @property
    def user(self):
        """
        :type: :class:`github.NamedUser.NamedUser`
        """
        self._completeIfNotSet(self._user)
        return self._user.value

    def delete(self):
        """
        :calls: `DELETE /repos/:owner/:repo/issues/comments/:id <http://developer.github.com/v3/issues/comments>`_
        :rtype: None
        """
        headers, data = self._requester.requestJsonAndCheck(
            "DELETE",
            self.url
        )

    def edit(self, body):
        """
        :calls: `PATCH /repos/:owner/:repo/issues/comments/:id <http://developer.github.com/v3/issues/comments>`_
        :param body: string
        :rtype: None
        """
        assert isinstance(body, (str, unicode)), body
        post_parameters = {
            "body": body,
        }
        headers, data = self._requester.requestJsonAndCheck(
            "PATCH",
            self.url,
            input=post_parameters
        )
        # Refresh local attributes from the server's response.
        self._useAttributes(data)

    def _initAttributes(self):
        # All attributes start as NotSet; _useAttributes fills in whatever
        # the API payload contains.
        self._body = github.GithubObject.NotSet
        self._created_at = github.GithubObject.NotSet
        self._id = github.GithubObject.NotSet
        self._issue_url = github.GithubObject.NotSet
        self._updated_at = github.GithubObject.NotSet
        self._url = github.GithubObject.NotSet
        self._html_url = github.GithubObject.NotSet
        self._user = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        if "body" in attributes:  # pragma no branch
            self._body = self._makeStringAttribute(attributes["body"])
        if "created_at" in attributes:  # pragma no branch
            self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
        if "id" in attributes:  # pragma no branch
            self._id = self._makeIntAttribute(attributes["id"])
        if "issue_url" in attributes:  # pragma no branch
            self._issue_url = self._makeStringAttribute(attributes["issue_url"])
        if "updated_at" in attributes:  # pragma no branch
            self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
        if "url" in attributes:  # pragma no branch
            self._url = self._makeStringAttribute(attributes["url"])
        if "html_url" in attributes:  # pragma no branch
            self._html_url = self._makeStringAttribute(attributes["html_url"])
        if "user" in attributes:  # pragma no branch
            self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
Pakketeretet2/lammps | tools/eff/lmp2xyz.py | 52 | 3446 | Info="""
Module name: lmp2xyz.py
Author: (c) Andres Jaramillo-Botero
California Institute of Technology
ajaramil@caltech.edu
Project: pEFF
Version: August 2009
Extracts the xyz from a lammps trajectory dump of style custom:
dump 1 all custom period dump_file id type x y z spin radius ...
Usage: python lmp2xyz.py lammps_dump_filename xyz_filename
"""
import os, sys
from math import log10,floor
from numpy import zeros
masses={"1.00794":"H","4.002602":"He","6.941":"Li","9.012182":"Be","10.811":"B","12.0107":"C","1.00":"Au","0.0005486":"Au"}
mass_floor={1:"H",4:"He",6:"Li",9:"Be",10:"B",12:"C",0:"Au",28:"Si"}
def lmp2xyz(lammps,xyz,xpos):
print "\nGenerating %s file"%(xyz)
fin=open(lammps,'r')
fout=open(xyz,'w')
data=raw_input("Do you have a corresponding data file? please enter filename or 'n': ")
count=1
if data!='n':
dataf=open(data,'r')
datafile=dataf.readlines()
dataf.close()
for line in datafile:
if line.find("atom types")>=0:
numtypes=int(line.split()[0])
mass=zeros(numtypes,dtype=float)
elif line.find("Masses")>=0:
count+=1+datafile.index(line)
elif line.find("Atoms")>=0:
break
for i in range(numtypes):
mass[i]=float(datafile[count].split()[1])
count+=1
else:
print "\nWill continue without a data file specification"
header=9
lines=fin.readlines()
numatoms=lines[3].split()[0]
fsize=os.system("wc -l %s> lines"%(lammps))
tmp=open('lines','r')
tlines=tmp.readline()
tmp.close()
os.system("rm lines")
flines=int(tlines.split()[0])
snaps=flines/(int(numatoms)+header)
countsnap=1
if data!='n': coords={}
else: coords=zeros((int(numatoms),4),dtype=float)
# sys.stdout.write("Writing %d snapshots\n"%(snaps))
# sys.stdout.flush()
read_atoms=0
types={}
for line in lines:
if line.find('ITEM: TIMESTEP')==0:
read_atom_flag=False
# sys.stdout.write("%d "%(countsnap))
# sys.stdout.flush()
fout.writelines("%s\nAtoms\n"%(numatoms))
countsnap+=1
continue
if line.find('ITEM: ATOMS')==0:
read_atom_flag=True
continue
if read_atom_flag==True:
read_atoms+=1
parse=line.split()
if parse[0]!="":
if data!='n':
if parse[1] not in types.keys():
type=raw_input("Atom name for type %s: "%parse[1])
types[parse[1]]=type
coords[int(parse[0])-1]=[types[parse[1]],float(parse[xpos-1]),float(parse[xpos]),float(parse[xpos+1])]
else:
coords[int(parse[0])-1][0]=int(parse[1])
coords[int(parse[0])-1][1]=float(parse[xpos-1])
coords[int(parse[0])-1][2]=float(parse[xpos])
coords[int(parse[0])-1][3]=float(parse[xpos+1])
if read_atoms==int(numatoms):
read_atoms=0
for i in range(int(numatoms)):
if data!='n': fout.writelines("%s %2.4f %2.4f %2.4f\n"%(coords[i][0],coords[i][1],coords[i][2],coords[i][3]))
else: fout.writelines("%d %2.4f %2.4f %2.4f\n"%(coords[i][0],coords[i][1],coords[i][2],coords[i][3]))
print "\nDone converting to xyz!!\n"
fin.close()
fout.close()
return
if __name__ == '__main__':
# if no input, print help and exit
if len(sys.argv) < 2:
print Info()
sys.exit(1)
inputfile=sys.argv[1]
outfile=sys.argv[2]
if len(sys.argv)==4:
xpos=sys.arv[3]-1
else: xpos=5
lmp2xyz(inputfile,outfile.split()[0],xpos)
| gpl-2.0 |
arnaldog12/Manual-Pratico-Deep-Learning | utils/samples_generator.py | 1 | 1868 | import numpy as np
def make_cubic(n_samples, x_min, x_max, a=1, b=0, c=0, d=0, noise=0.0, random_state=None):
    """Sample a noisy cubic y = a*x^3 + b*x^2 + c*x + d over [x_min, x_max].

    `noise` adds a uniform jitter in [-noise, noise] to each y value;
    `random_state` seeds numpy's global RNG for reproducibility.
    Returns (x, y) as column vectors of shape (n_samples, 1).
    """
    np.random.seed(random_state)
    xs = np.linspace(x_min, x_max, n_samples)
    jitter = 2 * noise * np.random.random(n_samples) - noise
    ys = a * xs ** 3 + b * xs ** 2 + c * xs + d + jitter
    return xs.reshape(-1, 1), ys.reshape(-1, 1)
def make_exp(n_samples, x_min, x_max, noise=0.0, random_state=None):
    """Sample a noisy exponential y = exp(x) over [x_min, x_max].

    `noise` adds a uniform jitter in [-noise, noise]; `random_state`
    seeds numpy's global RNG. Returns (x, y) as (n_samples, 1) columns.
    """
    np.random.seed(random_state)
    xs = np.linspace(x_min, x_max, n_samples)
    ys = np.exp(xs) + 2 * noise * np.random.random(n_samples) - noise
    return xs.reshape(-1, 1), ys.reshape(-1, 1)
def make_log10(n_samples, x_min, x_max, noise=0.0, random_state=None):
    """Sample a noisy y = log10(x) with x log-spaced in [x_min, x_max].

    Requires x_min > 0 and x_max > 0 (log10 of the bounds is taken).
    `noise` adds a uniform jitter in [-noise, noise]; `random_state`
    seeds numpy's global RNG. Returns (x, y) as (n_samples, 1) columns.
    """
    np.random.seed(random_state)
    xs = np.logspace(np.log10(x_min), np.log10(x_max), n_samples)
    ys = np.log10(xs) + 2 * noise * np.random.random(n_samples) - noise
    return xs.reshape(-1, 1), ys.reshape(-1, 1)
def make_spiral(n_samples, n_class=2, radius=1, laps=1.0, noise=0.0, random_state=None):
    """Generate an interleaved-spirals classification dataset.

    Each of `n_class` arms gets `n_samples` points spiralling out to
    `radius` over `laps` full turns; `noise` adds uniform jitter in
    [-noise, noise] and `random_state` seeds numpy's global RNG.

    Returns:
        x: ndarray of shape (n_samples * n_class, 2) with coordinates.
        y: ndarray of shape (n_samples * n_class, 1) with class labels.
    """
    np.random.seed(random_state)
    x = np.zeros((n_samples * n_class, 2))
    y = np.zeros((n_samples * n_class))
    # Bug fix: `np.math.pi` relied on the `np.math` alias, which was
    # deprecated and removed in NumPy 2.0; `np.pi` is the supported and
    # numerically identical constant.
    pi_2 = 2 * np.pi
    points = np.linspace(0, 1, n_samples)
    r = points * radius
    t = points * pi_2 * laps
    for label, delta_t in zip(range(n_class), np.arange(0, pi_2, pi_2 / n_class)):
        # NOTE(review): the same jitter vector is applied to both axes
        # (correlated noise) — kept exactly as in the original.
        random_noise = (2 * np.random.rand(n_samples) - 1) * noise
        index = np.arange(label * n_samples, (label + 1) * n_samples)
        x[index] = np.c_[r * np.sin(t + delta_t) + random_noise,
                         r * np.cos(t + delta_t) + random_noise]
        y[index] = label
    return x, y.reshape(-1, 1)
def make_square(n_samples, x_min, x_max, a=1, b=0, c=0, noise=0.0, random_state=None):
    """Sample a noisy parabola y = a*x^2 + b*x + c over [x_min, x_max].

    `noise` adds a uniform jitter in [-noise, noise] to each y value;
    `random_state` seeds numpy's global RNG for reproducibility.
    Returns (x, y) as column vectors of shape (n_samples, 1).
    """
    np.random.seed(random_state)
    xs = np.linspace(x_min, x_max, n_samples)
    jitter = 2 * noise * np.random.random(n_samples) - noise
    ys = a * xs ** 2 + b * xs + c + jitter
    return xs.reshape(-1, 1), ys.reshape(-1, 1)
| mit |
kenorb-contrib/BitTorrent | twisted/tap/ftp.py | 17 | 1442 |
# Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
I am the support module for making a ftp server with mktap.
"""
from twisted.protocols import ftp
from twisted.python import usage
from twisted.application import internet
from twisted.cred import error, portal, checkers, credentials
import os.path
class Options(usage.Options):
    """Command-line option declarations for `mktap ftp`.

    twisted.python.usage reads `synopsis` and `optParameters` to build
    the CLI; each optParameters row is [long-name, short-name, default,
    help-text]. All of these strings are user-visible output.
    """
    synopsis = """Usage: mktap ftp [options].
    WARNING: This FTP server is probably INSECURE do not use it.
    """
    optParameters = [
        ["port", "p", "2121", "set the port number"],
        ["root", "r", "/usr/local/ftp", "define the root of the ftp-site."],
        ["userAnonymous", "", "anonymous", "Name of the anonymous user."],
        ["password-file", "", None, "username:password-style credentials database"],
    ]

    # No extended description beyond the synopsis.
    longdesc = ''
def makeService(config):
    """Build the FTP server service from a parsed `Options` mapping.

    Sets up an FTPFactory backed by a cred portal: anonymous access is
    always allowed, and a username:password file checker is added when
    the `password-file` option was given. Returns a TCPServer service
    listening on the configured port.
    """
    f = ftp.FTPFactory()

    r = ftp.FTPRealm(config['root'])
    p = portal.Portal(r)
    p.registerChecker(checkers.AllowAnonymousAccess(), credentials.IAnonymous)

    if config['password-file'] is not None:
        p.registerChecker(checkers.FilePasswordDB(config['password-file'], cache=True))

    f.tld = config['root']
    f.userAnonymous = config['userAnonymous']
    f.portal = p
    f.protocol = ftp.FTP

    try:
        portno = int(config['port'])
    except (KeyError, ValueError):
        # Bug fix: the original caught only KeyError, but int() raises
        # ValueError on a malformed port string (and KeyError cannot
        # occur because the option has a default). Fall back to the
        # documented default port in either case.
        portno = 2121
    return internet.TCPServer(portno, f)
| gpl-3.0 |
MiltosD/CEFELRC | lib/python2.7/site-packages/selenium/webdriver/common/touch_actions.py | 38 | 4701 | """"
Touch Actions implementation
"""
from selenium.webdriver.remote.command import Command
class TouchActions(object):
    """Builder for touchscreen gestures.

    Works like ActionChains: every method queues one gesture on this
    object and returns ``self`` for chaining, and ``perform()`` fires
    the queued gestures against the driver in recording order.
    """

    def __init__(self, driver):
        """Create a new TouchActions object.

        :Args:
         - driver: The WebDriver instance, which must be touchscreen enabled.
        """
        self._driver = driver
        self._actions = []

    def perform(self):
        """Fire every stored gesture, oldest first."""
        for queued_action in self._actions:
            queued_action()

    def tap(self, on_element):
        """Queue a single tap on the given element."""
        def act():
            self._driver.execute(Command.SINGLE_TAP, {'element': on_element.id})
        self._actions.append(act)
        return self

    def double_tap(self, on_element):
        """Queue a double tap on the given element."""
        def act():
            self._driver.execute(Command.DOUBLE_TAP, {'element': on_element.id})
        self._actions.append(act)
        return self

    def tap_and_hold(self, xcoord, ycoord):
        """Queue a press-and-hold at screen coordinates (xcoord, ycoord)."""
        def act():
            self._driver.execute(Command.TOUCH_DOWN, {
                'x': xcoord,
                'y': ycoord})
        self._actions.append(act)
        return self

    def move(self, xcoord, ycoord):
        """Queue moving a held tap to (xcoord, ycoord)."""
        def act():
            self._driver.execute(Command.TOUCH_MOVE, {
                'x': xcoord,
                'y': ycoord})
        self._actions.append(act)
        return self

    def release(self, xcoord, ycoord):
        """Queue releasing a previously held tap at (xcoord, ycoord)."""
        def act():
            self._driver.execute(Command.TOUCH_UP, {
                'x': xcoord,
                'y': ycoord})
        self._actions.append(act)
        return self

    def scroll(self, xoffset, yoffset):
        """Queue a touch-scroll by (xoffset, yoffset), starting anywhere."""
        def act():
            self._driver.execute(Command.TOUCH_SCROLL, {
                'xoffset': xoffset,
                'yoffset': yoffset})
        self._actions.append(act)
        return self

    def scroll_from_element(self, on_element, xoffset, yoffset):
        """Queue a touch-scroll by (xoffset, yoffset) starting at on_element."""
        def act():
            self._driver.execute(Command.TOUCH_SCROLL, {
                'element': on_element.id,
                'xoffset': xoffset,
                'yoffset': yoffset})
        self._actions.append(act)
        return self

    def long_press(self, on_element):
        """Queue a long press on the given element."""
        def act():
            self._driver.execute(Command.LONG_PRESS, {'element': on_element.id})
        self._actions.append(act)
        return self

    def flick(self, xspeed, yspeed):
        """Queue a flick from anywhere at (xspeed, yspeed) pixels/second."""
        def act():
            self._driver.execute(Command.FLICK, {
                'xSpeed': xspeed,
                'ySpeed': yspeed})
        self._actions.append(act)
        return self

    def flick_element(self, on_element, xoffset, yoffset, speed):
        """Queue a flick starting at the center of on_element, moving by
        (xoffset, yoffset) at `speed` pixels/second."""
        def act():
            self._driver.execute(Command.FLICK, {
                'element': on_element.id,
                'xoffset': xoffset,
                'yoffset': yoffset,
                'speed': speed})
        self._actions.append(act)
        return self
| bsd-3-clause |
sthyme/ZFSchizophrenia | BehaviorAnalysis/HSMovieAnalysis/setResolutionWidget.py | 1 | 5960 | #-----------------------
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'selectUI.ui'
# Created: Thu Feb 26 13:45:32 2015 by: PyQt4 UI code generator 4.11.3
#
# Created by Emily Conklin
# February 2015
# This program is connected to the main widget (NeuroGUI.py) and is a sub-user interface
# Called from imageTools.setCameraResolution
# Allows the user to specify:
# 1) default resolution
# 2) fit-to-screen resolution
# 3) fit-to-projector resolution
#-----------------------
from PyQt4 import QtCore, QtGui
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import sys
# PyQt4 compatibility shims generated by pyuic: QString.fromUtf8 and
# QApplication.UnicodeUTF8 only exist under older PyQt4 API versions, so
# fall back to plain-Python equivalents when they are absent.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # API v2 (or Python 3): strings are already unicode, pass through.
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # Newer PyQt4 drops the encoding argument from translate().
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_setResolutionWidget(QtGui.QDialog):
    '''
    sub-window class - QDialog type

    Lets the user pick one of three capture resolutions (default,
    fit-to-screen ~720p, fit-to-projector ~480p); the choice is stored
    as an integer in self.videoType (0, 1 or 2).
    '''
    def __init__(self):
        '''
        initializes the dialog, data member
        '''
        QtGui.QDialog.__init__(self)
        self.setupUi(self)
        # 0 = default resolution (matches the defaultResRB button id).
        self.videoType=0

    def setupUi(self, setResolutionWidget):
        '''
        called in the initialization method
        sets up each layout, labels, buttons, etc.
        '''
        setResolutionWidget.setObjectName(_fromUtf8("setResolutionWidget"))
        setResolutionWidget.resize(404, 300)
        self.verticalLayout_2 = QtGui.QVBoxLayout(setResolutionWidget)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))

        #line 1: label for desired resolution
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.desiredResolutionLabel = QtGui.QLabel(setResolutionWidget)
        self.desiredResolutionLabel.setObjectName(_fromUtf8("desiredResolutionLabel"))
        self.horizontalLayout.addWidget(self.desiredResolutionLabel)

        #lines 2,3,4: resolution options
        self.verticalLayout_3 = QtGui.QVBoxLayout()
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        self.defaultResRB = QtGui.QRadioButton(setResolutionWidget)
        self.defaultResRB.setObjectName(_fromUtf8("defaultResRB"))
        self.verticalLayout_3.addWidget(self.defaultResRB)
        self.fitToScreenLE = QtGui.QRadioButton(setResolutionWidget)
        self.fitToScreenLE.setObjectName(_fromUtf8("fitToScreenLE"))
        self.verticalLayout_3.addWidget(self.fitToScreenLE)
        self.fitToProjectorLE = QtGui.QRadioButton(setResolutionWidget)
        self.fitToProjectorLE.setObjectName(_fromUtf8("fitToProjectorLE"))
        self.verticalLayout_3.addWidget(self.fitToProjectorLE)
        self.horizontalLayout.addLayout(self.verticalLayout_3)
        self.verticalLayout_2.addLayout(self.horizontalLayout)
        self.defaultResRB.setChecked(True) #defaults default resolution

        #sets up button group with the three options
        # Button ids 0/1/2 double as the videoType values read later.
        self.buttonGroup = QtGui.QButtonGroup()
        self.buttonGroup.addButton(self.defaultResRB,0)
        self.buttonGroup.addButton(self.fitToScreenLE,1)
        self.buttonGroup.addButton(self.fitToProjectorLE,2)

        #line 5: submit button (centered between two expanding spacers)
        self.horizontalLayout_4 = QtGui.QHBoxLayout()
        self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
        spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem4)
        self.Submit = QtGui.QPushButton(setResolutionWidget)
        self.Submit.setObjectName(_fromUtf8("Submit"))
        self.horizontalLayout_4.addWidget(self.Submit)
        spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout_4.addItem(spacerItem5)
        self.verticalLayout_2.addLayout(self.horizontalLayout_4)

        self.retranslateUi(setResolutionWidget)
        QtCore.QMetaObject.connectSlotsByName(setResolutionWidget)

    def retranslateUi(self, setResolutionWidget):
        '''
        called in the setup method
        sets label/button text and window titles
        links buttons to other methods
        '''
        setResolutionWidget.setWindowTitle(_translate("setResolutionWidget", "Resolution Options", None))
        self.desiredResolutionLabel.setText(_translate("setResolutionWidget", "Choose desired resolution:", None))
        self.defaultResRB.setText(_translate("setResolutionWidget", "Default resolution", None))
        self.fitToScreenLE.setText(_translate("setResolutionWidget", "Fit to screen (~720p)", None))
        self.fitToProjectorLE.setText(_translate("setResolutionWidget", "Fit to projector (~480p)", None))
        self.Submit.setText(_translate("setResolutionWidget", "Submit",None))

        #finds out which radio button was pressed
        self.defaultResRB.clicked.connect(self.readSignal)
        self.fitToScreenLE.clicked.connect(self.readSignal)
        self.fitToProjectorLE.clicked.connect(self.readSignal)
        self.Submit.clicked.connect(self.submitClose) #connects submit button to submitClose

    def readSignal(self):
        '''
        checks button group signal to determine radio button clicked
        '''
        self.videoType = self.buttonGroup.checkedId() #checks radio button signal

    def submitClose(self):
        '''
        closes window when user hits submit, passes videoType
        '''
        # accept() closes the dialog with QDialog.Accepted; the caller
        # then reads self.videoType for the chosen resolution.
        self.accept()
if __name__ == '__main__':
    # Standalone smoke test: show the dialog on its own event loop.
    application = QtGui.QApplication(sys.argv)
    dialog = Ui_setResolutionWidget()
    dialog.show()
    sys.exit(application.exec_())
| mit |
mzizzi/ansible | lib/ansible/modules/cloud/univention/udm_dns_zone.py | 69 | 7633 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
# Copyright (c) 2016, Adfinis SyGroup AG
# Tobias Rueetschi <tobias.ruetschi@adfinis-sygroup.ch>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: udm_dns_zone
version_added: "2.2"
author: "Tobias Rueetschi (@2-B)"
short_description: Manage dns zones on a univention corporate server
description:
- "This module allows to manage dns zones on a univention corporate server (UCS).
It uses the python API of the UCS to create a new object or edit it."
requirements:
- Python >= 2.6
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the dns zone is present or not.
type:
required: true
choices: [ forward_zone, reverse_zone ]
description:
- Define if the zone is a forward or reverse DNS zone.
zone:
required: true
description:
- DNS zone name, e.g. C(example.com).
nameserver:
required: false
description:
- List of appropriate name servers. Required if C(state=present).
interfaces:
required: false
description:
- List of interface IP addresses, on which the server should
response this zone. Required if C(state=present).
refresh:
required: false
default: 3600
description:
- Interval before the zone should be refreshed.
retry:
required: false
default: 1800
description:
- Interval that should elapse before a failed refresh should be retried.
expire:
required: false
default: 604800
description:
- Specifies the upper limit on the time interval that can elapse before the zone is no longer authoritative.
ttl:
required: false
default: 600
description:
- Minimum TTL field that should be exported with any RR from this zone.
contact:
required: false
default: ''
description:
- Contact person in the SOA record.
mx:
required: false
default: []
description:
- List of MX servers. (Must declared as A or AAAA records).
'''
EXAMPLES = '''
# Create a DNS zone on a UCS
- udm_dns_zone:
zone: example.com
type: forward_zone
nameserver:
- ucs.example.com
interfaces:
- 192.0.2.1
'''
RETURN = '''# '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.univention_umc import (
umc_module_for_add,
umc_module_for_edit,
ldap_search,
base_dn,
)
def convert_time(time):
    """Express `time` (seconds) in the largest whole unit that fits.

    Returns a ('<value>', '<unit name>') tuple of strings, e.g.
    3600 -> ('1', 'hours'). Zero maps to ('0', 'seconds') and any
    remainder below the chosen unit is truncated (90 -> ('1', 'minutes')).
    Negative values match no unit and fall through to None.
    """
    if time == 0:
        return ('0', 'seconds')
    for seconds_per_unit, unit_name in (
            (24 * 60 * 60, 'days'),
            (60 * 60, 'hours'),
            (60, 'minutes'),
            (1, 'seconds')):
        if time >= seconds_per_unit:
            return ('{}'.format(time // seconds_per_unit), unit_name)
def main():
    """Ansible module entry point: ensure a DNS zone on UCS is present/absent.

    Reads the module parameters, looks the zone up in LDAP, then creates,
    updates or removes the corresponding dns/forward_zone or
    dns/reverse_zone UMC object. Honors check mode.
    """
    module = AnsibleModule(
        argument_spec=dict(
            type=dict(required=True,
                      type='str'),
            zone=dict(required=True,
                      aliases=['name'],
                      type='str'),
            nameserver=dict(default=[],
                            type='list'),
            interfaces=dict(default=[],
                            type='list'),
            refresh=dict(default=3600,
                         type='int'),
            retry=dict(default=1800,
                       type='int'),
            expire=dict(default=604800,
                        type='int'),
            ttl=dict(default=600,
                     type='int'),
            contact=dict(default='',
                         type='str'),
            mx=dict(default=[],
                    type='list'),
            state=dict(default='present',
                       choices=['present', 'absent'],
                       type='str')
        ),
        supports_check_mode=True,
        required_if=([
            ('state', 'present', ['nameserver', 'interfaces'])
        ])
    )
    # Renamed from `type` to avoid shadowing the builtin; the module
    # parameter name itself is unchanged.
    zone_type = module.params['type']
    zone = module.params['zone']
    nameserver = module.params['nameserver']
    interfaces = module.params['interfaces']
    refresh = module.params['refresh']
    retry = module.params['retry']
    expire = module.params['expire']
    ttl = module.params['ttl']
    contact = module.params['contact']
    mx = module.params['mx']
    state = module.params['state']
    changed = False
    # Bug fix: `diff` was only assigned inside the state=present branch,
    # so exit_json() below raised NameError for state=absent (and for a
    # no-op present run that never reached obj.diff()). Default it here.
    diff = None

    obj = list(ldap_search(
        '(&(objectClass=dNSZone)(zoneName={}))'.format(zone),
        attr=['dNSZone']
    ))

    exists = bool(len(obj))
    container = 'cn=dns,{}'.format(base_dn())
    dn = 'zoneName={},{}'.format(zone, container)
    if contact == '':
        # Default SOA contact to the zone's root mailbox.
        contact = 'root@{}.'.format(zone)

    if state == 'present':
        try:
            if not exists:
                obj = umc_module_for_add('dns/{}'.format(zone_type), container)
            else:
                obj = umc_module_for_edit('dns/{}'.format(zone_type), dn)
            obj['zone'] = zone
            obj['nameserver'] = nameserver
            obj['a'] = interfaces
            # The UMC API expects (value, unit) tuples for the SOA timers.
            obj['refresh'] = convert_time(refresh)
            obj['retry'] = convert_time(retry)
            obj['expire'] = convert_time(expire)
            obj['ttl'] = convert_time(ttl)
            obj['contact'] = contact
            obj['mx'] = mx
            diff = obj.diff()
            if exists:
                for k in obj.keys():
                    if obj.hasChanged(k):
                        changed = True
            else:
                changed = True
            if not module.check_mode:
                if not exists:
                    obj.create()
                elif changed:
                    obj.modify()
        except Exception as e:
            module.fail_json(
                msg='Creating/editing dns zone {} failed: {}'.format(zone, e)
            )

    if state == 'absent' and exists:
        try:
            obj = umc_module_for_edit('dns/{}'.format(zone_type), dn)
            if not module.check_mode:
                obj.remove()
            changed = True
        except Exception as e:
            module.fail_json(
                msg='Removing dns zone {} failed: {}'.format(zone, e)
            )

    module.exit_json(
        changed=changed,
        diff=diff,
        zone=zone
    )
# Standard Ansible boilerplate: run the module only when executed directly.
if __name__ == '__main__':
    main()
| gpl-3.0 |
NaturalGIS/QGIS | tests/src/python/test_qgssvgcache.py | 8 | 7946 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSvgCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '(C) 2018 by Nyall Dawson'
__date__ = '29/03/2018'
__copyright__ = 'Copyright 2018, The QGIS Project'
import qgis # NOQA
import os
import socketserver
import threading
import http.server
import time
from qgis.PyQt.QtCore import QDir, QCoreApplication
from qgis.PyQt.QtGui import QColor, QImage, QPainter
from qgis.core import (QgsSvgCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker)
from qgis.testing import start_app, unittest
from utilities import unitTestDataPath
start_app()
TEST_DATA_DIR = unitTestDataPath()
class SlowHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler that delays every GET by one second.

    The artificial latency lets the async SVG-fetch tests observe the
    intermediate "waiting" image before the real content arrives.
    """

    def do_GET(self):
        time.sleep(1)
        return super().do_GET()
class TestQgsSvgCache(unittest.TestCase):
    """Exercises QgsSvgCache's async and blocking remote-SVG fetching
    against the throttled local HTTP server started in setUpClass."""

    @classmethod
    def setUpClass(cls):
        # Bring up a simple HTTP server, for remote SVG tests
        os.chdir(unitTestDataPath() + '')
        handler = SlowHTTPRequestHandler

        # Port 0 lets the OS pick a free port; record the actual one.
        cls.httpd = socketserver.TCPServer(('localhost', 0), handler)
        cls.port = cls.httpd.server_address[1]

        cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever)
        cls.httpd_thread.setDaemon(True)
        cls.httpd_thread.start()

    def setUp(self):
        # Per-test HTML report fragment, appended to qgistest.html on teardown.
        self.report = "<h1>Python QgsSvgCache Tests</h1>\n"
        self.fetched = True
        QgsApplication.svgCache().remoteSvgFetched.connect(self.svgFetched)

    def tearDown(self):
        report_file_path = "%s/qgistest.html" % QDir.tempPath()
        with open(report_file_path, 'a') as report_file:
            report_file.write(self.report)

    def svgFetched(self):
        # Slot for QgsSvgCache.remoteSvgFetched; unblocks waitForFetch().
        self.fetched = True

    def waitForFetch(self):
        # Spin the Qt event loop until the remoteSvgFetched signal fires.
        self.fetched = False
        while not self.fetched:
            QCoreApplication.processEvents()

    @unittest.skipIf(os.environ.get('TRAVIS', '') == 'true', 'Failing on Travis')
    def testRemoteSVG(self):
        """Test fetching remote svg."""
        url = 'http://localhost:{}/qgis_local_server/sample_svg.svg'.format(str(TestQgsSvgCache.port))
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1)
        # first should be waiting image
        self.assertTrue(self.imageCheck('Remote SVG', 'waiting_svg', image))

        self.waitForFetch()

        # second should be correct image
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1)
        self.assertTrue(self.imageCheck('Remote SVG', 'remote_svg', image))

    @unittest.skipIf(os.environ.get('TRAVIS', '') == 'true', 'Failing on Travis')
    def testRemoteSvgAsText(self):
        """Test fetching remote svg with text mime format - e.g. github raw svgs"""
        url = 'http://localhost:{}/qgis_local_server/svg_as_text.txt'.format(str(TestQgsSvgCache.port))
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1)
        # first should be waiting image
        self.assertTrue(self.imageCheck('Remote SVG as Text', 'waiting_svg', image))

        self.waitForFetch()

        # second should be correct image
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1)
        self.assertTrue(self.imageCheck('Remote SVG as Text', 'remote_svg', image))

    def testRemoteSvgBadMime(self):
        """Test fetching remote svg with bad mime type"""
        url = 'http://localhost:{}/qgis_local_server/logo.png'.format(str(TestQgsSvgCache.port))
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1)
        # first should be waiting image
        self.assertTrue(self.imageCheck('Remote SVG bad MIME type', 'waiting_svg', image))

        # second fetch resolves to the "bad svg" placeholder image
        self.waitForFetch()
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1)
        self.assertTrue(self.imageCheck('Remote SVG bad MIME type', 'bad_svg', image))

    def testRemoteSvgMissing(self):
        """Test fetching remote svg with bad url"""
        url = 'http://localhost:{}/qgis_local_server/xxx.svg'.format(str(TestQgsSvgCache.port))  # oooo naughty
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1)
        self.assertTrue(self.imageCheck('Remote SVG missing', 'waiting_svg', image))

    def testRemoteSVGBlocking(self):
        """Test fetching remote svg."""
        # remote not yet requested so not in cache
        url = 'http://localhost:{}/qgis_local_server/QGIS_logo_2017.svg'.format(str(TestQgsSvgCache.port))
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1, blocking=1)
        # first should be correct image
        self.assertTrue(self.imageCheck('Remote SVG sync', 'remote_svg_blocking', image))

        # remote probably in cache
        url = 'http://localhost:{}/qgis_local_server/sample_svg.svg'.format(str(TestQgsSvgCache.port))
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1, blocking=1)
        self.assertTrue(self.imageCheck('Remote SVG', 'remote_svg', image))

        # missing
        url = 'http://localhost:{}/qgis_local_server/xxx.svg'.format(str(TestQgsSvgCache.port))  # oooo naughty
        image, in_cache = QgsApplication.svgCache().svgAsImage(url, 100, fill=QColor(0, 0, 0), stroke=QColor(0, 0, 0),
                                                               strokeWidth=0.1, widthScaleFactor=1, blocking=1)
        self.assertTrue(self.imageCheck('Remote SVG missing', 'waiting_svg', image))

    def imageCheck(self, name, reference_image, image):
        """Render `image` on a checkered background and compare it to the
        named control image; returns True on a match within tolerance."""
        self.report += "<h2>Render {}</h2>\n".format(name)
        temp_dir = QDir.tempPath() + '/'
        file_name = temp_dir + 'svg_' + name + ".png"

        output_image = QImage(image.size(), QImage.Format_RGB32)
        QgsMultiRenderChecker.drawBackground(output_image)
        painter = QPainter(output_image)
        painter.drawImage(0, 0, image)
        painter.end()

        output_image.save(file_name, "PNG")
        checker = QgsRenderChecker()
        checker.setControlPathPrefix("svg_cache")
        checker.setControlName("expected_" + reference_image)
        checker.setRenderedImage(file_name)
        checker.setColorTolerance(2)
        result = checker.compareImages(name, 20)
        self.report += checker.report()
        print((self.report))
        return result
# Allow running this test module directly with plain unittest.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
zoyoe/ectool | zoyoeec/requests/structures.py | 398 | 3575 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
import collections.abc
import os

from itertools import islice
class IteratorProxy(object):
    """Proxy an iterator while trying to report its length.

    ``__len__`` falls back from a real ``__len__`` to a ``len``
    attribute to the size of the underlying file descriptor, so the
    proxy can stand in where a sized, file-like object is expected.
    """

    def __init__(self, i):
        self.i = i

    def __iter__(self):
        return self.i

    def __len__(self):
        source = self.i
        if hasattr(source, '__len__'):
            return len(source)
        if hasattr(source, 'len'):
            return source.len
        if hasattr(source, 'fileno'):
            return os.fstat(source.fileno()).st_size

    def read(self, n):
        # Consume at most the next `n` items and glue them together.
        return "".join(islice(self.i, None, n))
class CaseInsensitiveDict(collections.abc.MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of ``MutableMapping`` as well
    as dict's ``copy``. Also provides ``lower_items``.

    Bug fix: this previously subclassed ``collections.MutableMapping``
    (and tested ``collections.Mapping`` in ``__eq__``); those aliases
    were deprecated since Python 3.3 and removed in 3.10, so the class
    now uses ``collections.abc`` explicitly.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive:

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Internal store maps lowercased key -> (original-cased key, value).
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, collections.abc.Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        # _store.values() yields (cased-key, value) pairs, which
        # MutableMapping.update accepts directly.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))
class LookupDict(dict):
    """Dictionary-style lookup over instance attributes.

    Item access reads the instance ``__dict__`` (attribute namespace)
    rather than the dict storage, and missing keys yield None instead
    of raising KeyError.
    """

    def __init__(self, name=None):
        self.name = name
        super(LookupDict, self).__init__()

    def __repr__(self):
        return '<lookup \'%s\'>' % (self.name)

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| gpl-2.0 |
40223137/2015cbaa | static/Brython3.1.1-20150328-091302/Lib/_sre.py | 622 | 51369 | # NOT_RPYTHON
"""
A pure Python reimplementation of the _sre module from CPython 2.4
Copyright 2005 Nik Haldimann, licensed under the MIT license
This code is based on material licensed under CNRI's Python 1.6 license and
copyrighted by: Copyright (c) 1997-2001 by Secret Labs AB
"""
MAXREPEAT = 2147483648
#import array
import operator, sys
from sre_constants import ATCODES, OPCODES, CHCODES
from sre_constants import SRE_INFO_PREFIX, SRE_INFO_LITERAL
from sre_constants import SRE_FLAG_UNICODE, SRE_FLAG_LOCALE
import sys
# Identifying as _sre from Python 2.3 or 2.4
#if sys.version_info[:2] >= (2, 4):
MAGIC = 20031017
#else:
# MAGIC = 20030419
# In _sre.c this is bytesize of the code word type of the C implementation.
# There it's 2 for normal Python builds and more for wide unicode builds (large
# enough to hold a 32-bit UCS-4 encoded character). Since here in pure Python
# we only see re bytecodes as Python longs, we shouldn't have to care about the
# codesize. But sre_compile will compile some stuff differently depending on the
# codesize (e.g., charsets).
# starting with python 3.3 CODESIZE is 4
#if sys.maxunicode == 65535:
# CODESIZE = 2
#else:
CODESIZE = 4
copyright = "_sre.py 2.4c Copyright 2005 by Nik Haldimann"
def getcodesize():
    """Return CODESIZE, the byte width of one regex bytecode word."""
    return CODESIZE
def compile(pattern, flags, code, groups=0, groupindex=None, indexgroup=None):
    """Compiles (or rather just converts) a pattern descriptor to a SRE_Pattern
    object. Actual compilation to opcodes happens in sre_compile.

    Bug fix: the defaults were the mutable objects ``{}`` and ``[None]``,
    shared between every call (and stored on the returned pattern, so a
    caller mutating one pattern's groupindex would corrupt all patterns
    built with the default). Fresh objects are created per call instead.
    """
    if groupindex is None:
        groupindex = {}
    if indexgroup is None:
        indexgroup = [None]
    return SRE_Pattern(pattern, flags, code, groups, groupindex, indexgroup)
def getlower(char_ord, flags):
    """Lower-case a character ordinal the way the regex engine would.

    ASCII characters are always lowered; non-ASCII ones only when the
    UNICODE flag is set, or the LOCALE flag is set and the ordinal fits
    in Latin-1. Anything else is returned unchanged.
    """
    is_ascii = char_ord < 128
    unicode_mode = bool(flags & SRE_FLAG_UNICODE)
    locale_latin1 = bool(flags & SRE_FLAG_LOCALE) and char_ord < 256
    if is_ascii or unicode_mode or locale_latin1:
        return ord(chr(char_ord).lower())
    return char_ord
class SRE_Pattern:
def __init__(self, pattern, flags, code, groups=0, groupindex={}, indexgroup=[None]):
self.pattern = pattern
self.flags = flags
self.groups = groups
self.groupindex = groupindex # Maps group names to group indices
self._indexgroup = indexgroup # Maps indices to group names
self._code = code
def match(self, string, pos=0, endpos=sys.maxsize):
"""If zero or more characters at the beginning of string match this
regular expression, return a corresponding MatchObject instance. Return
None if the string does not match the pattern."""
state = _State(string, pos, endpos, self.flags)
if state.match(self._code):
return SRE_Match(self, state)
return None
def search(self, string, pos=0, endpos=sys.maxsize):
"""Scan through string looking for a location where this regular
expression produces a match, and return a corresponding MatchObject
instance. Return None if no position in the string matches the
pattern."""
state = _State(string, pos, endpos, self.flags)
if state.search(self._code):
return SRE_Match(self, state)
else:
return None
def findall(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
matchlist = []
state = _State(string, pos, endpos, self.flags)
while state.start <= state.end:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
match = SRE_Match(self, state)
if self.groups == 0 or self.groups == 1:
item = match.group(self.groups)
else:
item = match.groups("")
matchlist.append(item)
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
return matchlist
def _subx(self, template, string, count=0, subn=False):
filter = template
if not callable(template) and "\\" in template:
# handle non-literal strings ; hand it over to the template compiler
#import sre #sre was renamed to re
#fix me brython
#print("possible issue at _sre.py line 116")
import re as sre
filter = sre._subx(self, template)
state = _State(string, 0, sys.maxsize, self.flags)
sublist = []
n = last_pos = 0
while not count or n < count:
state.reset()
state.string_position = state.start
if not state.search(self._code):
break
if last_pos < state.start:
sublist.append(string[last_pos:state.start])
if not (last_pos == state.start and
last_pos == state.string_position and n > 0):
# the above ignores empty matches on latest position
if callable(filter):
sublist.append(filter(SRE_Match(self, state)))
else:
sublist.append(filter)
last_pos = state.string_position
n += 1
if state.string_position == state.start:
state.start += 1
else:
state.start = state.string_position
if last_pos < state.end:
sublist.append(string[last_pos:state.end])
item = "".join(sublist)
if subn:
return item, n
else:
return item
def sub(self, repl, string, count=0):
"""Return the string obtained by replacing the leftmost non-overlapping
occurrences of pattern in string by the replacement repl."""
return self._subx(repl, string, count, False)
def subn(self, repl, string, count=0):
"""Return the tuple (new_string, number_of_subs_made) found by replacing
the leftmost non-overlapping occurrences of pattern with the replacement
repl."""
return self._subx(repl, string, count, True)
    def split(self, string, maxsplit=0):
        """Split string by the occurrences of pattern.

        maxsplit == 0 means "no limit".  Zero-width matches never split;
        capturing groups, if any, are interleaved into the result list.
        """
        splitlist = []
        state = _State(string, 0, sys.maxsize, self.flags)
        n = 0
        last = state.start
        while not maxsplit or n < maxsplit:
            state.reset()
            state.string_position = state.start
            if not state.search(self._code):
                break
            if state.start == state.string_position: # zero-width match
                if last == state.end: # or end of string
                    break
                state.start += 1
                continue
            splitlist.append(string[last:state.start])
            # add groups (if any)
            if self.groups:
                match = SRE_Match(self, state)
                splitlist.extend(list(match.groups(None)))
            n += 1
            last = state.start = state.string_position
        # Trailing piece after the final delimiter (may be empty).
        splitlist.append(string[last:state.end])
        return splitlist
def finditer(self, string, pos=0, endpos=sys.maxsize):
"""Return a list of all non-overlapping matches of pattern in string."""
#scanner = self.scanner(string, pos, endpos)
_list=[]
_m=self.scanner(string, pos, endpos)
_re=SRE_Scanner(self, string, pos, endpos)
_m=_re.search()
while _m:
_list.append(_m)
_m=_re.search()
return _list
#return iter(scanner.search, None)
def scanner(self, string, start=0, end=sys.maxsize):
return SRE_Scanner(self, string, start, end)
    def __copy__(self):
        # Pattern objects are deliberately not copyable (mirrors CPython's _sre).
        raise TypeError("cannot copy this pattern object")
def __deepcopy__(self):
raise TypeError("cannot copy this pattern object")
class SRE_Scanner:
    """Undocumented scanner interface of sre.

    Repeated match()/search() calls walk the subject string, advancing an
    internal cursor past each (possibly zero-width) match.
    """
    def __init__(self, pattern, string, start, end):
        self.pattern = pattern
        # Mutable matching state; its .start cursor advances after each call.
        self._state = _State(string, start, end, self.pattern.flags)
    def _match_search(self, matcher):
        # Run matcher (state.match or state.search) once at the current
        # cursor, advance the cursor, and return the SRE_Match or None.
        state = self._state
        state.reset()
        state.string_position = state.start
        match = None
        if matcher(self.pattern._code):
            match = SRE_Match(self.pattern, state)
        if match is None or state.string_position == state.start:
            # No progress (failure or zero-width match): step one char ahead.
            state.start += 1
        else:
            state.start = state.string_position
        return match
    def match(self):
        # Anchored attempt at the current cursor position.
        return self._match_search(self._state.match)
    def search(self):
        # Unanchored attempt from the current cursor position onwards.
        return self._match_search(self._state.search)
class SRE_Match:
    """The result of a successful match: group spans plus accessors.

    Mirrors the public interface of CPython match objects (group, groups,
    groupdict, start, end, span, expand).

    Fixes over the previous revision:
      * expand() imported the long-gone "sre" module; it now imports
        "re" under that alias, matching SRE_Pattern._subx.
      * __copy__/__deepcopy__ were missing "self" (and "memo"), so the
        intended TypeError was masked by a signature error.
    """
    def __init__(self, pattern, state):
        self.re = pattern
        self.string = state.string
        self.pos = state.pos
        self.endpos = state.end
        self.lastindex = state.lastindex
        if self.lastindex < 0:
            # -1 is the engine's "no group matched" sentinel.
            self.lastindex = None
        self.regs = self._create_regs(state)
        # "0 <= None" is invalid under Python 3, hence the explicit None guard.
        if self.lastindex is not None and pattern._indexgroup and 0 <= self.lastindex < len(pattern._indexgroup):
            # The above upper-bound check should not be necessary, as the re
            # compiler is supposed to always provide an _indexgroup list long
            # enough. But the re.Scanner class seems to screw up something
            # there, test_scanner in test_re won't work without upper-bound
            # checking. XXX investigate this and report bug to CPython.
            self.lastgroup = pattern._indexgroup[self.lastindex]
        else:
            self.lastgroup = None
    def _create_regs(self, state):
        """Creates a tuple of index pairs representing matched groups."""
        regs = [(state.start, state.string_position)]
        for group in range(self.re.groups):
            mark_index = 2 * group
            if mark_index + 1 < len(state.marks) \
                    and state.marks[mark_index] is not None \
                    and state.marks[mark_index + 1] is not None:
                regs.append((state.marks[mark_index], state.marks[mark_index + 1]))
            else:
                # Group did not participate in the match.
                regs.append((-1, -1))
        return tuple(regs)
    def _get_index(self, group):
        """Map a group number or name to its numeric index; raise IndexError
        if no such group exists."""
        if isinstance(group, int):
            if group >= 0 and group <= self.re.groups:
                return group
        else:
            if group in self.re.groupindex:
                return self.re.groupindex[group]
        raise IndexError("no such group")
    def _get_slice(self, group, default):
        """Return the substring of a numeric group, or default when the
        group did not participate in the match."""
        group_indices = self.regs[group]
        if group_indices[0] >= 0:
            return self.string[group_indices[0]:group_indices[1]]
        else:
            return default
    def start(self, group=0):
        """Returns the indices of the start of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][0]
    def end(self, group=0):
        """Returns the indices of the end of the substring matched by group;
        group defaults to zero (meaning the whole matched substring). Returns -1
        if group exists but did not contribute to the match."""
        return self.regs[self._get_index(group)][1]
    def span(self, group=0):
        """Returns the 2-tuple (m.start(group), m.end(group))."""
        return self.start(group), self.end(group)
    def expand(self, template):
        """Return the string obtained by doing backslash substitution and
        resolving group references on template."""
        # The stdlib "sre" module was renamed to "re" long ago; importing
        # "sre" fails on Python 3.  Same aliasing fix as SRE_Pattern._subx.
        import re as sre
        return sre._expand(self.re, self, template)
    def groups(self, default=None):
        """Returns a tuple containing all the subgroups of the match. The
        default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groups = []
        for indices in self.regs[1:]:
            if indices[0] >= 0:
                groups.append(self.string[indices[0]:indices[1]])
            else:
                groups.append(default)
        return tuple(groups)
    def groupdict(self, default=None):
        """Return a dictionary containing all the named subgroups of the match.
        The default argument is used for groups that did not participate in the
        match (defaults to None)."""
        groupdict = {}
        for key, value in self.re.groupindex.items():
            groupdict[key] = self._get_slice(value, default)
        return groupdict
    def group(self, *args):
        """Returns one or more subgroups of the match. Each argument is either a
        group index or a group name."""
        if len(args) == 0:
            args = (0,)
        grouplist = []
        for group in args:
            grouplist.append(self._get_slice(self._get_index(group), None))
        if len(grouplist) == 1:
            return grouplist[0]
        else:
            return tuple(grouplist)
    def __copy__(self):
        # Match objects are not copyable.
        raise TypeError("cannot copy this pattern object")
    def __deepcopy__(self, memo=None):
        # copy.deepcopy() passes a memo dict; accept it so the intended
        # TypeError reaches the caller instead of a signature error.
        raise TypeError("cannot copy this pattern object")
class _State:
    """Mutable matching state: the subject string, the search window and
    cursors, group marks, and the stacks that drive the opcode interpreter.

    A single _State is shared by all _MatchContext frames of one attempt.
    """
    def __init__(self, string, start, end, flags):
        self.string = string
        # Clamp the requested window to the actual string bounds.
        if start < 0:
            start = 0
        if end > len(string):
            end = len(string)
        self.start = start
        self.string_position = self.start
        self.end = end
        self.pos = start
        self.flags = flags
        self.reset()
    def reset(self):
        # Clear per-attempt state (group marks, stacks, repeat chain).
        self.marks = []
        self.lastindex = -1
        self.marks_stack = []
        self.context_stack = []
        self.repeat = None
    def match(self, pattern_codes):
        """Interpret pattern_codes anchored at the current position.

        Contexts are executed iteratively off context_stack; a context
        whose dispatch returned None is suspended (child running) and is
        left on the stack to be resumed later.
        """
        # Optimization: Check string length. pattern_codes[3] contains the
        # minimum length for a string to possibly match.
        # brython.. the optimization doesn't work
        #if pattern_codes[0] == OPCODES["info"] and pattern_codes[3]:
        #    if self.end - self.string_position < pattern_codes[3]:
        #        #_log("reject (got %d chars, need %d)"
        #        #     % (self.end - self.string_position, pattern_codes[3]))
        #        return False
        dispatcher = _OpcodeDispatcher()
        self.context_stack.append(_MatchContext(self, pattern_codes))
        has_matched = None
        while len(self.context_stack) > 0:
            context = self.context_stack[-1]
            has_matched = dispatcher.match(context)
            if has_matched is not None: # don't pop if context isn't done
                self.context_stack.pop()
        return has_matched
    def search(self, pattern_codes):
        """Try match() at successive positions until one succeeds or the
        window is exhausted.  Uses prefix information when available."""
        flags = 0
        if pattern_codes[0] == OPCODES["info"]:
            # optimization info block
            # <INFO> <1=skip> <2=flags> <3=min> <4=max> <5=prefix info>
            if pattern_codes[2] & SRE_INFO_PREFIX and pattern_codes[5] > 1:
                return self.fast_search(pattern_codes)
            flags = pattern_codes[2]
            pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        string_position = self.start
        if pattern_codes[0] == OPCODES["literal"]:
            # Special case: Pattern starts with a literal character. This is
            # used for short prefixes
            character = pattern_codes[1]
            while True:
                # Scan forward for the first literal character.
                while string_position < self.end \
                        and ord(self.string[string_position]) != character:
                    string_position += 1
                if string_position >= self.end:
                    return False
                self.start = string_position
                string_position += 1
                self.string_position = string_position
                if flags & SRE_INFO_LITERAL:
                    # Pattern is a pure literal: finding the char is enough.
                    return True
                if self.match(pattern_codes[2:]):
                    return True
            return False
        # General case
        while string_position <= self.end:
            self.reset()
            self.start = self.string_position = string_position
            if self.match(pattern_codes):
                return True
            string_position += 1
        return False
    def fast_search(self, pattern_codes):
        """Skips forward in a string as fast as possible using information from
        an optimization info block."""
        # pattern starts with a known prefix
        # <5=length> <6=skip> <7=prefix data> <overlap data>
        flags = pattern_codes[2]
        prefix_len = pattern_codes[5]
        prefix_skip = pattern_codes[6] # don't really know what this is good for
        prefix = pattern_codes[7:7 + prefix_len]
        overlap = pattern_codes[7 + prefix_len - 1:pattern_codes[1] + 1]
        pattern_codes = pattern_codes[pattern_codes[1] + 1:]
        i = 0
        string_position = self.string_position
        while string_position < self.end:
            # Knuth-Morris-Pratt-style scan using the overlap table.
            while True:
                if ord(self.string[string_position]) != prefix[i]:
                    if i == 0:
                        break
                    else:
                        i = overlap[i]
                else:
                    i += 1
                    if i == prefix_len:
                        # found a potential match
                        self.start = string_position + 1 - prefix_len
                        self.string_position = string_position + 1 \
                                                     - prefix_len + prefix_skip
                        if flags & SRE_INFO_LITERAL:
                            return True # matched all of pure literal pattern
                        if self.match(pattern_codes[2 * prefix_skip:]):
                            return True
                        i = overlap[i]
                    break
            string_position += 1
        return False
    def set_mark(self, mark_nr, position):
        # Record the string position of mark number mark_nr (group bounds).
        if mark_nr & 1:
            # This id marks the end of a group.
            # fix python 3 division incompatability
            #self.lastindex = mark_nr / 2 + 1
            self.lastindex = mark_nr // 2 + 1
        if mark_nr >= len(self.marks):
            self.marks.extend([None] * (mark_nr - len(self.marks) + 1))
        self.marks[mark_nr] = position
    def get_marks(self, group_index):
        # Return the (start, end) marks of a group, or (None, None).
        marks_index = 2 * group_index
        if len(self.marks) > marks_index + 1:
            return self.marks[marks_index], self.marks[marks_index + 1]
        else:
            return None, None
    def marks_push(self):
        # Snapshot marks + lastindex for backtracking.
        self.marks_stack.append((self.marks[:], self.lastindex))
    def marks_pop(self):
        self.marks, self.lastindex = self.marks_stack.pop()
    def marks_pop_keep(self):
        # Restore the snapshot but leave it on the stack.
        self.marks, self.lastindex = self.marks_stack[-1]
    def marks_pop_discard(self):
        self.marks_stack.pop()
    def lower(self, char_ord):
        return getlower(char_ord, self.flags)
class _MatchContext:
def __init__(self, state, pattern_codes):
self.state = state
self.pattern_codes = pattern_codes
self.string_position = state.string_position
self.code_position = 0
self.has_matched = None
def push_new_context(self, pattern_offset):
"""Creates a new child context of this context and pushes it on the
stack. pattern_offset is the offset off the current code position to
start interpreting from."""
child_context = _MatchContext(self.state,
self.pattern_codes[self.code_position + pattern_offset:])
#print("_sre.py:517:pushing new context") #, child_context.has_matched)
#print(self.state.string_position)
#print(self.pattern_codes[self.code_position + pattern_offset:])
#print(pattern_offset)
self.state.context_stack.append(child_context)
return child_context
def peek_char(self, peek=0):
return self.state.string[self.string_position + peek]
def skip_char(self, skip_count):
self.string_position += skip_count
def remaining_chars(self):
return self.state.end - self.string_position
def peek_code(self, peek=0):
return self.pattern_codes[self.code_position + peek]
def skip_code(self, skip_count):
self.code_position += skip_count
def remaining_codes(self):
return len(self.pattern_codes) - self.code_position
def at_beginning(self):
return self.string_position == 0
def at_end(self):
return self.string_position == self.state.end
def at_linebreak(self):
return not self.at_end() and _is_linebreak(self.peek_char())
def at_boundary(self, word_checker):
if self.at_beginning() and self.at_end():
return False
that = not self.at_beginning() and word_checker(self.peek_char(-1))
this = not self.at_end() and word_checker(self.peek_char())
return this != that
class _RepeatContext(_MatchContext):
    """Context pushed by the REPEAT opcode; records the repetition count
    and links to the enclosing repeat so MAX_UNTIL/MIN_UNTIL can restore
    it when this repeat finishes."""
    def __init__(self, context):
        codes = context.pattern_codes[context.code_position:]
        _MatchContext.__init__(self, context.state, codes)
        # Number of completed repetitions; -1 means "not started yet".
        self.count = -1
        # Enclosing repeat context, restored on exit.
        self.previous = context.state.repeat
        # Guards against looping on zero-width repeated matches.
        self.last_position = None
class _Dispatcher:
DISPATCH_TABLE = None
def dispatch(self, code, context):
method = self.DISPATCH_TABLE.get(code, self.__class__.unknown)
return method(self, context)
def unknown(self, code, ctx):
raise NotImplementedError()
def build_dispatch_table(cls, code_dict, method_prefix):
if cls.DISPATCH_TABLE is not None:
return
table = {}
for key, value in code_dict.items():
if hasattr(cls, "%s%s" % (method_prefix, key)):
table[value] = getattr(cls, "%s%s" % (method_prefix, key))
cls.DISPATCH_TABLE = table
build_dispatch_table = classmethod(build_dispatch_table)
class _OpcodeDispatcher(_Dispatcher):
    """Interprets compiled pattern opcodes against a _MatchContext.

    Handler protocol: simple handlers return True (context advanced, keep
    going).  Backtracking handlers are generators: they yield False to
    suspend while a child context runs on the state's context stack, and
    are resumed by dispatch() the next time the same context is executed.
    A final ``yield True`` ends the handler; the generator is never
    resumed past it.  Setting ctx.has_matched (True/False) finishes the
    context.
    """
    def __init__(self):
        self.executing_contexts = {}
        self.at_dispatcher = _AtcodeDispatcher()
        self.ch_dispatcher = _ChcodeDispatcher()
        self.set_dispatcher = _CharsetDispatcher()
    def match(self, context):
        """Returns True if the current context matches, False if it doesn't and
        None if matching is not finished, ie must be resumed after child
        contexts have been matched."""
        while context.remaining_codes() > 0 and context.has_matched is None:
            opcode = context.peek_code()
            if not self.dispatch(opcode, context):
                # Handler suspended on a child context: resume later.
                return None
        if context.has_matched is None:
            context.has_matched = False
        return context.has_matched
    def dispatch(self, opcode, context):
        """Dispatches a context on a given opcode. Returns True if the context
        is done matching, False if it must be resumed when next encountered."""
        #if self.executing_contexts.has_key(id(context)):
        if id(context) in self.executing_contexts:
            # A generator handler for this context is suspended: resume it.
            generator = self.executing_contexts[id(context)]
            del self.executing_contexts[id(context)]
            has_finished = next(generator)
        else:
            method = self.DISPATCH_TABLE.get(opcode, _OpcodeDispatcher.unknown)
            has_finished = method(self, context)
            if hasattr(has_finished, "__next__"): # avoid using the types module
                # Handler is a generator: run its first step now.
                generator = has_finished
                has_finished = next(generator)
        if not has_finished:
            # Remember the suspended generator for this context.
            self.executing_contexts[id(context)] = generator
        return has_finished
    def op_success(self, ctx):
        # end of pattern
        #self._log(ctx, "SUCCESS")
        ctx.state.string_position = ctx.string_position
        ctx.has_matched = True
        return True
    def op_failure(self, ctx):
        # immediate failure
        #self._log(ctx, "FAILURE")
        ctx.has_matched = False
        return True
    def general_op_literal(self, ctx, compare, decorate=lambda x: x):
        # Shared body of the four literal opcodes; "decorate" lowercases
        # for the IGNORE variants.
        #print(ctx.peek_char())
        if ctx.at_end() or not compare(decorate(ord(ctx.peek_char())),
                                       decorate(ctx.peek_code(1))):
            ctx.has_matched = False
        ctx.skip_code(2)
        ctx.skip_char(1)
    def op_literal(self, ctx):
        # match literal string
        # <LITERAL> <code>
        #self._log(ctx, "LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq)
        return True
    def op_not_literal(self, ctx):
        # match anything that is not the given literal character
        # <NOT_LITERAL> <code>
        #self._log(ctx, "NOT_LITERAL", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne)
        return True
    def op_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.eq, ctx.state.lower)
        return True
    def op_not_literal_ignore(self, ctx):
        # match literal regardless of case
        # <LITERAL_IGNORE> <code>
        #self._log(ctx, "LITERAL_IGNORE", ctx.peek_code(1))
        self.general_op_literal(ctx, operator.ne, ctx.state.lower)
        return True
    def op_at(self, ctx):
        # match at given position
        # <AT> <code>
        #self._log(ctx, "AT", ctx.peek_code(1))
        if not self.at_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line693, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        return True
    def op_category(self, ctx):
        # match at given category
        # <CATEGORY> <code>
        #self._log(ctx, "CATEGORY", ctx.peek_code(1))
        if ctx.at_end() or not self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            ctx.has_matched = False
            #print('_sre.py:line703, update context.has_matched variable')
            return True
        ctx.skip_code(2)
        ctx.skip_char(1)
        return True
    def op_any(self, ctx):
        # match anything (except a newline)
        # <ANY>
        #self._log(ctx, "ANY")
        if ctx.at_end() or ctx.at_linebreak():
            ctx.has_matched = False
            #print('_sre.py:line714, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True
    def op_any_all(self, ctx):
        # match anything
        # <ANY_ALL>
        #self._log(ctx, "ANY_ALL")
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line725, update context.has_matched variable')
            return True
        ctx.skip_code(1)
        ctx.skip_char(1)
        return True
    def general_op_in(self, ctx, decorate=lambda x: x):
        # Shared body of <IN>/<IN_IGNORE>: test current char against a set.
        #self._log(ctx, "OP_IN")
        #print('general_op_in')
        if ctx.at_end():
            ctx.has_matched = False
            #print('_sre.py:line734, update context.has_matched variable')
            return
        skip = ctx.peek_code(1)
        ctx.skip_code(2) # set op pointer to the set code
        #print(ctx.peek_char(), ord(ctx.peek_char()),
        #      decorate(ord(ctx.peek_char())))
        if not self.check_charset(ctx, decorate(ord(ctx.peek_char()))):
            #print('_sre.py:line738, update context.has_matched variable')
            ctx.has_matched = False
            return
        ctx.skip_code(skip - 1)
        ctx.skip_char(1)
        #print('end:general_op_in')
    def op_in(self, ctx):
        # match set member (or non_member)
        # <IN> <skip> <set>
        #self._log(ctx, "OP_IN")
        self.general_op_in(ctx)
        return True
    def op_in_ignore(self, ctx):
        # match set member (or non_member), disregarding case of current char
        # <IN_IGNORE> <skip> <set>
        #self._log(ctx, "OP_IN_IGNORE")
        self.general_op_in(ctx, ctx.state.lower)
        return True
    def op_jump(self, ctx):
        # jump forward
        # <JUMP> <offset>
        #self._log(ctx, "JUMP", ctx.peek_code(1))
        ctx.skip_code(ctx.peek_code(1) + 1)
        return True
    # skip info
    # <INFO> <skip>
    op_info = op_jump
    def op_mark(self, ctx):
        # set mark
        # <MARK> <gid>
        #self._log(ctx, "OP_MARK", ctx.peek_code(1))
        ctx.state.set_mark(ctx.peek_code(1), ctx.string_position)
        ctx.skip_code(2)
        return True
    def op_branch(self, ctx):
        # alternation
        # <BRANCH> <0=skip> code <JUMP> ... <NULL>
        #self._log(ctx, "BRANCH")
        ctx.state.marks_push()
        ctx.skip_code(1)
        current_branch_length = ctx.peek_code(0)
        while current_branch_length:
            # The following tries to shortcut branches starting with a
            # (unmatched) literal. _sre.c also shortcuts charsets here.
            if not (ctx.peek_code(1) == OPCODES["literal"] and \
                    (ctx.at_end() or ctx.peek_code(2) != ord(ctx.peek_char()))):
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(1)
                #print("_sre.py:803:op_branch")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.state.marks_pop_keep()
            ctx.skip_code(current_branch_length)
            current_branch_length = ctx.peek_code(0)
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #print('_sre.py:line805, update context.has_matched variable')
        yield True
    def op_repeat_one(self, ctx):
        # match repeated sequence (maximizing).
        # this operator only works if the repeated item is exactly one character
        # wide, and we're not already collecting backtracking points.
        # <REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #print("repeat one", mincount, maxcount)
        #self._log(ctx, "REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        # Greedily consume as many single-char matches as possible.
        count = self.count_repetitions(ctx, maxcount)
        ctx.skip_char(count)
        if count < mincount:
            ctx.has_matched = False
            yield True
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty.  we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        ctx.state.marks_push()
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["literal"]:
            # Special case: Tail starts with a literal. Skip positions where
            # the rest of the pattern cannot possibly match.
            char = ctx.peek_code(ctx.peek_code(1) + 2)
            while True:
                while count >= mincount and \
                        (ctx.at_end() or ord(ctx.peek_char()) != char):
                    ctx.skip_char(-1)
                    count -= 1
                if count < mincount:
                    break
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                #print("_sre.py:856:push_new_context")
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        else:
            # General case: backtracking
            while count >= mincount:
                ctx.state.string_position = ctx.string_position
                child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
                yield False
                if child_context.has_matched:
                    ctx.has_matched = True
                    yield True
                ctx.skip_char(-1)
                count -= 1
                ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        #ctx.has_matched = True  # <== this should be True (so match object gets returned to program)
        yield True
    def op_min_repeat_one(self, ctx):
        # match repeated sequence (minimizing)
        # <MIN_REPEAT_ONE> <skip> <1=min> <2=max> item <SUCCESS> tail
        mincount = ctx.peek_code(2)
        maxcount = ctx.peek_code(3)
        #self._log(ctx, "MIN_REPEAT_ONE", mincount, maxcount)
        if ctx.remaining_chars() < mincount:
            ctx.has_matched = False
            yield True
        ctx.state.string_position = ctx.string_position
        if mincount == 0:
            count = 0
        else:
            count = self.count_repetitions(ctx, mincount)
            if count < mincount:
                ctx.has_matched = False
                #print('_sre.py:line891, update context.has_matched variable')
                yield True
            ctx.skip_char(count)
        if ctx.peek_code(ctx.peek_code(1) + 1) == OPCODES["success"]:
            # tail is empty.  we're finished
            ctx.state.string_position = ctx.string_position
            ctx.has_matched = True
            yield True
        # Lazily grow the repetition, retrying the tail after each step.
        ctx.state.marks_push()
        while maxcount == MAXREPEAT or count <= maxcount:
            ctx.state.string_position = ctx.string_position
            child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
            #print('_sre.py:916:push new context')
            yield False
            if child_context.has_matched:
                ctx.has_matched = True
                yield True
            ctx.state.string_position = ctx.string_position
            if self.count_repetitions(ctx, 1) == 0:
                break
            ctx.skip_char(1)
            count += 1
            ctx.state.marks_pop_keep()
        ctx.state.marks_pop_discard()
        ctx.has_matched = False
        yield True
    def op_repeat(self, ctx):
        # create repeat context.  all the hard work is done by the UNTIL
        # operator (MAX_UNTIL, MIN_UNTIL)
        # <REPEAT> <skip> <1=min> <2=max> item <UNTIL> tail
        #self._log(ctx, "REPEAT", ctx.peek_code(2), ctx.peek_code(3))
        #if ctx.state.repeat is None:
        #    print("951:ctx.state.repeat is None")
        #    #ctx.state.repeat=_RepeatContext(ctx)
        repeat = _RepeatContext(ctx)
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        child_context = ctx.push_new_context(ctx.peek_code(1) + 1)
        #print("_sre.py:941:push new context", id(child_context))
        #print(child_context.state.repeat)
        #print(ctx.state.repeat)
        # are these two yields causing the issue?
        yield False
        # Child finished: unlink this repeat and propagate its result.
        ctx.state.repeat = repeat.previous
        ctx.has_matched = child_context.has_matched
        yield True
    def op_max_until(self, ctx):
        # maximizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MAX_UNTIL> tail
        repeat = ctx.state.repeat
        #print("op_max_until") #, id(ctx.state.repeat))
        if repeat is None:
            #print(id(ctx), id(ctx.state))
            raise RuntimeError("Internal re error: MAX_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MAX_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        if (count < maxcount or maxcount == MAXREPEAT) \
                and ctx.state.string_position != repeat.last_position:
            # we may have enough matches, if we can match another item, do so
            repeat.count = count
            ctx.state.marks_push()
            save_last_position = repeat.last_position # zero-width match protection
            repeat.last_position = ctx.state.string_position
            child_context = repeat.push_new_context(4)
            yield False
            repeat.last_position = save_last_position
            if child_context.has_matched:
                ctx.state.marks_pop_discard()
                ctx.has_matched = True
                yield True
            ctx.state.marks_pop()
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        # cannot match more repeated items here.  make sure the tail matches
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print("_sre.py:987:op_max_until")
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            ctx.state.repeat = repeat
            ctx.state.string_position = ctx.string_position
        yield True
    def op_min_until(self, ctx):
        # minimizing repeat
        # <REPEAT> <skip> <1=min> <2=max> item <MIN_UNTIL> tail
        repeat = ctx.state.repeat
        if repeat is None:
            raise RuntimeError("Internal re error: MIN_UNTIL without REPEAT.")
        mincount = repeat.peek_code(2)
        maxcount = repeat.peek_code(3)
        ctx.state.string_position = ctx.string_position
        count = repeat.count + 1
        #self._log(ctx, "MIN_UNTIL", count)
        if count < mincount:
            # not enough matches
            repeat.count = count
            child_context = repeat.push_new_context(4)
            yield False
            ctx.has_matched = child_context.has_matched
            if not ctx.has_matched:
                repeat.count = count - 1
                ctx.state.string_position = ctx.string_position
            yield True
        # see if the tail matches
        ctx.state.marks_push()
        ctx.state.repeat = repeat.previous
        child_context = ctx.push_new_context(1)
        #print('_sre.py:1022:push new context')
        yield False
        if child_context.has_matched:
            ctx.has_matched = True
            yield True
        ctx.state.repeat = repeat
        ctx.state.string_position = ctx.string_position
        ctx.state.marks_pop()
        # match more until tail matches
        if count >= maxcount and maxcount != MAXREPEAT:
            ctx.has_matched = False
            #print('_sre.py:line1022, update context.has_matched variable')
            yield True
        repeat.count = count
        child_context = repeat.push_new_context(4)
        yield False
        ctx.has_matched = child_context.has_matched
        if not ctx.has_matched:
            repeat.count = count - 1
            ctx.state.string_position = ctx.string_position
        yield True
    def general_op_groupref(self, ctx, decorate=lambda x: x):
        # Shared body of the backreference opcodes: compare the text of a
        # previously matched group against the current position.
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            ctx.has_matched = False
            return True
        while group_start < group_end:
            if ctx.at_end() or decorate(ord(ctx.peek_char())) \
                    != decorate(ord(ctx.state.string[group_start])):
                ctx.has_matched = False
                #print('_sre.py:line1042, update context.has_matched variable')
                return True
            group_start += 1
            ctx.skip_char(1)
        ctx.skip_code(2)
        return True
    def op_groupref(self, ctx):
        # match backreference
        # <GROUPREF> <zero-based group index>
        #self._log(ctx, "GROUPREF", ctx.peek_code(1))
        return self.general_op_groupref(ctx)
    def op_groupref_ignore(self, ctx):
        # match backreference case-insensitive
        # <GROUPREF_IGNORE> <zero-based group index>
        #self._log(ctx, "GROUPREF_IGNORE", ctx.peek_code(1))
        return self.general_op_groupref(ctx, ctx.state.lower)
    def op_groupref_exists(self, ctx):
        # <GROUPREF_EXISTS> <group> <skip> codeyes <JUMP> codeno ...
        #self._log(ctx, "GROUPREF_EXISTS", ctx.peek_code(1))
        group_start, group_end = ctx.state.get_marks(ctx.peek_code(1))
        if group_start is None or group_end is None or group_end < group_start:
            # Group did not match: take the "no" branch.
            ctx.skip_code(ctx.peek_code(2) + 1)
        else:
            ctx.skip_code(3)
        return True
    def op_assert(self, ctx):
        # assert subpattern
        # <ASSERT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position < 0:
            # Lookbehind would start before the string: cannot match.
            ctx.has_matched = False
            yield True
        child_context = ctx.push_new_context(3)
        yield False
        if child_context.has_matched:
            ctx.skip_code(ctx.peek_code(1) + 1)
        else:
            ctx.has_matched = False
        yield True
    def op_assert_not(self, ctx):
        # assert not subpattern
        # <ASSERT_NOT> <skip> <back> <pattern>
        #self._log(ctx, "ASSERT_NOT", ctx.peek_code(2))
        ctx.state.string_position = ctx.string_position - ctx.peek_code(2)
        if ctx.state.string_position >= 0:
            child_context = ctx.push_new_context(3)
            yield False
            if child_context.has_matched:
                # Negative assertion matched: overall failure.
                ctx.has_matched = False
                yield True
        ctx.skip_code(ctx.peek_code(1) + 1)
        yield True
    def unknown(self, ctx):
        #self._log(ctx, "UNKNOWN", ctx.peek_code())
        raise RuntimeError("Internal re error. Unknown opcode: %s" % ctx.peek_code())
    def check_charset(self, ctx, char):
        """Checks whether a character matches set of arbitrary length. Assumes
        the code pointer is at the first member of the set."""
        self.set_dispatcher.reset(char)
        save_position = ctx.code_position
        result = None
        while result is None:
            result = self.set_dispatcher.dispatch(ctx.peek_code(), ctx)
        ctx.code_position = save_position
        #print("_sre.py:1123:check_charset", result)
        return result
    def count_repetitions(self, ctx, maxcount):
        """Returns the number of repetitions of a single item, starting from the
        current string position. The code pointer is expected to point to a
        REPEAT_ONE operation (with the repeated 4 ahead)."""
        count = 0
        real_maxcount = ctx.state.end - ctx.string_position
        if maxcount < real_maxcount and maxcount != MAXREPEAT:
            real_maxcount = maxcount
        # XXX could special case every single character pattern here, as in C.
        # This is a general solution, a bit hackisch, but works and should be
        # efficient.
        code_position = ctx.code_position
        string_position = ctx.string_position
        ctx.skip_code(4)
        reset_position = ctx.code_position
        while count < real_maxcount:
            # this works because the single character pattern is followed by
            # a success opcode
            ctx.code_position = reset_position
            self.dispatch(ctx.peek_code(), ctx)
            #print("count_repetitions", ctx.has_matched, count)
            if ctx.has_matched is False: # could be None as well
                break
            count += 1
            ctx.has_matched = None
        # Restore the context; only the count is the result.
        ctx.code_position = code_position
        ctx.string_position = string_position
        return count
    def _log(self, context, opname, *args):
        arg_string = ("%s " * len(args)) % args
        _log("|%s|%s|%s %s" % (context.pattern_codes,
            context.string_position, opname, arg_string))
# Register every op_* member above as an opcode handler.
_OpcodeDispatcher.build_dispatch_table(OPCODES, "op_")
class _CharsetDispatcher(_Dispatcher):
    """Dispatches charset opcodes (the body of an <IN> set) against a
    single character code.

    A handler returning self.ok signals set membership; returning None
    lets check_charset() keep scanning the set; NEGATE flips self.ok.
    """
    def __init__(self):
        self.ch_dispatcher = _ChcodeDispatcher()
    def reset(self, char):
        """Prepare for a new membership test of character code char."""
        self.char = char
        # ok flips to False after <NEGATE>; member hits return it as-is.
        self.ok = True
    def set_failure(self, ctx):
        # End of set without a hit: membership result is "not ok".
        return not self.ok
    def set_literal(self, ctx):
        # <LITERAL> <code>
        if ctx.peek_code(1) == self.char:
            return self.ok
        else:
            ctx.skip_code(2)
    def set_category(self, ctx):
        # <CATEGORY> <code>
        if self.ch_dispatcher.dispatch(ctx.peek_code(1), ctx):
            return self.ok
        else:
            ctx.skip_code(2)
    def set_charset(self, ctx):
        # <CHARSET> <bitmap> (16 bits per code word)
        char_code = self.char
        ctx.skip_code(1) # point to beginning of bitmap
        if CODESIZE == 2:
            if char_code < 256 and ctx.peek_code(char_code >> 4) \
                    & (1 << (char_code & 15)):
                return self.ok
            ctx.skip_code(16) # skip bitmap
        else:
            if char_code < 256 and ctx.peek_code(char_code >> 5) \
                    & (1 << (char_code & 31)):
                return self.ok
            ctx.skip_code(8) # skip bitmap
    def set_range(self, ctx):
        # <RANGE> <lower> <upper>
        if ctx.peek_code(1) <= self.char <= ctx.peek_code(2):
            return self.ok
        ctx.skip_code(3)
    def set_negate(self, ctx):
        self.ok = not self.ok
        ctx.skip_code(1)
    #fixme brython. array module doesn't exist
    def set_bigcharset(self, ctx):
        # Fix: the original raised "NotImplementationError", a name that does
        # not exist, so hitting this path produced a NameError instead of the
        # intended NotImplementedError.
        raise NotImplementedError("_sre.py: set_bigcharset, array not implemented")
        # Unreachable reference implementation, kept for when the array
        # module becomes available:
        # <BIGCHARSET> <blockcount> <256 blockindices> <blocks>
        char_code = self.char
        count = ctx.peek_code(1)
        ctx.skip_code(2)
        if char_code < 65536:
            block_index = char_code >> 8
            # NB: there are CODESIZE block indices per bytecode
            a = array.array("B")
            a.fromstring(array.array(CODESIZE == 2 and "H" or "I",
                    [ctx.peek_code(block_index // CODESIZE)]).tostring())
            block = a[block_index % CODESIZE]
            ctx.skip_code(256 // CODESIZE) # skip block indices
            block_value = ctx.peek_code(block * (32 // CODESIZE)
                    + ((char_code & 255) >> (CODESIZE == 2 and 4 or 5)))
            if block_value & (1 << (char_code & ((8 * CODESIZE) - 1))):
                return self.ok
        else:
            ctx.skip_code(256 // CODESIZE) # skip block indices
        ctx.skip_code(count * (32 // CODESIZE)) # skip blocks
    def unknown(self, ctx):
        return False
# Register every set_* member above as a charset opcode handler.
_CharsetDispatcher.build_dispatch_table(OPCODES, "set_")
class _AtcodeDispatcher(_Dispatcher):
    """Dispatches AT (anchor) opcodes: each handler reports whether the
    current position of the context satisfies the anchor."""
    def at_beginning(self, context):
        return context.at_beginning()
    at_beginning_string = at_beginning
    def at_beginning_line(self, context):
        # "^" in MULTILINE: start of string, or just after a line break.
        return context.at_beginning() or _is_linebreak(context.peek_char(-1))
    def at_end(self, context):
        # "$": end of string, or just before a single trailing newline.
        return (context.remaining_chars() == 1 and context.at_linebreak()) or context.at_end()
    def at_end_line(self, context):
        return context.at_linebreak() or context.at_end()
    def at_end_string(self, context):
        return context.at_end()
    def at_boundary(self, context):
        return context.at_boundary(_is_word)
    def at_non_boundary(self, context):
        return not context.at_boundary(_is_word)
    def at_loc_boundary(self, context):
        return context.at_boundary(_is_loc_word)
    def at_loc_non_boundary(self, context):
        return not context.at_boundary(_is_loc_word)
    def at_uni_boundary(self, context):
        return context.at_boundary(_is_uni_word)
    def at_uni_non_boundary(self, context):
        return not context.at_boundary(_is_uni_word)
    def unknown(self, context):
        return False
_AtcodeDispatcher.build_dispatch_table(ATCODES, "")
class _ChcodeDispatcher(_Dispatcher):
    """Dispatcher for CATEGORY opcodes (\\d, \\s, \\w and their negations).

    Each handler tests the character at the current position in *ctx*;
    the dispatch table is generated from CHCODES below.
    """
    def category_digit(self, ctx):
        return _is_digit(ctx.peek_char())
    def category_not_digit(self, ctx):
        return not _is_digit(ctx.peek_char())
    def category_space(self, ctx):
        return _is_space(ctx.peek_char())
    def category_not_space(self, ctx):
        return not _is_space(ctx.peek_char())
    def category_word(self, ctx):
        return _is_word(ctx.peek_char())
    def category_not_word(self, ctx):
        return not _is_word(ctx.peek_char())
    def category_linebreak(self, ctx):
        return _is_linebreak(ctx.peek_char())
    def category_not_linebreak(self, ctx):
        return not _is_linebreak(ctx.peek_char())
    # Locale-dependent variants (re.LOCALE).
    def category_loc_word(self, ctx):
        return _is_loc_word(ctx.peek_char())
    def category_loc_not_word(self, ctx):
        return not _is_loc_word(ctx.peek_char())
    # Unicode variants (re.UNICODE) delegate to str methods where possible.
    def category_uni_digit(self, ctx):
        return ctx.peek_char().isdigit()
    def category_uni_not_digit(self, ctx):
        return not ctx.peek_char().isdigit()
    def category_uni_space(self, ctx):
        return ctx.peek_char().isspace()
    def category_uni_not_space(self, ctx):
        return not ctx.peek_char().isspace()
    def category_uni_word(self, ctx):
        return _is_uni_word(ctx.peek_char())
    def category_uni_not_word(self, ctx):
        return not _is_uni_word(ctx.peek_char())
    def category_uni_linebreak(self, ctx):
        return ord(ctx.peek_char()) in _uni_linebreaks
    def category_uni_not_linebreak(self, ctx):
        return ord(ctx.peek_char()) not in _uni_linebreaks
    def unknown(self, ctx):
        # Unrecognized category opcode: never matches.
        return False
_ChcodeDispatcher.build_dispatch_table(CHCODES, "")
_ascii_char_info = [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 6, 2,
2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 25, 25, 25, 25, 25, 25, 25,
25, 25, 0, 0, 0, 0, 0, 0, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0,
0, 0, 16, 0, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 0, 0, 0, 0, 0 ]
def _is_digit(char):
    """True if *char* is an ASCII decimal digit (table bit 1)."""
    code = ord(char)
    if code >= 128:
        return False
    return _ascii_char_info[code] & 1
def _is_space(char):
    """True if *char* is ASCII whitespace (table bit 2)."""
    code = ord(char)
    if code >= 128:
        return False
    return _ascii_char_info[code] & 2
def _is_word(char):
    """True if *char* is an ASCII word character (table bit 16)."""
    # NB: non-ASCII chars aren't words according to _sre.c
    code = ord(char)
    if code >= 128:
        return False
    return _ascii_char_info[code] & 16
def _is_loc_word(char):
    """Locale word character: underscore, or alphanumeric in the 8-bit range."""
    if char == '_':
        return True
    return ord(char) <= 255 and char.isalnum()
def _is_uni_word(char):
    """Unicode word character: alphanumeric or underscore."""
    # The original wrapped the test in chr(ord(char)) — an identity
    # round-trip left over from the Python 2 unichr() version; test the
    # character directly instead.
    return char.isalnum() or char == '_'
def _is_linebreak(char):
    # Only LF counts as a line break for the non-Unicode categories;
    # the Unicode variants use the _uni_linebreaks list below instead.
    return char == "\n"
# Static list of all unicode codepoints reported by Py_UNICODE_ISLINEBREAK.
_uni_linebreaks = [10, 13, 28, 29, 30, 133, 8232, 8233]
def _log(message):
    """Debugging aid, permanently disabled; flip the guard to enable tracing."""
    if False:
        print(message)
| gpl-3.0 |
glmcdona/meddle | examples/base/Lib/distutils/command/clean.py | 251 | 2814 | """distutils.command.clean
Implements the Distutils 'clean' command."""
# contributed by Bastian Kleineidam <calvin@cs.uni-sb.de>, added 2000-03-18
__revision__ = "$Id$"
import os
from distutils.core import Command
from distutils.dir_util import remove_tree
from distutils import log
class clean(Command):
    """Distutils command that removes build artifacts.

    By default only the temporary by-products (build/temp.<plat>) are
    removed; with --all the library, bdist and script directories are
    removed too, and an attempt is made to remove the base build dir.
    """

    description = "clean up temporary files from 'build' command"
    user_options = [
        ('build-base=', 'b',
         "base build directory (default: 'build.build-base')"),
        ('build-lib=', None,
         "build directory for all modules (default: 'build.build-lib')"),
        ('build-temp=', 't',
         "temporary build directory (default: 'build.build-temp')"),
        ('build-scripts=', None,
         "build directory for scripts (default: 'build.build-scripts')"),
        ('bdist-base=', None,
         "temporary directory for built distributions"),
        ('all', 'a',
         "remove all build output, not just temporary by-products")
    ]
    boolean_options = ['all']

    def initialize_options(self):
        # All options default to None so finalize_options can tell which
        # ones the user set on the command line.
        self.build_base = None
        self.build_lib = None
        self.build_temp = None
        self.build_scripts = None
        self.bdist_base = None
        self.all = None

    def finalize_options(self):
        # Inherit any unset directory options from the 'build' and 'bdist'
        # commands so the same layout is cleaned that was built.
        self.set_undefined_options('build',
                                   ('build_base', 'build_base'),
                                   ('build_lib', 'build_lib'),
                                   ('build_scripts', 'build_scripts'),
                                   ('build_temp', 'build_temp'))
        self.set_undefined_options('bdist',
                                   ('bdist_base', 'bdist_base'))

    def run(self):
        # remove the build/temp.<plat> directory (unless it's already
        # gone)
        if os.path.exists(self.build_temp):
            remove_tree(self.build_temp, dry_run=self.dry_run)
        else:
            log.debug("'%s' does not exist -- can't clean it",
                      self.build_temp)
        if self.all:
            # remove build directories
            for directory in (self.build_lib,
                              self.bdist_base,
                              self.build_scripts):
                if os.path.exists(directory):
                    remove_tree(directory, dry_run=self.dry_run)
                else:
                    log.warn("'%s' does not exist -- can't clean it",
                             directory)
        # just for the heck of it, try to remove the base build directory:
        # we might have emptied it right now, but if not we don't care
        if not self.dry_run:
            try:
                os.rmdir(self.build_base)
                log.info("removing '%s'", self.build_base)
            except OSError:
                # non-empty or missing directory -- deliberately ignored
                pass
# class clean
| mit |
mahak/ansible | test/units/parsing/test_ajson.py | 34 | 7169 | # Copyright 2018, Matt Martz <matt@sivel.net>
# Copyright 2019, Andrew Klychkov @Andersson007 <aaklychkov@mail.ru>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import json
import pytest
from datetime import date, datetime
from pytz import timezone as tz
from ansible.module_utils.common._collections_compat import Mapping
from ansible.parsing.ajson import AnsibleJSONEncoder, AnsibleJSONDecoder
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import AnsibleUnsafeText
def test_AnsibleJSONDecoder_vault():
    """Decoding the fixture must wrap every vaulted value, including
    nested ones, in AnsibleVaultEncryptedUnicode."""
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/ajson.json')) as f:
        data = json.load(f, cls=AnsibleJSONDecoder)
    assert isinstance(data['password'], AnsibleVaultEncryptedUnicode)
    assert isinstance(data['bar']['baz'][0]['password'], AnsibleVaultEncryptedUnicode)
    assert isinstance(data['foo']['password'], AnsibleVaultEncryptedUnicode)
def test_encode_decode_unsafe():
    """Round-trip AnsibleUnsafeText values through the encoder/decoder:
    encoding produces ``__ansible_unsafe`` marker objects, decoding
    restores the original structure."""
    data = {
        'key_value': AnsibleUnsafeText(u'{#NOTACOMMENT#}'),
        'list': [AnsibleUnsafeText(u'{#NOTACOMMENT#}')],
        'list_dict': [{'key_value': AnsibleUnsafeText(u'{#NOTACOMMENT#}')}]}
    json_expected = (
        '{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}, '
        '"list": [{"__ansible_unsafe": "{#NOTACOMMENT#}"}], '
        '"list_dict": [{"key_value": {"__ansible_unsafe": "{#NOTACOMMENT#}"}}]}'
    )
    # sort_keys makes the serialized form deterministic for the comparison.
    assert json.dumps(data, cls=AnsibleJSONEncoder, preprocess_unsafe=True, sort_keys=True) == json_expected
    assert json.loads(json_expected, cls=AnsibleJSONDecoder) == data
def vault_data():
    """
    Prepare AnsibleVaultEncryptedUnicode test data for AnsibleJSONEncoder.default().
    Return a list of tuples (input, expected).
    """
    with open(os.path.join(os.path.dirname(__file__), 'fixtures/ajson.json')) as f:
        data = json.load(f, cls=AnsibleJSONDecoder)
    data_0 = data['password']
    data_1 = data['bar']['baz'][0]['password']
    # The two expected ciphertexts differ only in the final hex pair
    # ('3339' vs '3338'), matching the two distinct fixture entries.
    expected_0 = (u'$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316'
                  '562356435376162633631326264383934326565333633366238\n3863'
                  '373264326461623132613931346165636465346337310a32643431383'
                  '0316337393263616439\n646539373134633963666338613632666334'
                  '65663730303633323534363331316164623237363831\n35363335613'
                  '93238370a313330316263373938326162386433313336613532653538'
                  '376662306435\n3339\n')
    expected_1 = (u'$ANSIBLE_VAULT;1.1;AES256\n34646264306632313333393636316'
                  '562356435376162633631326264383934326565333633366238\n3863'
                  '373264326461623132613931346165636465346337310a32643431383'
                  '0316337393263616439\n646539373134633963666338613632666334'
                  '65663730303633323534363331316164623237363831\n35363335613'
                  '93238370a313330316263373938326162386433313336613532653538'
                  '376662306435\n3338\n')
    return [
        (data_0, expected_0),
        (data_1, expected_1),
    ]
class TestAnsibleJSONEncoder:
    """
    Namespace for testing AnsibleJSONEncoder.
    """
    @pytest.fixture(scope='class')
    def mapping(self, request):
        """
        Returns object of Mapping mock class.
        The object is used for testing handling of Mapping objects
        in AnsibleJSONEncoder.default().
        Using a plain dictionary instead is not suitable because
        it is handled by default encoder of the superclass (json.JSONEncoder).
        """
        class M(Mapping):
            """Mock mapping class."""
            def __init__(self, *args, **kwargs):
                self.__dict__.update(*args, **kwargs)
            def __getitem__(self, key):
                return self.__dict__[key]
            def __iter__(self):
                return iter(self.__dict__)
            def __len__(self):
                return len(self.__dict__)
        # request.param carries the dict supplied via indirect parametrize.
        return M(request.param)
    @pytest.fixture
    def ansible_json_encoder(self):
        """Return AnsibleJSONEncoder object."""
        return AnsibleJSONEncoder()
    ###############
    # Test methods:
    @pytest.mark.parametrize(
        'test_input,expected',
        [
            (datetime(2019, 5, 14, 13, 39, 38, 569047), '2019-05-14T13:39:38.569047'),
            (datetime(2019, 5, 14, 13, 47, 16, 923866), '2019-05-14T13:47:16.923866'),
            (date(2019, 5, 14), '2019-05-14'),
            (date(2020, 5, 14), '2020-05-14'),
            (datetime(2019, 6, 15, 14, 45, tzinfo=tz('UTC')), '2019-06-15T14:45:00+00:00'),
            (datetime(2019, 6, 15, 14, 45, tzinfo=tz('Europe/Helsinki')), '2019-06-15T14:45:00+01:40'),
        ]
    )
    def test_date_datetime(self, ansible_json_encoder, test_input, expected):
        """
        Test for passing datetime.date or datetime.datetime objects to AnsibleJSONEncoder.default().
        """
        # Dates/datetimes must serialize to their ISO-8601 representation.
        assert ansible_json_encoder.default(test_input) == expected
    @pytest.mark.parametrize(
        'mapping,expected',
        [
            ({1: 1}, {1: 1}),
            ({2: 2}, {2: 2}),
            ({1: 2}, {1: 2}),
            ({2: 1}, {2: 1}),
        ], indirect=['mapping'],
    )
    def test_mapping(self, ansible_json_encoder, mapping, expected):
        """
        Test for passing Mapping object to AnsibleJSONEncoder.default().
        """
        # Mappings must come back as plain dicts.
        assert ansible_json_encoder.default(mapping) == expected
    @pytest.mark.parametrize('test_input,expected', vault_data())
    def test_ansible_json_decoder_vault(self, ansible_json_encoder, test_input, expected):
        """
        Test for passing AnsibleVaultEncryptedUnicode to AnsibleJSONEncoder.default().
        """
        assert ansible_json_encoder.default(test_input) == {'__ansible_vault': expected}
        assert json.dumps(test_input, cls=AnsibleJSONEncoder, preprocess_unsafe=True) == '{"__ansible_vault": "%s"}' % expected.replace('\n', '\\n')
    @pytest.mark.parametrize(
        'test_input,expected',
        [
            ({1: 'first'}, {1: 'first'}),
            ({2: 'second'}, {2: 'second'}),
        ]
    )
    def test_default_encoder(self, ansible_json_encoder, test_input, expected):
        """
        Test for the default encoder of AnsibleJSONEncoder.default().
        If objects of different classes that are not tested above were passed,
        AnsibleJSONEncoder.default() invokes 'default()' method of json.JSONEncoder superclass.
        """
        assert ansible_json_encoder.default(test_input) == expected
    @pytest.mark.parametrize('test_input', [1, 1.1, 'string', [1, 2], set('set'), True, None])
    def test_default_encoder_unserializable(self, ansible_json_encoder, test_input):
        """
        Test for the default encoder of AnsibleJSONEncoder.default(), not serializable objects.
        It must fail with TypeError 'object is not serializable'.
        """
        with pytest.raises(TypeError):
            ansible_json_encoder.default(test_input)
| gpl-3.0 |
xin1195/smartSearch | setting.py | 1 | 1207 | #!/usr/bin/env python3
# _*_coding:utf-8_*_
import os
import motor.motor_tornado
import redis
from pymongo import MongoClient
from common.logManageLib import get_logger
# Tornado application settings.
# NOTE(review): cookie_secret is committed to source control and debug=True
# is enabled -- both should be externalized/disabled for production.
settings = dict(
    template_path=os.path.join(os.path.dirname(__file__), "templates"),
    static_path=os.path.join(os.path.dirname(__file__), "static"),
    cookie_secret="bZJc2sWbQLKoscdGkHn/VytuyfgXwQt8S0R0kRvJ5/xJ89E=",
    login_url="/login",
    xsrf_cookies=True,
    debug=True,
)
# Async MongoDB client (Motor) for use inside Tornado handlers.
# NOTE(review): host/port are hard-coded; consider moving to configuration.
client = motor.motor_tornado.MotorClient('mongodb://112.74.204.250:27017')
# Synchronous PyMongo client for blocking database access.
g_py_client = MongoClient("mongodb://112.74.204.250:27017")
# Redis connection (database 1, no password).
g_redis_db = redis.StrictRedis(host='112.74.204.250', port=6379, password=None, db=1)
# Common cache expiry intervals, in seconds.
g_redis_time_5m = 5 * 60
g_redis_time_10m = 10 * 60
g_redis_time_30m = 30 * 60
g_redis_time_1h = 1 * 60 * 60
g_redis_time_2h = 2 * 60 * 60
g_redis_time_5h = 5 * 60 * 60
g_redis_time_1d = 24 * 60 * 60
g_redis_time_1w = 7 * 24 * 60 * 60
# Logging configuration (debug=20 corresponds to logging.INFO).
logger = get_logger(strFileName="smartSearch.log", debug=20, showStreamLog=True, saveLogPath=None)
# Site domain used when building absolute URLs.
domain = "http://www.liuhub.com/"
# domain = "http://127.0.0.1:8000/"
| apache-2.0 |
Detry322/map-creator | app/random.py | 1 | 1453 | from app.models import all_models
from app.utils import mkdir_p
from app import GENERATED_TILES_FOLDER, RANDOM_FOLDER, BACKPROPS_FOLDER
from scipy import misc
import glob
import numpy as np
import os
from keras.models import load_model, Model
from keras.optimizers import Adam, SGD, Adagrad
from keras.layers import LocallyConnected1D, Input, Reshape
from app import BACKPROPS_FOLDER, FORWARDPROPS_FOLDER, RANDOM_FOLDER
from app.utils import mkdir_p
from app.forwardprop import forwardprop_single_image
NOISE_SIZE = 100
import time
def random(model_file):
    """Generate map tiles via latent-vector arithmetic.

    Loads a trained model from *model_file*, treats its first layer as the
    generator and freezes it, then for every pair (a, na) of back-propagated
    "water" vectors computes ``a - na + no_api_key_trees`` -- presumably an
    analogy in latent space (water-with-key minus water-without-key plus
    trees-without-key); TODO confirm the intended semantics.  Each result is
    forward-propagated through the generator and saved as a PNG in a
    timestamped subdirectory of RANDOM_FOLDER.
    """
    model = load_model(model_file)
    generator = model.layers[0]
    # Freeze the generator: this function only does inference.
    generator.trainable = False
    for layer in generator.layers:
        layer.trainable = False
    api_key_water = [np.loadtxt(filename) for filename in glob.glob(os.path.join(BACKPROPS_FOLDER, 'api_key', 'water', '*.txt'))]
    no_api_key_water = [np.loadtxt(filename) for filename in glob.glob(os.path.join(BACKPROPS_FOLDER, 'no_api_key', 'water', '*.txt'))]
    no_api_key_trees = np.loadtxt(os.path.join(BACKPROPS_FOLDER, 'no_api_key', 'trees', '3391.png.txt'))
    folder = os.path.join(RANDOM_FOLDER, '{}'.format(time.time()))
    mkdir_p(folder)
    for a in api_key_water:
        for na in no_api_key_water:
            api_key_trees = a - na + no_api_key_trees
            image = forwardprop_single_image(generator, api_key_trees)
            # Rescale generator output from [-1, 1] to [0, 255] for saving.
            misc.imsave(os.path.join(folder, 'land-{}.png'.format(time.time())), ((image + 1)*128).astype('uint8'))
| mit |
ClearCorp/odoo-clearcorp | TODO-6.1/address_name_inc/__openerp__.py | 4 | 2266 | # -*- encoding: utf-8 -*-
##############################################################################
#
# address_name_inc.py
# address_name_inc
# First author: Mag Guevara <mag.guevara@clearcorp.co.cr> (ClearCorp S.A.)
# Copyright (c) 2011-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
{
    "name" : "Address name include",
    "author" : "ClearCorp S.A.",
    "version" : "0.1",
    "depends" : ["base","base_contact"],
    "init_xml" : [],
    "update_xml" : ["address_name_inc_view.xml"],
    "category" : "Base",
    "active" : False,
    # BUGFIX: the key was misspelled "instalable"; OpenERP reads
    # "installable", so the flag was silently ignored.
    "installable" : True,
}
| agpl-3.0 |
PHSCRC/phsled | nfc/handover/server.py | 5 | 5318 | # -*- coding: latin-1 -*-
# -----------------------------------------------------------------------------
# Copyright 2009-2011 Stephen Tiedemann <stephen.tiedemann@googlemail.com>
#
# Licensed under the EUPL, Version 1.1 or - as soon they
# will be approved by the European Commission - subsequent
# versions of the EUPL (the "Licence");
# You may not use this work except in compliance with the
# Licence.
# You may obtain a copy of the Licence at:
#
# http://www.osor.eu/eupl
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the Licence is
# distributed on an "AS IS" basis,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied.
# See the Licence for the specific language governing
# permissions and limitations under the Licence.
# -----------------------------------------------------------------------------
#
# Negotiated Connection Handover - Server Base Class
#
import logging
log = logging.getLogger(__name__)
from threading import Thread
import nfc.llcp
class HandoverServer(Thread):
    """ NFC Forum Connection Handover server

    Binds an LLCP data-link socket to 'urn:nfc:sn:handover' and spawns
    one serving thread per accepted client connection.

    NOTE(review): byte strings are handled as '' / str below -- this code
    targets Python 2.
    """
    def __init__(self, llc, request_size_limit=0x10000,
                 recv_miu=1984, recv_buf=15):
        # NOTE(review): request_size_limit is accepted but never enforced
        # anywhere in this class -- confirm whether requests should be bounded.
        socket = nfc.llcp.Socket(llc, nfc.llcp.DATA_LINK_CONNECTION)
        recv_miu = socket.setsockopt(nfc.llcp.SO_RCVMIU, recv_miu)
        recv_buf = socket.setsockopt(nfc.llcp.SO_RCVBUF, recv_buf)
        socket.bind('urn:nfc:sn:handover')
        log.info("handover server bound to port {0} (MIU={1}, RW={2})"
                 .format(socket.getsockname(), recv_miu, recv_buf))
        socket.listen(backlog=2)
        Thread.__init__(self, name='urn:nfc:sn:handover',
                        target=self.listen, args=(llc, socket))
    def listen(self, llc, socket):
        # Accept loop: each client connection is served on its own thread.
        log.debug("handover listen thread started")
        try:
            while True:
                client_socket = socket.accept()
                client_thread = Thread(target=HandoverServer.serve,
                                       args=(client_socket, self))
                client_thread.start()
        except nfc.llcp.Error as e:
            # EPIPE just means the peer/link went away; log other errors.
            (log.debug if e.errno == nfc.llcp.errno.EPIPE else log.error)(e)
        finally:
            socket.close()
            log.debug("handover listen thread terminated")
    @staticmethod
    def serve(socket, handover_server):
        # Per-client loop: reassemble a request message from LLCP fragments,
        # process it, and send the response back in MIU-sized chunks.
        peer_sap = socket.getpeername()
        log.info("serving handover client on remote sap {0}".format(peer_sap))
        send_miu = socket.getsockopt(nfc.llcp.SO_SNDMIU)
        try:
            while True:
                request_data = ''
                while socket.poll("recv"):
                    data = socket.recv()
                    if data is not None:
                        request_data += data
                        try:
                            # Accumulate fragments until they parse as a
                            # complete NDEF message.
                            request = nfc.ndef.Message(request_data)
                            break # message complete
                        except nfc.ndef.LengthError:
                            continue # need more data
                    else: return # connection closed
                else: return # connection closed
                log.debug("<<< {0!r}".format(request_data))
                response = handover_server._process_request(request)
                response_data = str(response)
                log.debug(">>> {0!r}".format(response_data))
                while len(response_data) > 0:
                    if socket.send(response_data[0:send_miu]):
                        response_data = response_data[send_miu:]
                    else:
                        return # connection closed
        except nfc.llcp.Error as e:
            (log.debug if e.errno == nfc.llcp.errno.EPIPE else log.error)(e)
        finally:
            socket.close()
            log.debug("handover serve thread terminated")
    def _process_request(self, request):
        # Validate/decode an 'Hr' message and delegate to process_request();
        # on any failure fall back to an empty version-1.2 'Hs' message.
        log.debug("rcvd handover request {0}\n{1}"
                  .format(request.type, request.pretty()))
        response = nfc.ndef.Message("\xd1\x02\x01Hs\x12")
        if not request.type == 'urn:nfc:wkt:Hr':
            log.error("received message which is not a handover request")
        else:
            try:
                request = nfc.ndef.HandoverRequestMessage(request)
            except nfc.ndef.DecodeError as e:
                log.error("error decoding 'Hr' message: {0}".format(e))
            else:
                response = self.process_request(request)
        log.debug("send handover response {0}\n{1}"
                  .format(response.type, response.pretty()))
        return response
    def process_request(self, request):
        """Process a handover request message. The *request* argument
        is a :class:`nfc.ndef.HandoverRequestMessage` object. The
        return value must be a :class:`nfc.ndef.HandoverSelectMessage`
        object to be sent back to the client.
        This method should be overwritten by a subclass of
        :class:`HandoverServer` to customize it's behavior. The
        default implementation returns a version ``1.2``
        :class:`nfc.ndef.HandoverSelectMessage` with no carriers.
        """
        log.warning("default process_request method should be overwritten")
        return nfc.ndef.HandoverSelectMessage(version="1.2")
| mit |
prefetchnta/questlab | bin/x64bin/python/36/Lib/calendar.py | 1 | 23926 | """Calendar printing functions
Note when comparing these calendars to the ones printed by cal(1): By
default, these calendars have Monday as the first day of the week, and
Sunday as the last (the European convention). Use setfirstweekday() to
set the first day of the week (0=Monday, 6=Sunday)."""
import sys
import datetime
import locale as _locale
from itertools import repeat
__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday",
"firstweekday", "isleap", "leapdays", "weekday", "monthrange",
"monthcalendar", "prmonth", "month", "prcal", "calendar",
"timegm", "month_name", "month_abbr", "day_name", "day_abbr",
"Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar",
"LocaleHTMLCalendar", "weekheader"]
# Exception raised for bad input (with string parameter for details)
error = ValueError
# Exceptions raised for bad input
class IllegalMonthError(ValueError):
    """Raised for a month number outside the valid 1..12 range."""
    def __init__(self, month):
        self.month = month
    def __str__(self):
        return f"bad month number {self.month!r}; must be 1-12"
class IllegalWeekdayError(ValueError):
    """Raised for a weekday number outside the valid 0..6 range."""
    def __init__(self, weekday):
        self.weekday = weekday
    def __str__(self):
        return f"bad weekday number {self.weekday!r}; must be 0 (Monday) to 6 (Sunday)"
# Constants for months referenced later
January = 1
February = 2
# Number of days per month (except for February in leap years)
mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
# This module used to have hard-coded lists of day and month names, as
# English strings. The classes following emulate a read-only version of
# that, but supply localized names. Note that the values are computed
# fresh on each call, in case the user changes locale between calls.
class _localized_month:
    """Read-only, locale-aware sequence of month names.

    One-based: index 0 yields the empty string; indices 1-12 format the
    first day of the corresponding month of 2001 with the stored strftime
    format, so values always reflect the current locale.
    """
    _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)]
    _months.insert(0, lambda x: "")
    def __init__(self, format):
        self.format = format
    def __getitem__(self, i):
        funcs = self._months[i]
        if isinstance(i, slice):
            return [f(self.format) for f in funcs]
        return funcs(self.format)
    def __len__(self):
        return 13
class _localized_day:
    """Read-only, locale-aware sequence of weekday names.

    Zero-based, Monday first: January 1, 2001 was a Monday, so day *i* is
    formatted from January i+1, 2001 with the stored strftime format.
    """
    # January 1, 2001, was a Monday.
    _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)]
    def __init__(self, format):
        self.format = format
    def __getitem__(self, i):
        funcs = self._days[i]
        if isinstance(i, slice):
            return [f(self.format) for f in funcs]
        return funcs(self.format)
    def __len__(self):
        return 7
# Full and abbreviated names of weekdays
day_name = _localized_day('%A')
day_abbr = _localized_day('%a')
# Full and abbreviated names of months (1-based arrays!!!)
month_name = _localized_month('%B')
month_abbr = _localized_month('%b')
# Constants for weekdays
(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7)
def isleap(year):
    """Return True for leap years, False for non-leap years."""
    # Gregorian rule: divisible by 4, except centuries not divisible by 400.
    if year % 4:
        return False
    return year % 100 != 0 or year % 400 == 0
def leapdays(y1, y2):
    """Return number of leap years in range [y1, y2).

    Assume y1 <= y2."""
    # Count multiples of 4, subtract centuries, add back multiples of 400,
    # using the inclusive bounds y1-1 .. y2-1.
    a = y1 - 1
    b = y2 - 1
    count = b // 4 - a // 4
    count -= b // 100 - a // 100
    count += b // 400 - a // 400
    return count
def weekday(year, month, day):
    """Return weekday (0-6 ~ Mon-Sun) for year (1970-...), month (1-12),
    day (1-31)."""
    date = datetime.date(year, month, day)
    return date.weekday()
def monthrange(year, month):
    """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for
    year, month."""
    if month < 1 or month > 12:
        raise IllegalMonthError(month)
    first_weekday = weekday(year, month, 1)
    ndays = mdays[month]
    if month == February and isleap(year):
        ndays += 1
    return first_weekday, ndays
class Calendar(object):
    """
    Base calendar class. This class doesn't do any formatting. It simply
    provides data to subclasses.
    """
    def __init__(self, firstweekday=0):
        self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday
    def getfirstweekday(self):
        # Stored value is kept raw; it is normalized modulo 7 on read.
        return self._firstweekday % 7
    def setfirstweekday(self, firstweekday):
        self._firstweekday = firstweekday
    firstweekday = property(getfirstweekday, setfirstweekday)
    def iterweekdays(self):
        """
        Return an iterator for one week of weekday numbers starting with the
        configured first one.
        """
        for i in range(self.firstweekday, self.firstweekday + 7):
            yield i%7
    def itermonthdates(self, year, month):
        """
        Return an iterator for one month. The iterator will yield datetime.date
        values and will always iterate through complete weeks, so it will yield
        dates outside the specified month.
        """
        date = datetime.date(year, month, 1)
        # Go back to the beginning of the week
        days = (date.weekday() - self.firstweekday) % 7
        date -= datetime.timedelta(days=days)
        oneday = datetime.timedelta(days=1)
        while True:
            yield date
            try:
                date += oneday
            except OverflowError:
                # Adding one day could fail after datetime.MAXYEAR
                break
            # Stop once the month is over and a new week begins.
            if date.month != month and date.weekday() == self.firstweekday:
                break
    def itermonthdays2(self, year, month):
        """
        Like itermonthdates(), but will yield (day number, weekday number)
        tuples. For days outside the specified month the day number is 0.
        """
        # enumerate starts at firstweekday so i % 7 yields true weekdays.
        for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday):
            yield d, i % 7
    def itermonthdays(self, year, month):
        """
        Like itermonthdates(), but will yield day numbers. For days outside
        the specified month the day number is 0.
        """
        day1, ndays = monthrange(year, month)
        # Pad with zeros before and after so complete weeks are yielded.
        days_before = (day1 - self.firstweekday) % 7
        yield from repeat(0, days_before)
        yield from range(1, ndays + 1)
        days_after = (self.firstweekday - day1 - ndays) % 7
        yield from repeat(0, days_after)
    def monthdatescalendar(self, year, month):
        """
        Return a matrix (list of lists) representing a month's calendar.
        Each row represents a week; week entries are datetime.date values.
        """
        dates = list(self.itermonthdates(year, month))
        return [ dates[i:i+7] for i in range(0, len(dates), 7) ]
    def monthdays2calendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; week entries are
        (day number, weekday number) tuples. Day numbers outside this month
        are zero.
        """
        days = list(self.itermonthdays2(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]
    def monthdayscalendar(self, year, month):
        """
        Return a matrix representing a month's calendar.
        Each row represents a week; days outside this month are zero.
        """
        days = list(self.itermonthdays(year, month))
        return [ days[i:i+7] for i in range(0, len(days), 7) ]
    def yeardatescalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting. The return
        value is a list of month rows. Each month row contains up to width months.
        Each month contains between 4 and 6 weeks and each week contains 1-7
        days. Days are datetime.date objects.
        """
        months = [
            self.monthdatescalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]
    def yeardays2calendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are
        (day number, weekday number) tuples. Day numbers outside this month are
        zero.
        """
        months = [
            self.monthdays2calendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]
    def yeardayscalendar(self, year, width=3):
        """
        Return the data for the specified year ready for formatting (similar to
        yeardatescalendar()). Entries in the week lists are day numbers.
        Day numbers outside this month are zero.
        """
        months = [
            self.monthdayscalendar(year, i)
            for i in range(January, January+12)
        ]
        return [months[i:i+width] for i in range(0, len(months), width) ]
class TextCalendar(Calendar):
    """
    Subclass of Calendar that outputs a calendar as a simple plain text
    similar to the UNIX program cal.

    Common parameters: *w* is the width of a date column, *l* the number
    of lines per week row, *c* the spacing between month columns and *m*
    the number of months per row.
    """
    def prweek(self, theweek, width):
        """
        Print a single week (no newline).
        """
        print(self.formatweek(theweek, width), end=' ')
    def formatday(self, day, weekday, width):
        """
        Returns a formatted day.
        """
        if day == 0:
            # Day belongs to an adjacent month: render blank space.
            s = ''
        else:
            s = '%2i' % day # right-align single-digit days
        return s.center(width)
    def formatweek(self, theweek, width):
        """
        Returns a single week in a string (no newline).
        """
        return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek)
    def formatweekday(self, day, width):
        """
        Returns a formatted week day name.
        """
        # Use full names only when the column is wide enough for them.
        if width >= 9:
            names = day_name
        else:
            names = day_abbr
        return names[day][:width].center(width)
    def formatweekheader(self, width):
        """
        Return a header for a week.
        """
        return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays())
    def formatmonthname(self, theyear, themonth, width, withyear=True):
        """
        Return a formatted month name.
        """
        s = month_name[themonth]
        if withyear:
            s = "%s %r" % (s, theyear)
        return s.center(width)
    def prmonth(self, theyear, themonth, w=0, l=0):
        """
        Print a month's calendar.
        """
        print(self.formatmonth(theyear, themonth, w, l), end='')
    def formatmonth(self, theyear, themonth, w=0, l=0):
        """
        Return a month's calendar string (multi-line).
        """
        # Enforce minimum sensible dimensions.
        w = max(2, w)
        l = max(1, l)
        s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1)
        s = s.rstrip()
        s += '\n' * l
        s += self.formatweekheader(w).rstrip()
        s += '\n' * l
        for week in self.monthdays2calendar(theyear, themonth):
            s += self.formatweek(week, w).rstrip()
            s += '\n' * l
        return s
    def formatyear(self, theyear, w=2, l=1, c=6, m=3):
        """
        Returns a year's calendar as a multi-line string.
        """
        w = max(2, w)
        l = max(1, l)
        c = max(2, c)
        colwidth = (w + 1) * 7 - 1
        v = []
        a = v.append
        a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip())
        a('\n'*l)
        header = self.formatweekheader(w)
        for (i, row) in enumerate(self.yeardays2calendar(theyear, m)):
            # months in this row
            months = range(m*i+1, min(m*(i+1)+1, 13))
            a('\n'*l)
            names = (self.formatmonthname(theyear, k, colwidth, False)
                     for k in months)
            a(formatstring(names, colwidth, c).rstrip())
            a('\n'*l)
            headers = (header for k in months)
            a(formatstring(headers, colwidth, c).rstrip())
            a('\n'*l)
            # max number of weeks for this row
            height = max(len(cal) for cal in row)
            for j in range(height):
                weeks = []
                for cal in row:
                    if j >= len(cal):
                        # This month has fewer weeks: pad with an empty cell.
                        weeks.append('')
                    else:
                        weeks.append(self.formatweek(cal[j], w))
                a(formatstring(weeks, colwidth, c).rstrip())
                a('\n' * l)
        return ''.join(v)
    def pryear(self, theyear, w=0, l=0, c=6, m=3):
        """Print a year's calendar."""
        print(self.formatyear(theyear, w, l, c, m))
class HTMLCalendar(Calendar):
    """
    This calendar returns complete HTML pages.

    Cell contents are day numbers and locale month/day names inserted via
    %d/%s, so no user-controlled text reaches the markup.
    """
    # CSS classes for the day <td>s
    cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"]
    def formatday(self, day, weekday):
        """
        Return a day as a table cell.
        """
        if day == 0:
            return '<td class="noday">&nbsp;</td>' # day outside month
        else:
            return '<td class="%s">%d</td>' % (self.cssclasses[weekday], day)
    def formatweek(self, theweek):
        """
        Return a complete week as a table row.
        """
        s = ''.join(self.formatday(d, wd) for (d, wd) in theweek)
        return '<tr>%s</tr>' % s
    def formatweekday(self, day):
        """
        Return a weekday name as a table header.
        """
        return '<th class="%s">%s</th>' % (self.cssclasses[day], day_abbr[day])
    def formatweekheader(self):
        """
        Return a header for a week as a table row.
        """
        s = ''.join(self.formatweekday(i) for i in self.iterweekdays())
        return '<tr>%s</tr>' % s
    def formatmonthname(self, theyear, themonth, withyear=True):
        """
        Return a month name as a table row.
        """
        if withyear:
            s = '%s %s' % (month_name[themonth], theyear)
        else:
            s = '%s' % month_name[themonth]
        return '<tr><th colspan="7" class="month">%s</th></tr>' % s
    def formatmonth(self, theyear, themonth, withyear=True):
        """
        Return a formatted month as a table.
        """
        v = []
        a = v.append
        a('<table border="0" cellpadding="0" cellspacing="0" class="month">')
        a('\n')
        a(self.formatmonthname(theyear, themonth, withyear=withyear))
        a('\n')
        a(self.formatweekheader())
        a('\n')
        for week in self.monthdays2calendar(theyear, themonth):
            a(self.formatweek(week))
            a('\n')
        a('</table>')
        a('\n')
        return ''.join(v)
    def formatyear(self, theyear, width=3):
        """
        Return a formatted year as a table of tables.
        """
        v = []
        a = v.append
        width = max(width, 1)
        a('<table border="0" cellpadding="0" cellspacing="0" class="year">')
        a('\n')
        a('<tr><th colspan="%d" class="year">%s</th></tr>' % (width, theyear))
        for i in range(January, January+12, width):
            # months in this row
            months = range(i, min(i+width, 13))
            a('<tr>')
            for m in months:
                a('<td>')
                a(self.formatmonth(theyear, m, withyear=False))
                a('</td>')
            a('</tr>')
        a('</table>')
        return ''.join(v)
    def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None):
        """
        Return a formatted year as a complete HTML page.
        """
        if encoding is None:
            encoding = sys.getdefaultencoding()
        v = []
        a = v.append
        a('<?xml version="1.0" encoding="%s"?>\n' % encoding)
        a('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n')
        a('<html>\n')
        a('<head>\n')
        a('<meta http-equiv="Content-Type" content="text/html; charset=%s" />\n' % encoding)
        if css is not None:
            a('<link rel="stylesheet" type="text/css" href="%s" />\n' % css)
        a('<title>Calendar for %d</title>\n' % theyear)
        a('</head>\n')
        a('<body>\n')
        a(self.formatyear(theyear, width))
        a('</body>\n')
        a('</html>\n')
        # xmlcharrefreplace keeps the page valid for charsets that cannot
        # represent every localized name.
        return ''.join(v).encode(encoding, "xmlcharrefreplace")
class different_locale:
def __init__(self, locale):
self.locale = locale
def __enter__(self):
self.oldlocale = _locale.getlocale(_locale.LC_TIME)
_locale.setlocale(_locale.LC_TIME, self.locale)
def __exit__(self, *args):
_locale.setlocale(_locale.LC_TIME, self.oldlocale)
class LocaleTextCalendar(TextCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """

    def __init__(self, firstweekday=0, locale=None):
        TextCalendar.__init__(self, firstweekday)
        # Fall back to the process default locale when none is given.
        self.locale = locale if locale is not None else _locale.getdefaultlocale()

    def formatweekday(self, day, width):
        """Return a centered, possibly abbreviated weekday name."""
        with different_locale(self.locale):
            # Full names only fit when at least 9 columns are available.
            names = day_name if width >= 9 else day_abbr
            return names[day][:width].center(width)

    def formatmonthname(self, theyear, themonth, width, withyear=True):
        """Return a centered month name, optionally followed by the year."""
        with different_locale(self.locale):
            title = month_name[themonth]
            if withyear:
                title = "%s %r" % (title, theyear)
            return title.center(width)
class LocaleHTMLCalendar(HTMLCalendar):
    """
    This class can be passed a locale name in the constructor and will return
    month and weekday names in the specified locale. If this locale includes
    an encoding all strings containing month and weekday names will be returned
    as unicode.
    """

    def __init__(self, firstweekday=0, locale=None):
        HTMLCalendar.__init__(self, firstweekday)
        # Fall back to the process default locale when none is given.
        self.locale = locale if locale is not None else _locale.getdefaultlocale()

    def formatweekday(self, day):
        """Return a weekday name as a localized table header."""
        with different_locale(self.locale):
            label = day_abbr[day]
            return '<th class="%s">%s</th>' % (self.cssclasses[day], label)

    def formatmonthname(self, theyear, themonth, withyear=True):
        """Return a localized month name as a table row."""
        with different_locale(self.locale):
            title = month_name[themonth]
            if withyear:
                title = '%s %s' % (title, theyear)
            return '<tr><th colspan="7" class="month">%s</th></tr>' % title
# Support for old module level interface
c = TextCalendar()
firstweekday = c.getfirstweekday
def setfirstweekday(firstweekday):
    """Set the weekday (MONDAY..SUNDAY) the module-level calendar starts on."""
    if firstweekday < MONDAY or firstweekday > SUNDAY:
        raise IllegalWeekdayError(firstweekday)
    c.firstweekday = firstweekday
monthcalendar = c.monthdayscalendar
prweek = c.prweek
week = c.formatweek
weekheader = c.formatweekheader
prmonth = c.prmonth
month = c.formatmonth
calendar = c.formatyear
prcal = c.pryear
# Spacing of month columns for multi-column year calendar
_colwidth = 7*3 - 1         # Amount printed by prweek()
_spacing = 6                # Number of spaces between columns


def format(cols, colwidth=_colwidth, spacing=_spacing):
    """Prints multi-column formatting for year calendars"""
    print(formatstring(cols, colwidth, spacing))


def formatstring(cols, colwidth=_colwidth, spacing=_spacing):
    """Returns a string formatted from n strings, centered within n columns."""
    gap = ' ' * spacing
    return gap.join(col.center(colwidth) for col in cols)
EPOCH = 1970
_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal()


def timegm(tuple):
    """Unrelated but handy function to calculate Unix timestamp from GMT."""
    year, month, day, hour, minute, second = tuple[:6]
    # Whole days since the epoch, then fold in the time-of-day fields.
    days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + (day - 1)
    return ((days * 24 + hour) * 60 + minute) * 60 + second
def main(args):
    """CLI entry point: print a text calendar or emit an HTML calendar page.

    ``args`` is an argv-style list (args[0] is the program name).
    """
    import argparse
    parser = argparse.ArgumentParser()
    textgroup = parser.add_argument_group('text only arguments')
    htmlgroup = parser.add_argument_group('html only arguments')
    textgroup.add_argument(
        "-w", "--width",
        type=int, default=2,
        help="width of date column (default 2)"
    )
    textgroup.add_argument(
        "-l", "--lines",
        type=int, default=1,
        help="number of lines for each week (default 1)"
    )
    textgroup.add_argument(
        "-s", "--spacing",
        type=int, default=6,
        help="spacing between months (default 6)"
    )
    textgroup.add_argument(
        "-m", "--months",
        type=int, default=3,
        help="months per row (default 3)"
    )
    htmlgroup.add_argument(
        "-c", "--css",
        default="calendar.css",
        help="CSS to use for page"
    )
    parser.add_argument(
        "-L", "--locale",
        default=None,
        help="locale to be used from month and weekday names"
    )
    parser.add_argument(
        "-e", "--encoding",
        default=None,
        help="encoding to use for output"
    )
    parser.add_argument(
        "-t", "--type",
        default="text",
        choices=("text", "html"),
        help="output type (text or html)"
    )
    parser.add_argument(
        "year",
        nargs='?', type=int,
        help="year number (1-9999)"
    )
    parser.add_argument(
        "month",
        nargs='?', type=int,
        help="month number (1-12, text only)"
    )
    options = parser.parse_args(args[1:])
    # A locale without an explicit output encoding is ambiguous.
    if options.locale and not options.encoding:
        parser.error("if --locale is specified --encoding is required")
        # NOTE(review): parser.error() already exits with status 2, so this
        # sys.exit(1) is unreachable.
        sys.exit(1)
    locale = options.locale, options.encoding
    if options.type == "html":
        if options.locale:
            cal = LocaleHTMLCalendar(locale=locale)
        else:
            cal = HTMLCalendar()
        encoding = options.encoding
        if encoding is None:
            encoding = sys.getdefaultencoding()
        optdict = dict(encoding=encoding, css=options.css)
        # HTML output is bytes, so it goes through the binary buffer.
        write = sys.stdout.buffer.write
        if options.year is None:
            write(cal.formatyearpage(datetime.date.today().year, **optdict))
        elif options.month is None:
            write(cal.formatyearpage(options.year, **optdict))
        else:
            # Single-month output is not supported in HTML mode.
            parser.error("incorrect number of arguments")
            # NOTE(review): unreachable for the same reason as above.
            sys.exit(1)
    else:
        if options.locale:
            cal = LocaleTextCalendar(locale=locale)
        else:
            cal = TextCalendar()
        optdict = dict(w=options.width, l=options.lines)
        if options.month is None:
            # Spacing/months-per-row only apply to the full-year layout.
            optdict["c"] = options.spacing
            optdict["m"] = options.months
        if options.year is None:
            result = cal.formatyear(datetime.date.today().year, **optdict)
        elif options.month is None:
            result = cal.formatyear(options.year, **optdict)
        else:
            result = cal.formatmonth(options.year, options.month, **optdict)
        write = sys.stdout.write
        if options.encoding:
            # Explicit encoding requested: emit encoded bytes instead of text.
            result = result.encode(options.encoding)
            write = sys.stdout.buffer.write
        write(result)
if __name__ == "__main__":
main(sys.argv)
| lgpl-2.1 |
40223246/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/highlight.py | 617 | 2518 | import keyword
import _jsre as re
from browser import html
letters = 'abcdefghijklmnopqrstuvwxyz'
letters += letters.upper()+'_'
digits = '0123456789'
builtin_funcs = ("abs|divmod|input|open|staticmethod|all|enumerate|int|ord|str|any|" +
"eval|isinstance|pow|sum|basestring|execfile|issubclass|print|super|" +
"binfile|iter|property|tuple|bool|filter|len|range|type|bytearray|" +
"float|list|raw_input|unichr|callable|format|locals|reduce|unicode|" +
"chr|frozenset|long|reload|vars|classmethod|getattr|map|repr|xrange|" +
"cmp|globals|max|reversed|zip|compile|hasattr|memoryview|round|" +
"__import__|complex|hash|min|set|apply|delattr|help|next|setattr|" +
"buffer|dict|hex|object|slice|coerce|dir|id|oct|sorted|intern")
kw_pattern = '^('+'|'.join(keyword.kwlist)+')$'
bf_pattern = '^('+builtin_funcs+')$'
def highlight(txt, string_color="blue", comment_color="green",
        keyword_color="purple"):
    """Return *txt* wrapped in an html.PRE element with simple Python
    syntax highlighting.

    String literals are wrapped in SPANs colored *string_color*, comments
    *comment_color*, and keyword/builtin identifiers *keyword_color*;
    everything else is appended unchanged.
    """
    res = html.PRE()
    i = 0
    name = ''  # identifier characters accumulated so far
    while i<len(txt):
        car = txt[i]
        if car in ["'",'"']:
            # String literal: scan forward for the matching quote.
            k = i+1
            while k<len(txt):
                if txt[k]==car:
                    # Count the backslashes immediately preceding the quote;
                    # an even count means the quote is not escaped.
                    nb_as = 0
                    j = k-1
                    while True:
                        if txt[j]=='\\':
                            nb_as+=1
                            j -= 1
                        else:
                            break
                    if nb_as % 2 == 0:
                        res <= html.SPAN(txt[i:k+1],
                            style=dict(color=string_color))
                        i = k
                        break
                k += 1
            # NOTE(review): if the literal is never terminated, the scan
            # falls through and the opening quote is silently dropped.
        elif car == '#': # comment
            # Comment runs to end of line (or end of text).
            end = txt.find('\n', i)
            if end== -1:
                res <= html.SPAN(txt[i:],style=dict(color=comment_color))
                break
            else:
                res <= html.SPAN(txt[i:end],style=dict(color=comment_color))
                # Stop just before the newline so it is emitted normally
                # on the next iteration.
                i = end-1
        elif car in letters:
            name += car
        elif car in digits and name:
            # Digits count as identifier characters only after a letter.
            name += car
        else:
            # Delimiter: flush any pending identifier, coloring keywords
            # and builtin function names.
            if name:
                if re.search(kw_pattern,name):
                    res <= html.SPAN(name,style=dict(color=keyword_color))
                elif re.search(bf_pattern,name):
                    res <= html.SPAN(name,style=dict(color=keyword_color))
                else:
                    res <= name
                name = ''
            res <= car
        i += 1
    # Flush a trailing identifier at end of input.
    # NOTE(review): this final flush skips the keyword/builtin coloring
    # performed in the loop body — confirm whether that is intended.
    res <= name
    return res
befair/gasistafelice | gasistafelice/gf/gas/management/commands/import_gasmembers.py | 3 | 11585 |
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import IntegrityError, transaction
from django.core.files import File
from django.contrib.auth.models import User
from lib.csvmanager import CSVManager
from lib import get_params_from_template
from gf.gas.models import GAS, GASMember
from gf.base.models import Place, Person, Contact
from pprint import pprint
import logging
log = logging.getLogger(__name__)
ENCODING = "iso-8859-1"
class Command(BaseCommand):
    # Management command (Python 2 / Django) that imports GAS members from a
    # CSV file. For every row it creates or interactively updates a User,
    # Person, Contacts and Place, and enrolls the Person in the given GAS.
    args = "<gas_pk> <csv_file> [delimiter] [python_template]"
    # CSV field names the python template is allowed to reference.
    allowed_keys = ['name','surname','email','city','image','address','display_name','phone']
    help = """Import gasmembers from csv file. Attributes allowed in python template are:
    """ + ",".join(allowed_keys) + """.
    """

    def handle(self, *args, **options):
        """Parse the CSV and import one GASMember per row (one transaction)."""
        try:
            gas_pk = int(args[0])
            csv_filename = args[1]
        except:
            raise CommandError("Usage import_gasmembers: %s" % (self.args))
        if len(args) > 2:
            delimiter = args[2]
        else:
            delimiter = ";"
        if len(args) == 4:
            tmpl = args[3]
        else:
            tmpl = "%(name)s %(surname)s %(email)s %(city)s"
        # STEP 0: prepare data in dicts
        f = file(csv_filename, "rb")
        csvdata = f.read()
        f.close()
        # Field order is derived from the placeholders in the template.
        fieldnames = get_params_from_template(tmpl)
        m = CSVManager(fieldnames=fieldnames, delimiter=delimiter, encoding=ENCODING)
        data = m.read(csvdata)
        # NOTE(review): pprint() prints and returns None, so this logs None
        # (and parses the CSV a second time) — confirm intent.
        log.debug(pprint(m.read(csvdata)))
        # Data prepared
        g = GAS.objects.get(pk=gas_pk)
        g.config.auto_populate_products = True
        g.config.save()
        # STEP 2: process data and create instances
        with transaction.atomic():
            for d in data:
                log.info("#### ---- start new user import... ----####")
                try:
                    user, updated = self._get_or_create_user(d)
                    try:
                        pers = user.person
                    except Person.DoesNotExist:
                        # New person: build contacts and place, then bind user.
                        contacts = self._get_or_create_contacts(d)
                        place = self._get_or_create_place(d)
                        pers = self._get_or_create_person(d, contacts, place)
                        pers.user = user
                        pers.save()
                    else:
                        # This is a user of an already created person
                        log.info(("PERSON %s ALREADY EXISTENT" % user.person).decode(ENCODING))
                        if updated:
                            log.debug("UPDATE PERSON DETAILS")
                            contacts = self._update_contacts(user.person, d)
                            place = self._update_place(user.person, d)
                            pers = self._update_person(user.person, d, contacts, place, force=True)
                        else:
                            log.debug("SKIP IT")
                except KeyError, e:
                    # Distinguish an unknown template key from a missing one.
                    if e.message not in self.allowed_keys:
                        raise CommandError("Invalid key '%s' provided. Allowed keys in python template are: %s" % (e.message, self.allowed_keys))
                    else:
                        raise CommandError("Key '%s' is REQUIRED." % e.message)
                gm, created = GASMember.objects.get_or_create(person=pers, gas=g)
                gm.save()
                log.info(("CREATED GASMEMBER %s" % gm).decode(ENCODING))
        return 0

    def _manage_multiple_duplicates(self, qs, display_attrs):
        """Log duplicate instances found in *qs*; merging is unimplemented."""
        model_verbose_plural = qs.model._meta.verbose_name_plural
        log.info(u"FOUND MANY DUPLICATES FOR %s: %s" % (
            model_verbose_plural,
            "\n".join([str(t) for t in qs.values_list(*display_attrs)])
        ))
        # 3 options:
        # a. one instance of the querySet if the one that we need
        # b. same as "a." but overwrite info with new info
        # c. create another model instance
        raise NotImplementedError("cannot merge many %s" % model_verbose_plural)

    def _update_contacts(self, pers, d):
        """Process contacts of bound person."""
        # Existing email addresses already attached to the person.
        emails = map(lambda x : x[0], pers.contacts.filter(flavour="EMAIL").values_list('value'))
        ans = "N"
        if d['email'] not in emails:
            ans = raw_input("Email %s not found for person %s. Found emails %s. Add it [y/N]?" % (d['email'], pers, emails))
        if ans == "Y":
            c_email = Contact.objects.create(flavour="EMAIL", value=d['email'])
            pers.contacts.add(c_email)
        return pers.contacts

    def _update_place(self, pers, d):
        """Process place already bound to person."""
        place = pers.address
        ans = "N"
        if place:
            # Ask before clobbering an existing address.
            ans = raw_input("Found address %s for person %s. Overwrite with new info %s [y/N]?" % \
                (place, pers, "city=%s address=%s name=''" % (d['city'], d.get('address','')))
            )
        else:
            place = Place()
            ans = "Y"
        if ans.upper() == "Y":
            place.city = d['city']
            place.address = d.get('address','')
            place.name = ''
            place.save()
        return place

    def _get_or_create_contacts(self, d):
        """Create contacts. Fields:
        * email: **required**
        * phone: optional
        """
        # Blank email falls back to the configured junk address.
        email = d['email'] or settings.JUNK_EMAIL
        try:
            c_email, created = Contact.objects.get_or_create(flavour="EMAIL", value=email)
        except Contact.MultipleObjectsReturned:
            contacts = Contact.objects.filter(flavour="EMAIL", value=email)
            try:
                ans = self._manage_multiple_duplicates(contacts, ('value', 'person', 'gas', 'supplier'))
            except NotImplementedError:
                #FIXME: create a new email contact entry
                c_email = Contact.objects.create(flavour="EMAIL", value=email)
        c_email.save()
        log.debug("CREATED EMAIL CONTACT %s" % c_email)
        rv = [c_email]
        if d.has_key('phone'):
            c_phone, created = Contact.objects.get_or_create(flavour="PHONE", value=d['phone'])
            c_phone.save()
            rv.append(c_phone)
            # NOTE(review): logs c_email, not c_phone — confirm intent.
            log.debug("CREATED PHONE CONTACT %s" % c_email)
        return rv

    def _get_or_create_place(self, d):
        """Create place. Fields:
        * city: **required**
        * address: optional
        """
        kw = {
            'city' : d['city'],
            'address' : d.get('address',''),
            'name' : '',
        }
        pl, created = Place.objects.get_or_create(**kw)
        pl.save()
        if created:
            log.debug((u"CREATED PLACE %s" % pl).decode(ENCODING))
        return pl

    def _get_or_create_person(self, d, contacts, place):
        """Get or create a Person, wiring in contacts/place on creation."""
        kw = {
            'name' : d['name'],
            'surname' : d['surname'],
        }
        # Person
        pers, created = Person.objects.get_or_create(**kw)
        if created:
            log.info(("CREATED PERSON %s" % pers).decode(ENCODING))
            if d.get('display_name'):
                pers.display_name = d['display_name']
            # Upload image
            if d.get('image'):
                log.info(("Setting image %s for person %s" % (d['image'], pers)).decode(ENCODING))
                f = File(file(d['image'], "rb"))
                pers.avatar = f
            pers.address = place
            pers.save()
            pers.contact_set.add(*contacts)
        else:
            pers = self._update_person(pers, d, contacts, place)
        pers.save()
        return pers

    def _update_person(self, pers, d, contacts, place, force=False):
        """Update an existing Person, prompting unless *force* is True."""
        ans = "N"
        if force:
            ans = "Y"
        if pers.address != place:
            if not force:
                ans = raw_input("Found address %s for person %s. Overwrite with new address %s [y/N]?" % (pers.address, pers, place))
            if ans.upper() == "Y":
                pers.address = place
        if d.get('display_name'):
            if pers.display_name != d['display_name']:
                if not force:
                    # NOTE(review): prompt shows pers.address where the old
                    # display_name was probably intended — confirm.
                    ans = raw_input("Found display_name %s for person %s. Overwrite with new display_name %s [y/N]?" % (pers.address, pers, d['display_name']))
                if ans.upper() == "Y":
                    pers.display_name = d['display_name']
        # Upload image
        if not pers.avatar and d.get('image'):
            log.info(("Setting image %s for person %s" % (d['image'], pers)).decode(ENCODING))
            f = File(file(d['image'], "rb"))
            pers.avatar = f
        log.debug(("FOUND PERSON %s" % pers).decode(ENCODING))
        pers.name = d['name'].capitalize()
        pers.surname = d['surname'].capitalize()
        pers.save()
        return pers

    def _get_or_create_user(self, d):
        """Find or create the Django User for a row.

        Returns (user, updated) where *updated* means existing person data
        should be refreshed.
        """
        base_username = d.get('username') or d['name'].replace(' ','').lower()
        ans = ""
        updated = False
        users = User.objects.filter(email=d['email'])
        if users.count() > 1:
            ans = self._manage_multiple_duplicates(users, ('id','first_name', 'last_name', 'username', 'email'))
            # TODO TODO TODO
        elif users.count() == 1:
            user = users[0]
            log.info("FOUND USER WITH EMAIL %s: %s" % (
                d['email'],
                "name=%s surname=%s username=%s" % (user.first_name, user.last_name, user.username)
            ))
            log.info("USER TO BE ADDED: name=%s surname=%s username=%s" % (d['name'], d['surname'], base_username))
            ans = "A"
            if user.username == base_username:
                log.info("Usernames match. Assuming person, contacts, and place info unchanged")
            else:
                msg = "Usernames don't match"
                ans = raw_input("%s. What should I do?\na. Keep current user as is\nb. Overwrite current user with new info\nc. Create a new user\n[A/b/c] ?" % msg)
                ans = ans.upper()
            if ans == "C":
                user = User(username=base_username, email=d['email'])
            elif ans == "B":
                user.first_name = d['name'].capitalize()
                user.last_name = d['surname'].capitalize()
                if User.objects.filter(username=base_username).count():
                    log.warning("CANNOT update username because it is already used")
                else:
                    user.username = base_username
                updated = True
        else:
            # No user with this email yet: build a fresh, unsaved User.
            user = User(username=base_username, email=d['email'])
            user.first_name = d['name'].capitalize()
            user.last_name = d['surname'].capitalize()
        # Process only if we are creating a new user
        # (retry with numeric suffixes until the username is unique).
        c = 1
        while not user.pk:
            try:
                sid = transaction.savepoint()
                user.save()
                transaction.savepoint_commit(sid)
            except IntegrityError:
                transaction.savepoint_rollback(sid)
                user.username = "%s%s" % ( base_username, c)
                c += 1
            else:
                user.set_password("default")
                user.is_active=False
                user.save()
                log.info("CREATED USER %s" % user)
        user.save()
        return user, updated
| agpl-3.0 |
linsalrob/EdwardsLab | phage_protein_blast_genera/tax_violin_plots.py | 1 | 2239 | """
"""
import os
import sys
import argparse
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('-f', help='Genome average output file (from genera_per_phage_protein.py', default='/home/redwards/Desktop/gav_all_host.out')
    parser.add_argument('-n', help='taxonomy name one of: kingdom / phylum / genus / species', default='genus')
    parser.add_argument('-v', help='verbose output', action="store_true")
    args = parser.parse_args()

    # Plural rank names for the y-axis label, and the input-file column that
    # holds the per-genome average for each rank.
    ynames = {'kingdom' : 'kingdoms', 'phylum' : 'phyla', 'genus' : 'genera', 'species' : 'species'}
    col = None
    colkey = {'kingdom' : 3, 'phylum' : 4, 'genus' : 5, 'species' : 6}
    if args.n not in colkey:
        sys.stderr.write("Sorry, taxonomy name must be one of {}\n".format("|".join(list(colkey.keys()))))
        sys.exit(-1)
    col = colkey[args.n]

    # Body sites of interest; every other site is pooled into 'All phages'.
    want = {'Gut', 'Mouth', 'Nose', 'Skin', 'Lungs'}

    # data: body site -> list of per-genome averages (column `col`, as float).
    data = {}
    with open(args.f, 'r') as fin:
        for l in fin:
            p=l.strip().split("\t")
            if p[2] not in want:
                p[2] = 'All phages'
                #continue ## comment or uncomment this to include/exclude all data
            if p[2] not in data:
                data[p[2]] = []
            data[p[2]].append(float(p[col]))

    # Build parallel lists of labels, score lists and x-axis tick positions.
    labels = sorted(data.keys())
    scores = []
    count = 1
    ticks = []
    for l in labels:
        scores.append(data[l])
        ticks.append(count)
        count += 1

    fig = plt.figure()
    ax = fig.add_subplot(111)
    # ax.boxplot(alldata)
    vp = ax.violinplot(scores, showmeans=True)
    # First violin (alphabetically 'All phages') gray, second sandybrown,
    # the rest lightpink.
    for i, j in enumerate(vp['bodies']):
        if i == 0:
            j.set_color('gray')
        elif i == 1:
            j.set_color('sandybrown')
        else:
            j.set_color('lightpink')
    ax.set_xlabel("Body Site")
    ax.set_ylabel("Average number of {}".format(ynames[args.n]))
    ax.set_xticks(ticks)
    ax.set_xticklabels(labels, rotation='vertical')
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    fig.set_facecolor('white')
    plt.tight_layout()
    #plt.show()
    fig.savefig("/home/redwards/Desktop/bodysites.png")
| mit |
class Solution():
    """Recursive permutation generator."""

    def permute(self, nums):
        """Return all permutations of *nums* as a list of lists.

        Preserves the original contract: None -> [[]], and a sequence of
        length 0 or 1 is returned wrapped as its single permutation.
        """
        if nums is None:
            return [[]]
        elif len(nums) <= 1:
            return [nums]
        result = []
        for i, item in enumerate(nums):
            # Fix each element in front, then recurse on the remainder.
            # BUG FIX: the recursive call must be self.permute(...); the bare
            # name `permute` is undefined inside the method scope and raised
            # NameError for any input longer than one element.
            for tail in self.permute(nums[:i] + nums[i + 1:]):
                result.append([item] + tail)
        return result
class Solution1(object):
    """Permutation generation via explicit backtracking DFS."""

    def permute(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        if nums is None:
            return []
        if not nums:
            return [[]]
        self.result = []
        self.dfs(nums, [False] * len(nums), [])
        return self.result

    def dfs(self, nums, visited, permutation):
        """Extend *permutation* with each unused element, recording it once
        full length is reached."""
        if len(permutation) == len(nums):
            self.result.append(permutation[:])
        for idx in range(len(nums)):
            if visited[idx]:
                continue
            permutation.append(nums[idx])
            visited[idx] = True
            self.dfs(nums, visited, permutation)
            visited[idx] = False
            permutation.pop()
if __name__ == "__main__":
    # Smoke test: print every permutation of a small sample list.
    solver = Solution1()
    print(solver.permute([1, 5, 9]))
| mit |
jack198345/volatility | volatility/plugins/malware/svcscan.py | 44 | 16277 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright (c) 2010, 2011, 2012 Michael Ligh <michael.ligh@mnin.org>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import volatility.utils as utils
import volatility.obj as obj
import volatility.plugins.common as common
import volatility.win32.tasks as tasks
import volatility.debug as debug
import volatility.plugins.registry.registryapi as registryapi
#--------------------------------------------------------------------------------
# vtypes
#--------------------------------------------------------------------------------
SERVICE_TYPE_FLAGS = {
'SERVICE_KERNEL_DRIVER': 0,
'SERVICE_FILE_SYSTEM_DRIVER': 1,
'SERVICE_WIN32_OWN_PROCESS': 4,
'SERVICE_WIN32_SHARE_PROCESS': 5,
'SERVICE_INTERACTIVE_PROCESS': 8}
SERVICE_STATE_ENUM = {
1: 'SERVICE_STOPPED',
2: 'SERVICE_START_PENDING',
3: 'SERVICE_STOP_PENDING',
4: 'SERVICE_RUNNING',
5: 'SERVICE_CONTINUE_PENDING',
6: 'SERVICE_PAUSE_PENDING',
7: 'SERVICE_PAUSED'}
svcscan_base_x86 = {
'_SERVICE_HEADER': [ None, {
'Tag': [ 0x0, ['array', 4, ['unsigned char']]],
'ServiceRecord': [ 0xC, ['pointer', ['_SERVICE_RECORD']]],
} ],
'_SERVICE_LIST_ENTRY' : [ 0x8, {
'Blink' : [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
'Flink' : [ 0x4, ['pointer', ['_SERVICE_RECORD']]],
} ],
'_SERVICE_RECORD' : [ None, {
'ServiceList' : [ 0x0, ['_SERVICE_LIST_ENTRY']],
'ServiceName' : [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
'DisplayName' : [ 0xc, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
'Order' : [ 0x10, ['unsigned int']],
'Tag' : [ 0x18, ['array', 4, ['unsigned char']]],
'DriverName' : [ 0x24, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'ServiceProcess' : [ 0x24, ['pointer', ['_SERVICE_PROCESS']]],
'Type' : [ 0x28, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
'State' : [ 0x2c, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
} ],
'_SERVICE_PROCESS' : [ None, {
'BinaryPath' : [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'ProcessId' : [ 0xc, ['unsigned int']],
} ],
}
svcscan_base_x64 = {
'_SERVICE_HEADER': [ None, {
'Tag': [ 0x0, ['array', 4, ['unsigned char']]],
'ServiceRecord': [ 0x10, ['pointer', ['_SERVICE_RECORD']]],
} ],
'_SERVICE_LIST_ENTRY' : [ 0x8, {
'Blink' : [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
'Flink' : [ 0x10, ['pointer', ['_SERVICE_RECORD']]],
} ],
'_SERVICE_RECORD' : [ None, {
'ServiceList' : [ 0x0, ['_SERVICE_LIST_ENTRY']],
'ServiceName' : [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
'DisplayName' : [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
'Order' : [ 0x18, ['unsigned int']],
'Tag' : [ 0x20, ['array', 4, ['unsigned char']]],
'DriverName' : [ 0x30, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'ServiceProcess' : [ 0x30, ['pointer', ['_SERVICE_PROCESS']]],
'Type' : [ 0x38, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
'State' : [ 0x3C, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
} ],
'_SERVICE_PROCESS': [ None, {
'BinaryPath': [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
'ProcessId': [ 0x18, ['unsigned int']],
} ],
}
#--------------------------------------------------------------------------------
# object Classes
#--------------------------------------------------------------------------------
class _SERVICE_RECORD_LEGACY(obj.CType):
    "Service records for XP/2003 x86 and x64"

    @property
    def Binary(self):
        """Binary path for process services, driver name for kernel ones."""
        # Services that aren't running keep no path in memory
        # (if needed, query the registry key instead).
        if str(self.State) != 'SERVICE_RUNNING':
            return obj.NoneObject("No path, service isn't running")
        # Storage location depends on whether the service backs a
        # user-mode process or a kernel driver.
        if 'PROCESS' not in str(self.Type):
            return self.DriverName.dereference()
        return self.ServiceProcess.BinaryPath.dereference()

    @property
    def Pid(self):
        """Process ID of the hosting process, when one exists."""
        if str(self.State) == 'SERVICE_RUNNING' and 'PROCESS' in str(self.Type):
            return self.ServiceProcess.ProcessId
        return obj.NoneObject("Cannot get process ID")

    def is_valid(self):
        """Sanity-check the record: Order must be a plausible index."""
        if not obj.CType.is_valid(self):
            return False
        return self.Order > 0 and self.Order < 0xFFFF

    def traverse(self):
        """Yield this record, then every valid record reachable via Blink."""
        node = self  # include this object in the list
        while node and node.is_valid():
            yield node
            node = node.ServiceList.Blink.dereference()
class _SERVICE_RECORD_RECENT(_SERVICE_RECORD_LEGACY):
    "Service records for 2008, Vista, 7 x86 and x64"

    def traverse(self):
        """Yield this record, then walk the singly-linked PrevEntry chain."""
        yield self  # include this object in the list
        # Dereference the pointer so the validity checks apply to the
        # _SERVICE_RECORD itself rather than to the pointer object.
        node = self.PrevEntry.dereference()
        while node and node.is_valid():
            yield node
            node = node.PrevEntry.dereference()
class _SERVICE_HEADER(obj.CType):
    "Service headers for 2008, Vista, 7 x86 and x64"

    def is_valid(self):
        "Check some fields for validity"
        # A valid header points at a sane service record with a plausible
        # Order value (evaluated lazily, left to right).
        return (obj.CType.is_valid(self) and
                self.ServiceRecord.is_valid() and
                self.ServiceRecord.Order < 0xFFFF)
#--------------------------------------------------------------------------------
# profile modifications
#--------------------------------------------------------------------------------
class ServiceBase(obj.ProfileModification):
    """The base applies to XP and 2003 SP0-SP1"""
    before = ['WindowsOverlay', 'WindowsObjectClasses']
    conditions = {'os': lambda x: x == 'windows'}

    def modification(self, profile):
        # Register the legacy record/header classes, the "sErv" tag that is
        # scanned for in services.exe memory, and the x86 vtypes as default.
        profile.object_classes.update({
            '_SERVICE_RECORD': _SERVICE_RECORD_LEGACY,
            '_SERVICE_HEADER': _SERVICE_HEADER,
            })
        profile.merge_overlay({'VOLATILITY_MAGIC': [ None, {
            'ServiceTag': [ 0x0, ['VolatilityMagic', dict(value = "sErv")]]
            }]})
        profile.vtypes.update(svcscan_base_x86)
class ServiceBasex64(obj.ProfileModification):
    """This overrides the base x86 vtypes with x64 vtypes"""
    # Runs after ServiceBase so the x64 structure layouts win.
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        profile.vtypes.update(svcscan_base_x64)
class ServiceVista(obj.ProfileModification):
    """Override the base with OC's for Vista, 2008, and 7"""
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x >= 6}

    def modification(self, profile):
        # Vista+ uses the singly-linked record class and scans for the
        # "serH" header tag instead of "sErv".
        profile.object_classes.update({
            '_SERVICE_RECORD': _SERVICE_RECORD_RECENT,
            })
        profile.merge_overlay({'VOLATILITY_MAGIC': [ None, {
            'ServiceTag': [ 0x0, ['VolatilityMagic', dict(value = "serH")]]
            }]})
class ServiceVistax86(obj.ProfileModification):
    """Override the base with vtypes for x86 Vista, 2008, and 7"""
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x >= 6,
                  'memory_model': lambda x: x == '32bit'}

    def modification(self, profile):
        # Field offsets for the Vista+ x86 _SERVICE_RECORD layout; PrevEntry
        # replaces the XP-era doubly-linked ServiceList.
        profile.merge_overlay({'_SERVICE_RECORD': [ None, {
            'PrevEntry': [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
            'ServiceName': [ 0x4, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'DisplayName': [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'Order': [ 0xC, ['unsigned int']],
            'ServiceProcess': [ 0x1C, ['pointer', ['_SERVICE_PROCESS']]],
            'DriverName': [ 0x1C, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
            'Type' : [ 0x20, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
            'State': [ 0x24, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
            }]})
class ServiceVistax64(obj.ProfileModification):
    """Override the base with vtypes for x64 Vista, 2008, and 7"""
    before = ['WindowsOverlay', 'WindowsObjectClasses', 'ServiceBase']
    conditions = {'os': lambda x: x == 'windows',
                  'major': lambda x: x >= 6,
                  'memory_model': lambda x: x == '64bit'}

    def modification(self, profile):
        # Field offsets for the Vista+ x64 _SERVICE_RECORD layout; PrevEntry
        # replaces the XP-era doubly-linked ServiceList.
        profile.merge_overlay({'_SERVICE_RECORD': [ None, {
            'PrevEntry': [ 0x0, ['pointer', ['_SERVICE_RECORD']]],
            'ServiceName': [ 0x8, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'DisplayName': [ 0x10, ['pointer', ['String', dict(encoding = 'utf16', length = 512)]]],
            'Order': [ 0x18, ['unsigned int']],
            'ServiceProcess': [ 0x28, ['pointer', ['_SERVICE_PROCESS']]],
            'DriverName': [ 0x28, ['pointer', ['String', dict(encoding = 'utf16', length = 256)]]],
            'Type' : [ 0x30, ['Flags', {'bitmap': SERVICE_TYPE_FLAGS}]],
            'State': [ 0x34, ['Enumeration', dict(target = 'long', choices = SERVICE_STATE_ENUM)]],
            }]})
#--------------------------------------------------------------------------------
# svcscan plugin
#--------------------------------------------------------------------------------
class SvcScan(common.AbstractWindowsCommand):
"Scan for Windows services"
    def calculate(self):
        """Scan services.exe process memory for service records.

        Yields _SERVICE_RECORD objects; the scan strategy depends on the
        profile version (XP/2003 record tags vs Vista+ header tags).
        """
        addr_space = utils.load_as(self._config)
        # Get the version we're analyzing
        version = (addr_space.profile.metadata.get('major', 0),
                   addr_space.profile.metadata.get('minor', 0))
        # The tag scanned for ("sErv" or "serH") is set by the profile mods.
        tag = obj.VolMagic(addr_space).ServiceTag.v()
        # On systems more recent than XP/2003, the serH marker doesn't
        # find *all* services, but the ones it does find have linked
        # lists to the others. We use this variable to track which
        # ones we've seen so as to not yield duplicates.
        records = []
        for task in tasks.pslist(addr_space):
            # We only want the Service Control Manager process
            if str(task.ImageFileName).lower() != "services.exe":
                continue
            # Process AS must be valid
            process_space = task.get_process_address_space()
            if process_space == None:
                continue
            # Find all instances of the record tag
            for address in task.search_process_memory([tag]):
                if version <= (5, 2):
                    # Windows XP/2003: the tag sits inside the record, so
                    # back up by the Tag member offset to get the start.
                    rec = obj.Object("_SERVICE_RECORD", offset = address -
                        addr_space.profile.get_obj_offset('_SERVICE_RECORD', 'Tag'),
                        vm = process_space
                        )
                    # Apply our sanity checks
                    if rec.is_valid():
                        yield rec
                else:
                    # Windows Vista, 2008, and 7
                    svc_hdr = obj.Object('_SERVICE_HEADER', offset = address,
                        vm = process_space)
                    # Apply our sanity checks
                    if svc_hdr.is_valid():
                        # Since we walk the s-list backwards, if we've seen
                        # an object, then we've also seen all objects that
                        # exist before it, thus we can break at that time.
                        for rec in svc_hdr.ServiceRecord.traverse():
                            if rec in records:
                                break
                            records.append(rec)
                            yield rec
def render_dot(self, outfd, data):
"""Generate a dot graph of service relationships.
This currently only works for XP/2003 profiles,
because the linked list was removed after that.
"""
## Collect all the service records from calculate()
all_services = [d for d in data]
## Abort if we're not using the supported profiles
if all_services[0].obj_vm.profile.metadata.get('major', 0) != 5:
debug.error("This profile does not support --output=dot format")
objects = set()
links = set()
for svc in all_services:
label = "{{ {0:#x} \\n {1} \\n {2} \\n F:{3:#x} B:{4:#x} }}".format(
svc.obj_offset,
svc.ServiceName.dereference(),
str(svc.State),
svc.ServiceList.Flink.v(),
svc.ServiceList.Blink.v())
objects.add('"{0:#x}" [label="{1}" shape="record"];\n'.format(
svc.obj_offset, label))
## Check the linked list pointers
flink = svc.ServiceList.Flink.dereference()
blink = svc.ServiceList.Blink.dereference()
if flink.is_valid():
links.add('"{0:#x}" -> "{1:#x}" [];\n'.format(
svc.obj_offset, flink.obj_offset))
if blink.is_valid():
links.add('"{0:#x}" -> "{1:#x}" [];\n'.format(
svc.obj_offset, blink.obj_offset))
## Now write the graph nodes
outfd.write("digraph svctree { \ngraph [rankdir = \"TB\"];\n")
for item in objects:
outfd.write(item)
for link in links:
outfd.write(link)
outfd.write("}\n")
def render_text(self, outfd, data):
if self._config.VERBOSE:
regapi = registryapi.RegistryApi(self._config)
for rec in data:
# This can't possibly look neat in a table with columns...
outfd.write("Offset: {0:#x}\n".format(rec.obj_offset))
outfd.write("Order: {0}\n".format(rec.Order))
outfd.write("Process ID: {0}\n".format(rec.Pid))
outfd.write("Service Name: {0}\n".format(rec.ServiceName.dereference()))
outfd.write("Display Name: {0}\n".format(rec.DisplayName.dereference()))
outfd.write("Service Type: {0}\n".format(rec.Type))
outfd.write("Service State: {0}\n".format(rec.State))
outfd.write("Binary Path: {0}\n".format(rec.Binary))
if self._config.VERBOSE:
ccs = regapi.reg_get_currentcontrolset()
val = regapi.reg_get_value(
hive_name = "system",
key = "{0}\\services\\{1}\\Parameters".format(ccs, rec.ServiceName.dereference()),
value = "ServiceDll")
if val is not None:
outfd.write("ServiceDll: {0}\n".format(val))
outfd.write("\n")
| gpl-2.0 |
ganeti-github-testing/ganeti-test-1 | lib/client/gnt_instance.py | 1 | 62250 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Instance related commands"""
# pylint: disable=W0401,W0614,C0103
# W0401: Wildcard import ganeti.cli
# W0614: Unused import %s from wildcard import (since we need cli)
# C0103: Invalid name gnt-instance
import copy
import itertools
import simplejson
import logging
from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
from ganeti import compat
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import ssh
from ganeti import objects
from ganeti import ht
# Target-selection modes accepted by the multi-instance commands; see
# _ExpandMultiNames for how each mode is expanded into instance names.
_EXPAND_CLUSTER = "cluster"
_EXPAND_NODES_BOTH = "nodes"
_EXPAND_NODES_PRI = "nodes-pri"
_EXPAND_NODES_SEC = "nodes-sec"
_EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags"
_EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
_EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
_EXPAND_INSTANCES = "instances"
_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"
# The subset of modes that select nodes by tag rather than by name
_EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([
  _EXPAND_NODES_BOTH_BY_TAGS,
  _EXPAND_NODES_PRI_BY_TAGS,
  _EXPAND_NODES_SEC_BY_TAGS,
  ])
#: default list of options for L{ListInstances}
_LIST_DEF_FIELDS = [
  "name", "hypervisor", "os", "pnode", "status", "oper_ram",
  ]
# Sentinel for "argument not supplied" (None can be a legitimate value)
_MISSING = object()
# NOTE(review): presumably enables environment-variable overrides for the
# "list" sub-command's defaults -- confirm against cli.py
_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])
# Validator for the JSON file consumed by BatchCreate: a list of dicts,
# one per OpInstanceCreate
_INST_DATA_VAL = ht.TListOf(ht.TDict)
def _ExpandMultiNames(mode, names, client=None):
  """Expand the given names using the passed mode.

  For _EXPAND_CLUSTER, all instances will be returned. For
  _EXPAND_NODES_PRI/SEC, all instances having those nodes as
  primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
  instances having those nodes as either primary or secondary will be
  returned. For _EXPAND_INSTANCES, the given instances will be
  returned.

  @param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
      L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
      L{_EXPAND_INSTANCES}
  @param names: a list of names; for cluster, it must be empty,
      and for node and instance it must be a list of valid item
      names (short names are valid as usual, e.g. node1 instead of
      node1.example.com)
  @param client: optional luxi client to reuse; a new one is created
      when not given
  @rtype: list
  @return: the list of names after the expansion
  @raise errors.ProgrammerError: for unknown selection type
  @raise errors.OpPrereqError: for invalid input parameters

  """
  # pylint: disable=W0142
  if client is None:
    client = GetClient()
  if mode == _EXPAND_CLUSTER:
    if names:
      raise errors.OpPrereqError("Cluster filter mode takes no arguments",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances([], ["name"], False)
    inames = [row[0] for row in idata]
  elif (mode in _EXPAND_NODES_TAGS_MODES or
        mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
    if mode in _EXPAND_NODES_TAGS_MODES:
      if not names:
        raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
      ndata = client.QueryNodes([], ["name", "pinst_list",
                                     "sinst_list", "tags"], False)
      ndata = [row for row in ndata if set(row[3]).intersection(names)]
    else:
      if not names:
        raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
      ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
                                False)
    ipri = [row[1] for row in ndata]
    pri_names = list(itertools.chain(*ipri))
    isec = [row[2] for row in ndata]
    sec_names = list(itertools.chain(*isec))
    if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
      inames = pri_names + sec_names
    elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
      inames = pri_names
    elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
      inames = sec_names
    else:
      # Defensive only: the outer elif restricts mode to the six node
      # selection modes handled above. (The message previously read
      # "Unhandled shutdown type", a misleading copy-paste leftover.)
      raise errors.ProgrammerError("Unhandled node selection mode '%s'" % mode)
  elif mode == _EXPAND_INSTANCES:
    if not names:
      raise errors.OpPrereqError("No instance names passed",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances(names, ["name"], False)
    inames = [row[0] for row in idata]
  elif mode == _EXPAND_INSTANCES_BY_TAGS:
    if not names:
      raise errors.OpPrereqError("No instance tags passed",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances([], ["name", "tags"], False)
    inames = [row[0] for row in idata if set(row[1]).intersection(names)]
  else:
    raise errors.OpPrereqError("Unknown mode '%s'" % mode, errors.ECODE_INVAL)
  return inames
def _EnsureInstancesExist(client, names):
"""Check for and ensure the given instance names exist.
This function will raise an OpPrereqError in case they don't
exist. Otherwise it will exit cleanly.
@type client: L{ganeti.luxi.Client}
@param client: the client to use for the query
@type names: list
@param names: the list of instance names to query
@raise errors.OpPrereqError: in case any instance is missing
"""
# TODO: change LUInstanceQuery to that it actually returns None
# instead of raising an exception, or devise a better mechanism
result = client.QueryInstances(names, ["name"], False)
for orig_name, row in zip(names, result):
if row[0] is None:
raise errors.OpPrereqError("Instance '%s' does not exist" % orig_name,
errors.ECODE_NOENT)
def GenericManyOps(operation, fn):
  """Generic multi-instance operations.

  Returns a command function that expands the selection given on the
  command line, asks for confirmation when acting on several instances,
  builds one opcode per instance via C{fn} and submits them as
  individual jobs.

  @param operation: human readable operation name, used in the
      confirmation prompt
  @param fn: callable taking C{(instance_name, opts)} and returning the
      opcode for that instance

  """
  def realfn(opts, args):
    if opts.multi_mode is None:
      opts.multi_mode = _EXPAND_INSTANCES
    cl = GetClient()
    selection = _ExpandMultiNames(opts.multi_mode, args, client=cl)
    if not selection:
      if opts.multi_mode != _EXPAND_CLUSTER:
        raise errors.OpPrereqError("Selection filter does not match"
                                   " any instances", errors.ECODE_INVAL)
      ToStdout("Cluster is empty, no instances to shutdown")
      return 0
    multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(selection) > 1
    # Confirmation is only needed for real multi-instance operations
    # and can be bypassed with --force-multi
    if multi_on and not opts.force_multi:
      if not ConfirmOperation(selection, "instances", operation):
        return 1
    jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts)
    for instance in selection:
      jex.QueueJob(instance, fn(instance, opts))
    results = jex.WaitOrShow(not opts.submit_only)
    return int(not compat.all(row[0] for row in results))
  return realfn
def ListInstances(opts, args):
  """List instances and their properties.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  fields = ParseFields(opts.output, _LIST_DEF_FIELDS)
  # Multi-valued fields are rendered as comma-joined strings
  comma_join = (lambda value: ",".join(str(item) for item in value), False)
  multi_valued = ["tags", "disk.sizes", "nic.macs", "nic.ips", "nic.modes",
                  "nic.links", "nic.bridges", "nic.networks",
                  "snodes", "snodes.group", "snodes.group.uuid"]
  fmtoverride = dict.fromkeys(multi_valued, comma_join)
  cl = GetClient()
  return GenericList(constants.QR_INSTANCE, fields, args, opts.units,
                     opts.separator, not opts.no_headers,
                     format_override=fmtoverride, verbose=opts.verbose,
                     force_filter=opts.force_filter, cl=cl)
def ListInstanceFields(opts, args):
  """List instance fields.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: fields to list, or empty for all
  @rtype: int
  @return: the desired exit code

  """
  show_headers = not opts.no_headers
  return GenericListFields(constants.QR_INSTANCE, args, opts.separator,
                           show_headers)
def AddInstance(opts, args):
  """Add an instance to the cluster.

  Thin wrapper over L{GenericInstanceCreate} using the plain
  instance-creation mode.

  """
  mode = constants.INSTANCE_CREATE
  return GenericInstanceCreate(mode, opts, args)
def BatchCreate(opts, args):
  """Create instances using a definition file.

  This function reads a json file with L{opcodes.OpInstanceCreate}
  serialisations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain one element, the json filename
  @rtype: int
  @return: the desired exit code

  """
  (json_filename,) = args
  cl = GetClient()
  try:
    instance_data = simplejson.loads(utils.ReadFile(json_filename))
  # "except ... as" keeps Python 2.6+/3 compatibility, unlike the
  # old comma form
  except Exception as err: # pylint: disable=W0703
    ToStderr("Can't parse the instance definition file: %s" % str(err))
    return 1
  if not _INST_DATA_VAL(instance_data):
    ToStderr("The instance definition file is not %s" % _INST_DATA_VAL)
    return 1
  instances = []
  possible_params = set(opcodes.OpInstanceCreate.GetAllSlots())
  for (idx, inst) in enumerate(instance_data):
    unknown = set(inst.keys()) - possible_params
    if unknown:
      # TODO: Suggest closest match for more user friendly experience
      raise errors.OpPrereqError("Unknown fields in definition %s: %s" %
                                 (idx, utils.CommaJoin(unknown)),
                                 errors.ECODE_INVAL)
    op = opcodes.OpInstanceCreate(**inst) # pylint: disable=W0142
    op.Validate(False)
    instances.append(op)
  op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator,
                                    instances=instances)
  result = SubmitOrSend(op, opts, cl=cl)
  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)
  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)
  results = jex.GetResults()
  bad_cnt = len([row for row in results if not row[0]])
  if bad_cnt == 0:
    ToStdout("All instances created successfully.")
    rcode = constants.EXIT_SUCCESS
  else:
    ToStdout("There were %s errors during the creation.", bad_cnt)
    rcode = constants.EXIT_FAILURE
  return rcode
def ReinstallInstance(opts, args):
  """Reinstall an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of the
      instance to be reinstalled
  @rtype: int
  @return: the desired exit code

  """
  # first, compute the desired name list
  if opts.multi_mode is None:
    opts.multi_mode = _EXPAND_INSTANCES
  inames = _ExpandMultiNames(opts.multi_mode, args)
  if not inames:
    raise errors.OpPrereqError("Selection filter does not match any instances",
                               errors.ECODE_INVAL)
  # second, if requested, ask for an OS (interactive menu built from the
  # OS diagnose opcode)
  if opts.select_os is True:
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    result = SubmitOpCode(op, opts=opts)
    if not result:
      ToStdout("Can't get the OS list")
      return 1
    ToStdout("Available OS templates:")
    number = 0
    choices = []
    for (name, variants) in result:
      for entry in CalculateOSNames(name, variants):
        ToStdout("%3s: %s", number, entry)
        # (key, value, display) tuples as expected by AskUser
        choices.append(("%s" % number, entry, entry))
        number += 1
    choices.append(("x", "exit", "Exit gnt-instance reinstall"))
    selected = AskUser("Enter OS template number (or x to abort):",
                       choices)
    if selected == "exit":
      ToStderr("User aborted reinstall, exiting")
      return 1
    os_name = selected
    os_msg = "change the OS to '%s'" % selected
  else:
    os_name = opts.os
    if opts.os is not None:
      os_msg = "change the OS to '%s'" % os_name
    else:
      os_msg = "keep the same OS"
  # third, get confirmation: multi-reinstall requires --force-multi,
  # single-reinstall either --force or --force-multi (--force-multi is
  # a stronger --force)
  multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
  if multi_on:
    warn_msg = ("Note: this will remove *all* data for the"
                " below instances! It will %s.\n" % os_msg)
    if not (opts.force_multi or
            ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)):
      return 1
  else:
    if not (opts.force or opts.force_multi):
      usertext = ("This will reinstall the instance '%s' (and %s) which"
                  " removes all data. Continue?") % (inames[0], os_msg)
      if not AskUser(usertext):
        return 1
  # finally, queue one reinstall job per selected instance
  jex = JobExecutor(verbose=multi_on, opts=opts)
  for instance_name in inames:
    op = opcodes.OpInstanceReinstall(instance_name=instance_name,
                                     os_type=os_name,
                                     force_variant=opts.force_variant,
                                     osparams=opts.osparams,
                                     osparams_private=opts.osparams_private,
                                     osparams_secret=opts.osparams_secret)
    jex.QueueJob(instance_name, op)
  results = jex.WaitOrShow(not opts.submit_only)
  if compat.all(map(compat.fst, results)):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE
def RemoveInstance(opts, args):
  """Remove an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the instance to be removed
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  cl = GetClient()
  if not opts.force:
    # Check existence first so the user is not asked about a typo
    _EnsureInstancesExist(cl, [instance_name])
    usertext = ("This will remove the volumes of the instance %s"
                " (including mirrors), thus removing all the data"
                " of the instance. Continue?") % instance_name
    if not AskUser(usertext):
      return 1
  op = opcodes.OpInstanceRemove(instance_name=instance_name,
                                ignore_failures=opts.ignore_failures,
                                shutdown_timeout=opts.shutdown_timeout)
  SubmitOrSend(op, opts, cl=cl)
  return 0
def RenameInstance(opts, args):
  """Rename an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain two elements, the old and the
      new instance names
  @rtype: int
  @return: the desired exit code

  """
  old_name = args[0]
  new_name = args[1]
  if not opts.name_check:
    prompt = ("As you disabled the check of the DNS entry, please verify"
              " that '%s' is a FQDN. Continue?" % new_name)
    if not AskUser(prompt):
      return 1
  op = opcodes.OpInstanceRename(instance_name=old_name,
                                new_name=new_name,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check)
  result = SubmitOrSend(op, opts)
  if result:
    ToStdout("Instance '%s' renamed to '%s'", old_name, result)
  return 0
def ActivateDisks(opts, args):
  """Activate an instance's disks.

  This serves two purposes:

    - it allows (as long as the instance is not running)
      mounting the disks and modifying them from the node
    - it repairs inactive secondary drbds

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpInstanceActivateDisks(instance_name=args[0],
                                       ignore_size=opts.ignore_size,
                                       wait_for_sync=opts.wait_for_sync)
  # Print one "node:instance:device" line per activated disk
  for (host, iname, nname) in SubmitOrSend(op, opts):
    ToStdout("%s:%s:%s", host, iname, nname)
  return 0
def DeactivateDisks(opts, args):
  """Deactivate an instance's disks.

  This function takes the instance name, looks for its primary node
  and then tries to shutdown its block devices on that node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  op = opcodes.OpInstanceDeactivateDisks(instance_name=args[0],
                                         force=opts.force)
  SubmitOrSend(op, opts)
  return 0
def RecreateDisks(opts, args):
  """Recreate an instance's disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  disks = []
  if opts.disks:
    for (didx, ddict) in opts.disks:
      didx = int(didx)
      if not ht.TDict(ddict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      if constants.IDISK_SIZE in ddict:
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        # "except ... as" keeps Python 2.6+/3 compatibility
        except ValueError as err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      if constants.IDISK_SPINDLES in ddict:
        try:
          ddict[constants.IDISK_SPINDLES] = \
            int(ddict[constants.IDISK_SPINDLES])
        except ValueError as err:
          raise errors.OpPrereqError("Invalid spindles for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      disks.append((didx, ddict))
  # TODO: Verify modifyable parameters (already done in
  # LUInstanceRecreateDisks, but it'd be nice to have in the client)
  if opts.node:
    if opts.iallocator:
      msg = "At most one of either --nodes or --iallocator can be passed"
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
    (pnode, snode) = SplitNodeOption(opts.node)
    nodes = [pnode]
    if snode is not None:
      nodes.append(snode)
  else:
    nodes = []
  op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
                                       disks=disks, nodes=nodes,
                                       iallocator=opts.iallocator)
  SubmitOrSend(op, opts)
  return 0
def GrowDisk(opts, args):
  """Grow an instance's disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain three elements, the target instance name,
      the target disk id, and the target growth
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]
  disk = args[1]
  try:
    disk = int(disk)
  # "except ... as" keeps Python 2.6+/3 compatibility
  except (TypeError, ValueError) as err:
    raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
                               errors.ECODE_INVAL)
  try:
    amount = utils.ParseUnit(args[2])
  except errors.UnitParseError:
    raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
                               errors.ECODE_INVAL)
  op = opcodes.OpInstanceGrowDisk(instance_name=instance,
                                  disk=disk, amount=amount,
                                  wait_for_sync=opts.wait_for_sync,
                                  absolute=opts.absolute)
  SubmitOrSend(op, opts)
  return 0
def _StartupInstance(name, opts):
  """Startup instances.

  This returns the opcode to start an instance, and its decorator will
  wrap this into a loop starting all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  op = opcodes.OpInstanceStartup(instance_name=name,
                                 force=opts.force,
                                 ignore_offline_nodes=opts.ignore_offline,
                                 no_remember=opts.no_remember,
                                 startup_paused=opts.startup_paused)
  # Attach optional parameter overrides only when given, so that the
  # opcode otherwise uses the cluster defaults
  for (attr, value) in [("hvparams", opts.hvparams),
                        ("beparams", opts.beparams)]:
    if value:
      setattr(op, attr, value)
  return op
def _RebootInstance(name, opts):
  """Reboot instance(s).

  This returns the opcode to reboot an instance, and its decorator
  will wrap this into a loop rebooting all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  kwargs = {
    "instance_name": name,
    "reboot_type": opts.reboot_type,
    "ignore_secondaries": opts.ignore_secondaries,
    "shutdown_timeout": opts.shutdown_timeout,
    }
  return opcodes.OpInstanceReboot(**kwargs)
def _ShutdownInstance(name, opts):
  """Shutdown an instance.

  This returns the opcode to shutdown an instance, and its decorator
  will wrap this into a loop shutting down all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  kwargs = {
    "instance_name": name,
    "force": opts.force,
    "timeout": opts.timeout,
    "ignore_offline_nodes": opts.ignore_offline,
    "no_remember": opts.no_remember,
    }
  return opcodes.OpInstanceShutdown(**kwargs)
def ReplaceDisks(opts, args):
  """Replace the disks of an instance

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  new_2ndary = opts.dst_node
  iallocator = opts.iallocator
  if opts.disks is None:
    disks = []
  else:
    try:
      disks = [int(i) for i in opts.disks.split(",")]
    # "except ... as" keeps Python 2.6+/3 compatibility
    except (TypeError, ValueError) as err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                 errors.ECODE_INVAL)
  # Exactly one replacement strategy must be selected
  cnt = [opts.on_primary, opts.on_secondary, opts.auto,
         new_2ndary is not None, iallocator is not None].count(True)
  if cnt != 1:
    raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -I"
                               " options must be passed", errors.ECODE_INVAL)
  elif opts.on_primary:
    mode = constants.REPLACE_DISK_PRI
  elif opts.on_secondary:
    mode = constants.REPLACE_DISK_SEC
  elif opts.auto:
    mode = constants.REPLACE_DISK_AUTO
    if disks:
      raise errors.OpPrereqError("Cannot specify disks when using automatic"
                                 " mode", errors.ECODE_INVAL)
  elif new_2ndary is not None or iallocator is not None:
    # replace secondary
    mode = constants.REPLACE_DISK_CHG
  op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
                                      remote_node=new_2ndary, mode=mode,
                                      iallocator=iallocator,
                                      early_release=opts.early_release,
                                      ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts)
  return 0
def FailoverInstance(opts, args):
  """Failover an instance.

  The failover is done by shutting it down on its present node and
  starting it on the secondary.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  instance_name = args[0]
  # An explicit target node and an iallocator are mutually exclusive
  if opts.iallocator and opts.dst_node:
    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
                               " node (-n) but not both", errors.ECODE_INVAL)
  if not opts.force:
    _EnsureInstancesExist(cl, [instance_name])
    usertext = ("Failover will happen to image %s."
                " This requires a shutdown of the instance. Continue?" %
                (instance_name,))
    if not AskUser(usertext):
      return 1
  op = opcodes.OpInstanceFailover(instance_name=instance_name,
                                  ignore_consistency=opts.ignore_consistency,
                                  shutdown_timeout=opts.shutdown_timeout,
                                  iallocator=opts.iallocator,
                                  target_node=opts.dst_node,
                                  ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts, cl=cl)
  return 0
def MigrateInstance(opts, args):
  """Migrate an instance.

  The migrate is done without shutdown.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  instance_name = args[0]
  # An explicit target node and an iallocator are mutually exclusive
  if opts.iallocator and opts.dst_node:
    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
                               " node (-n) but not both", errors.ECODE_INVAL)
  if not opts.force:
    _EnsureInstancesExist(cl, [instance_name])
    if opts.cleanup:
      usertext = ("Instance %s will be recovered from a failed migration."
                  " Note that the migration procedure (including cleanup)" %
                  (instance_name,))
    else:
      usertext = ("Instance %s will be migrated. Note that migration" %
                  (instance_name,))
    usertext += (" might impact the instance if anything goes wrong"
                 " (e.g. due to bugs in the hypervisor). Continue?")
    if not AskUser(usertext):
      return 1
  # --non-live and --migration-mode are mutually exclusive; this check
  # can be dropped once --non-live is removed
  if not opts.live and opts.migration_mode is not None:
    raise errors.OpPrereqError("Only one of the --non-live and "
                               "--migration-mode options can be passed",
                               errors.ECODE_INVAL)
  if opts.live:
    mode = opts.migration_mode
  else:
    # --non-live passed
    mode = constants.HT_MIGRATION_NONLIVE
  op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
                                 cleanup=opts.cleanup,
                                 iallocator=opts.iallocator,
                                 target_node=opts.dst_node,
                                 allow_failover=opts.allow_failover,
                                 allow_runtime_changes=opts.allow_runtime_chgs,
                                 ignore_ipolicy=opts.ignore_ipolicy,
                                 ignore_hvversions=opts.ignore_hvversions)
  SubmitOrSend(op, cl=cl, opts=opts)
  return 0
def MoveInstance(opts, args):
  """Move an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  cl = GetClient()
  if not opts.force:
    usertext = ("Instance %s will be moved."
                " This requires a shutdown of the instance. Continue?" %
                (instance_name,))
    if not AskUser(usertext):
      return 1
  op = opcodes.OpInstanceMove(instance_name=instance_name,
                              target_node=opts.node,
                              compress=opts.compress,
                              shutdown_timeout=opts.shutdown_timeout,
                              ignore_consistency=opts.ignore_consistency,
                              ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts, cl=cl)
  return 0
def ConnectToInstanceConsole(opts, args):
  """Connect to the console of an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  cl = GetClient()
  try:
    cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
    idata = cl.QueryInstances([instance_name], ["console", "oper_state"], False)
    if not idata:
      raise errors.OpPrereqError("Instance '%s' does not exist" % instance_name,
                                 errors.ECODE_NOENT)
  finally:
    # Ensure client connection is closed while external commands are run
    cl.Close()
  # Drop the reference as well; from here on only local data is used
  del cl
  ((console_data, oper_state), ) = idata
  if not console_data:
    if oper_state:
      # Instance is running
      raise errors.OpExecError("Console information for instance %s is"
                               " unavailable" % instance_name)
    else:
      raise errors.OpExecError("Instance %s is not running, can't get console" %
                               instance_name)
  # Hand off to the console dispatcher (message/VNC/SPICE/SSH)
  return _DoConsole(objects.InstanceConsole.FromDict(console_data),
                    opts.show_command, cluster_name)
def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
               _runcmd_fn=utils.RunCmd):
  """Acts based on the result of L{opcodes.OpInstanceConsole}.

  @type console: L{objects.InstanceConsole}
  @param console: Console object
  @type show_command: bool
  @param show_command: Whether to just display commands
  @type cluster_name: string
  @param cluster_name: Cluster name as retrieved from master daemon
  @param feedback_fn: output function (injectable for tests)
  @param _runcmd_fn: command runner (injectable for tests)
  @rtype: int
  @return: L{constants.EXIT_SUCCESS}
  @raise errors.OpExecError: when the interactive SSH command fails
  @raise errors.GenericError: for an unrecognised console kind

  """
  console.Validate()
  if console.kind == constants.CONS_MESSAGE:
    feedback_fn(console.message)
  elif console.kind == constants.CONS_VNC:
    feedback_fn("Instance %s has VNC listening on %s:%s (display %s),"
                " URL <vnc://%s:%s/>",
                console.instance, console.host, console.port,
                console.display, console.host, console.port)
  elif console.kind == constants.CONS_SPICE:
    feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
                console.host, console.port)
  elif console.kind == constants.CONS_SSH:
    # Convert to string if not already one
    if isinstance(console.command, basestring):
      cmd = console.command
    else:
      cmd = utils.ShellQuoteArgs(console.command)
    srun = ssh.SshRunner(cluster_name=cluster_name)
    ssh_cmd = srun.BuildCmd(console.host, console.user, cmd,
                            port=console.port,
                            batch=True, quiet=False, tty=True)
    if show_command:
      # Only print the command; the user runs it themselves
      feedback_fn(utils.ShellQuoteArgs(ssh_cmd))
    else:
      result = _runcmd_fn(ssh_cmd, interactive=True)
      if result.failed:
        logging.error("Console command \"%s\" failed with reason '%s' and"
                      " output %r", result.cmd, result.fail_reason,
                      result.output)
        raise errors.OpExecError("Connection to console of instance %s failed,"
                                 " please check cluster configuration" %
                                 console.instance)
  else:
    raise errors.GenericError("Unknown console type '%s'" % console.kind)
  return constants.EXIT_SUCCESS
def _FormatDiskDetails(dev_type, dev, roman):
  """Formats the logical_id of a disk.

  @param dev_type: the disk template type
  @param dev: dict with the disk's information
  @param roman: whether to render integers as roman numerals
  @return: list of items for L{cli.PrintGenericInfo}

  """
  if dev_type == constants.DT_DRBD8:
    drbd = dev["drbd_info"]
    node_a = "%s, minor=%s" % (drbd["primary_node"],
                               compat.TryToRoman(drbd["primary_minor"],
                                                 convert=roman))
    node_b = "%s, minor=%s" % (drbd["secondary_node"],
                               compat.TryToRoman(drbd["secondary_minor"],
                                                 convert=roman))
    return [
      ("nodeA", node_a),
      ("nodeB", node_b),
      ("port", str(compat.TryToRoman(drbd["port"], roman))),
      ("auth key", str(drbd["secret"])),
      ]
  if dev_type == constants.DT_PLAIN:
    (vg_name, lv_name) = dev["logical_id"]
    return ["%s/%s" % (vg_name, lv_name)]
  return [str(dev["logical_id"])]
def _FormatBlockDevInfo(idx, top_level, dev, roman):
  """Show block device information.

  This is only used by L{ShowInstanceConfig}, but it's too big to be
  left for an inline definition.

  @type idx: int
  @param idx: the index of the current disk
  @type top_level: boolean
  @param top_level: if this a top-level disk?
  @type dev: dict
  @param dev: dictionary with disk information
  @type roman: boolean
  @param roman: whether to try to use roman integers
  @return: a list of either strings, tuples or lists
      (which should be formatted at a higher indent level)

  """
  def helper(dtype, status):
    """Format one line for physical device status.

    @type dtype: str
    @param dtype: a constant from the L{constants.DTS_BLOCK} set
    @type status: tuple
    @param status: a tuple as returned from L{backend.FindBlockDevice}
    @return: the string representing the status

    """
    if not status:
      return "not active"
    txt = ""
    (path, major, minor, syncp, estt, degr, ldisk_status) = status
    if major is None:
      major_string = "N/A"
    else:
      major_string = str(compat.TryToRoman(major, convert=roman))
    if minor is None:
      minor_string = "N/A"
    else:
      minor_string = str(compat.TryToRoman(minor, convert=roman))
    txt += ("%s (%s:%s)" % (path, major_string, minor_string))
    if dtype in (constants.DT_DRBD8, ):
      # DRBD devices additionally report sync progress and degradation
      if syncp is not None:
        sync_text = "*RECOVERING* %5.2f%%," % syncp
        if estt:
          sync_text += " ETA %ss" % compat.TryToRoman(estt, convert=roman)
        else:
          sync_text += " ETA unknown"
      else:
        sync_text = "in sync"
      if degr:
        degr_text = "*DEGRADED*"
      else:
        degr_text = "ok"
      if ldisk_status == constants.LDS_FAULTY:
        ldisk_text = " *MISSING DISK*"
      elif ldisk_status == constants.LDS_UNKNOWN:
        ldisk_text = " *UNCERTAIN STATE*"
      else:
        ldisk_text = ""
      txt += (" %s, status %s%s" % (sync_text, degr_text, ldisk_text))
    elif dtype == constants.DT_PLAIN:
      if ldisk_status == constants.LDS_FAULTY:
        ldisk_text = " *FAILED* (failed drive?)"
      else:
        ldisk_text = ""
      txt += ldisk_text
    return txt

  # the header
  if top_level:
    if dev["iv_name"] is not None:
      txt = dev["iv_name"]
    else:
      txt = "disk %s" % compat.TryToRoman(idx, convert=roman)
  else:
    txt = "child %s" % compat.TryToRoman(idx, convert=roman)
  if isinstance(dev["size"], int):
    nice_size = utils.FormatUnit(dev["size"], "h", roman)
  else:
    # size may already be a formatted string; pass it through
    nice_size = str(dev["size"])
  data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))]
  if top_level:
    if dev["spindles"] is not None:
      data.append(("spindles", dev["spindles"]))
    data.append(("access mode", dev["mode"]))
  if dev["logical_id"] is not None:
    try:
      l_id = _FormatDiskDetails(dev["dev_type"], dev, roman)
    except ValueError:
      # fall back to the raw logical_id when it cannot be decomposed
      l_id = [str(dev["logical_id"])]
    if len(l_id) == 1:
      data.append(("logical_id", l_id[0]))
    else:
      data.extend(l_id)
  if dev["pstatus"]:
    data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))
  if dev["sstatus"]:
    data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))
  data.append(("name", dev["name"]))
  data.append(("UUID", dev["uuid"]))
  if dev["children"]:
    # recurse for composite devices (e.g. DRBD over LVM)
    data.append(("child devices", [
      _FormatBlockDevInfo(c_idx, False, child, roman)
      for c_idx, child in enumerate(dev["children"])
      ]))
  return data
def _FormatInstanceNicInfo(idx, nic, roman=False):
  """Helper function for L{_FormatInstanceInfo()}

  Renders one NIC tuple as a list of (label, value) string pairs.

  """
  (name, uuid, ip, mac, mode, link, vlan, _, netinfo) = nic
  # Only NICs attached to a known network carry network details.
  network_name = netinfo["name"] if netinfo else None
  rows = [("nic/%s" % str(compat.TryToRoman(idx, roman)), "")]
  for label, value in (("MAC", mac),
                       ("IP", ip),
                       ("mode", mode),
                       ("link", link),
                       ("vlan", compat.TryToRoman(vlan, roman)),
                       ("network", network_name),
                       ("UUID", uuid),
                       ("name", name)):
    rows.append((label, str(value)))
  return rows
def _FormatInstanceNodesInfo(instance):
  """Helper function for L{_FormatInstanceInfo()}

  Formats the primary node (with its group) and the secondary nodes
  (each with group name and group UUID) of an instance.

  """
  # Primary node is displayed together with its node group.
  pgroup = "%s (UUID %s)" % (instance["pnode_group_name"],
                             instance["pnode_group_uuid"])
  sec_descriptions = []
  for (name, group_name, group_uuid) in zip(instance["snodes"],
                                            instance["snodes_group_names"],
                                            instance["snodes_group_uuids"]):
    sec_descriptions.append("%s (group %s, group UUID %s)" %
                            (name, group_name, group_uuid))
  secs = utils.CommaJoin(sec_descriptions)
  return [
    [
      ("primary", instance["pnode"]),
      ("group", pgroup),
    ],
    [("secondaries", secs)],
  ]
def _GetVncConsoleInfo(instance):
  """Helper function for L{_FormatInstanceInfo()}

  Returns a human-readable VNC endpoint description, or None when the
  hypervisor has no VNC bind address configured.

  """
  vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
                                               None)
  if not vnc_bind_address:
    return None
  port = instance["network_port"]
  display = int(port) - constants.VNC_BASE_PORT
  if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
    # Bound to all addresses: reachable through the primary node.
    endpoint = "%s:%s (display %s)" % (instance["pnode"], port, display)
  elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
    endpoint = ("%s:%s (node %s) (display %s)" %
                (vnc_bind_address, port, instance["pnode"], display))
  else:
    # vnc bind address is a file
    endpoint = "%s:%s" % (instance["pnode"], vnc_bind_address)
  return "vnc to %s" % endpoint
def _FormatInstanceInfo(instance, roman_integers):
  """Format instance information for L{cli.PrintGenericInfo()}

  @param instance: dict of instance query data, as returned by
      OpInstanceQueryData
  @param roman_integers: whether integers should be converted to
      roman numerals where possible
  @return: nested list of (name, value) pairs suitable for
      L{cli.PrintGenericInfo()}

  """
  # "configured" state always present; runtime state only when known
  istate = "configured to be %s" % instance["config_state"]
  if instance["run_state"]:
    istate += ", actual state is %s" % instance["run_state"]
  info = [
    ("Instance name", instance["name"]),
    ("UUID", instance["uuid"]),
    ("Serial number",
     str(compat.TryToRoman(instance["serial_no"], convert=roman_integers))),
    ("Creation time", utils.FormatTime(instance["ctime"])),
    ("Modification time", utils.FormatTime(instance["mtime"])),
    ("State", istate),
    ("Nodes", _FormatInstanceNodesInfo(instance)),
    ("Operating system", instance["os"]),
    ("Operating system parameters",
     FormatParamsDictInfo(instance["os_instance"], instance["os_actual"],
                          roman_integers)),
    ]
  # network port is optional in the query result
  if "network_port" in instance:
    info.append(("Allocated network port",
                 str(compat.TryToRoman(instance["network_port"],
                                       convert=roman_integers))))
  info.append(("Hypervisor", instance["hypervisor"]))
  # only shown when a VNC bind address is configured
  console = _GetVncConsoleInfo(instance)
  if console:
    info.append(("console connection", console))
  # deprecated "memory" value, kept for one version for compatibility
  # TODO(ganeti 2.7) remove.
  be_actual = copy.deepcopy(instance["be_actual"])
  be_actual["memory"] = be_actual[constants.BE_MAXMEM]
  info.extend([
    ("Hypervisor parameters",
     FormatParamsDictInfo(instance["hv_instance"], instance["hv_actual"],
                          roman_integers)),
    ("Back-end parameters",
     FormatParamsDictInfo(instance["be_instance"], be_actual,
                          roman_integers)),
    ("NICs", [
      _FormatInstanceNicInfo(idx, nic, roman_integers)
      for (idx, nic) in enumerate(instance["nics"])
      ]),
    ("Disk template", instance["disk_template"]),
    ("Disks", [
      _FormatBlockDevInfo(idx, True, device, roman_integers)
      for (idx, device) in enumerate(instance["disks"])
      ]),
    ])
  return info
def ShowInstanceConfig(opts, args):
  """Compute instance run-time status.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: either an empty list, and then we query all
      instances, or should contain a list of instance names
  @rtype: int
  @return: the desired exit code

  """
  # --all and explicit names are mutually exclusive; one must be given
  if args and opts.show_all:
    ToStderr("Cannot use --all if you specify instance names.")
    return 1
  if not args and not opts.show_all:
    ToStderr("No instance selected."
             " Please pass in --all if you want to query all instances.\n"
             "Note that this can take a long time on a big cluster.")
    return 1

  op = opcodes.OpInstanceQueryData(instances=args, static=opts.static,
                                   use_locking=not opts.static)
  result = SubmitOpCode(op, opts=opts)
  if not result:
    ToStdout("No instances.")
    return 1

  PrintGenericInfo([_FormatInstanceInfo(instance, opts.roman_integers)
                    for instance in result.values()])
  return 0
def _ConvertNicDiskModifications(mods):
  """Converts NIC/disk modifications from CLI to opcode.

  When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
  disks at arbitrary indices, its parameter format changed.  This function
  converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
  newer format and adds support for new-style requests (e.g. "--new 4:add").

  @type mods: list of tuples
  @param mods: Modifications as given by command line parser
  @rtype: list of tuples
  @return: Modifications as understood by L{opcodes.OpInstanceSetParams}

  """
  converted = []
  for (identifier, params) in mods:
    if identifier == constants.DDM_ADD:
      # Add item as last item (legacy interface)
      action = constants.DDM_ADD
      identifier = -1
    elif identifier == constants.DDM_REMOVE:
      # Remove last item (legacy interface)
      action = constants.DDM_REMOVE
      identifier = -1
    else:
      # Modifications and adding/removing at arbitrary indices; at most
      # one of the three action keywords may appear in the parameters.
      add = params.pop(constants.DDM_ADD, _MISSING)
      remove = params.pop(constants.DDM_REMOVE, _MISSING)
      modify = params.pop(constants.DDM_MODIFY, _MISSING)

      if modify is not _MISSING:
        if add is _MISSING and remove is _MISSING:
          action = constants.DDM_MODIFY
        else:
          raise errors.OpPrereqError("Cannot modify and add/remove at the"
                                     " same time", errors.ECODE_INVAL)
      elif add is not _MISSING and remove is not _MISSING:
        raise errors.OpPrereqError("Cannot add and remove at the same time",
                                   errors.ECODE_INVAL)
      elif add is not _MISSING:
        action = constants.DDM_ADD
      elif remove is not _MISSING:
        action = constants.DDM_REMOVE
      else:
        action = constants.DDM_MODIFY

      assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))

    if action == constants.DDM_REMOVE and params:
      raise errors.OpPrereqError("Not accepting parameters on removal",
                                 errors.ECODE_INVAL)

    converted.append((action, identifier, params))
  return converted
def _ParseExtStorageParams(params):
  """Parses the disk params for ExtStorage conversions.

  Requires the provider parameter to be present and rejects attempts to
  override generic disk parameters during the conversion.

  """
  if not params:
    return params
  if constants.IDISK_PROVIDER not in params:
    raise errors.OpPrereqError("Missing required parameter '%s' when"
                               " converting to an ExtStorage disk template" %
                               constants.IDISK_PROVIDER, errors.ECODE_INVAL)
  for name in params.keys():
    if name != constants.IDISK_PROVIDER and name in constants.IDISK_PARAMS:
      raise errors.OpPrereqError("Invalid parameter '%s' when converting"
                                 " to an ExtStorage template (it is not"
                                 " allowed modifying existing disk"
                                 " parameters)" % name, errors.ECODE_INVAL)
  return params
def _ParseDiskSizes(mods):
  """Parses disk sizes in parameters.

  Spindle counts and sizes are converted from strings in place; a missing
  size is only an error when the action adds a new disk.

  """
  for (action, _, params) in mods:
    if params and constants.IDISK_SPINDLES in params:
      params[constants.IDISK_SPINDLES] = \
        int(params[constants.IDISK_SPINDLES])
    if not (params and constants.IDISK_SIZE in params):
      # No size given: only mandatory when adding a disk.
      if action == constants.DDM_ADD:
        raise errors.OpPrereqError("Missing required parameter 'size'",
                                   errors.ECODE_INVAL)
    else:
      params[constants.IDISK_SIZE] = \
        utils.ParseUnit(params[constants.IDISK_SIZE])
  return mods
def SetInstanceParams(opts, args):
  """Modifies an instance.

  All parameters take effect only at the next restart of the instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  # at least one parameter to change must be given
  if not (opts.nics or opts.disks or opts.disk_template or opts.hvparams or
          opts.beparams or opts.os or opts.osparams or opts.osparams_private
          or opts.offline_inst or opts.online_inst or opts.runtime_mem or
          opts.new_primary_node or opts.instance_communication is not None):
    ToStderr("Please give at least one of the parameters.")
    return 1

  # normalize "default" (case-insensitive) back-end parameter values
  for param in opts.beparams:
    if isinstance(opts.beparams[param], basestring):
      if opts.beparams[param].lower() == "default":
        opts.beparams[param] = constants.VALUE_DEFAULT

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
                      allowed_values=[constants.VALUE_DEFAULT])

  # same normalization for hypervisor parameters
  for param in opts.hvparams:
    if isinstance(opts.hvparams[param], basestring):
      if opts.hvparams[param].lower() == "default":
        opts.hvparams[param] = constants.VALUE_DEFAULT

  utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])
  FixHvParams(opts.hvparams)

  nics = _ConvertNicDiskModifications(opts.nics)
  # hot-modifying a NIC is implemented as remove+add; warn unless forced
  for action, _, __ in nics:
    if action == constants.DDM_MODIFY and opts.hotplug and not opts.force:
      usertext = ("You are about to hot-modify a NIC. This will be done"
                  " by removing the existing NIC and then adding a new one."
                  " Network connection might be lost. Continue?")
      if not AskUser(usertext):
        return 1
  disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))

  # verify the user provided parameters for disk template conversions
  if opts.disk_template:
    if (not opts.node and
        opts.disk_template in constants.DTS_INT_MIRROR):
      ToStderr("Changing the disk template to a mirrored one requires"
               " specifying a secondary node")
      return 1
    elif (opts.ext_params and
          opts.disk_template != constants.DT_EXT):
      ToStderr("Specifying ExtStorage parameters requires converting"
               " to the '%s' disk template" % constants.DT_EXT)
      return 1
    elif (not opts.ext_params and
          opts.disk_template == constants.DT_EXT):
      ToStderr("Provider option is missing, use either the"
               " '--ext-params' or '-e' option")
      return 1

  # file-storage options only make sense with a file-based template
  if ((opts.file_driver or
       opts.file_storage_dir) and
      not opts.disk_template in constants.DTS_FILEBASED):
    ToStderr("Specifying file-based configuration arguments requires"
             " converting to a file-based disk template")
    return 1

  ext_params = _ParseExtStorageParams(opts.ext_params)

  # --offline/--online translate to a tri-state flag (None = no change)
  if opts.offline_inst:
    offline = True
  elif opts.online_inst:
    offline = False
  else:
    offline = None

  instance_comm = opts.instance_communication

  op = opcodes.OpInstanceSetParams(instance_name=args[0],
                                   nics=nics,
                                   disks=disks,
                                   hotplug=opts.hotplug,
                                   hotplug_if_possible=opts.hotplug_if_possible,
                                   disk_template=opts.disk_template,
                                   ext_params=ext_params,
                                   file_driver=opts.file_driver,
                                   file_storage_dir=opts.file_storage_dir,
                                   remote_node=opts.node,
                                   pnode=opts.new_primary_node,
                                   hvparams=opts.hvparams,
                                   beparams=opts.beparams,
                                   runtime_mem=opts.runtime_mem,
                                   os_name=opts.os,
                                   osparams=opts.osparams,
                                   osparams_private=opts.osparams_private,
                                   force_variant=opts.force_variant,
                                   force=opts.force,
                                   wait_for_sync=opts.wait_for_sync,
                                   offline=offline,
                                   conflicts_check=opts.conflicts_check,
                                   ignore_ipolicy=opts.ignore_ipolicy,
                                   instance_communication=instance_comm)

  # even if here we process the result, we allow submit only
  result = SubmitOrSend(op, opts)

  if result:
    ToStdout("Modified instance %s", args[0])
    for param, data in result:
      ToStdout(" - %-5s -> %s", param, data)
    ToStdout("Please don't forget that most parameters take effect"
             " only at the next (re)start of the instance initiated by"
             " ganeti; restarting from within the instance will"
             " not be enough.")
    if opts.hvparams:
      ToStdout("Note that changing hypervisor parameters without performing a"
               " restart might lead to a crash while performing a live"
               " migration. This will be addressed in future Ganeti versions.")
  return 0
def ChangeGroup(opts, args):
  """Moves an instance to another group.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  (instance_name, ) = args

  client = GetClient()
  op = opcodes.OpInstanceChangeGroup(instance_name=instance_name,
                                     iallocator=opts.iallocator,
                                     target_groups=opts.to,
                                     early_release=opts.early_release)
  result = SubmitOrSend(op, opts, cl=client)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=client, opts=opts)
  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  # count jobs whose success flag (first tuple field) is false
  failures = sum(1 for row in jex.GetResults() if not row[0])
  if failures:
    ToStdout("There were %s errors while changing group of instance '%s'.",
             failures, instance_name)
    return constants.EXIT_FAILURE
  ToStdout("Instance '%s' changed group successfully.", instance_name)
  return constants.EXIT_SUCCESS
# multi-instance selection options
m_force_multi = cli_option("--force-multiple", dest="force_multi",
                           help="Do not ask for confirmation when more than"
                           " one instance is affected",
                           action="store_true", default=False)

# The options below all store a selection-mode constant into the shared
# dest "multi_mode"; they are alternative ways of choosing which
# instances a multi-instance command operates on.
m_pri_node_opt = cli_option("--primary", dest="multi_mode",
                            help="Filter by nodes (primary only)",
                            const=_EXPAND_NODES_PRI, action="store_const")

m_sec_node_opt = cli_option("--secondary", dest="multi_mode",
                            help="Filter by nodes (secondary only)",
                            const=_EXPAND_NODES_SEC, action="store_const")

m_node_opt = cli_option("--node", dest="multi_mode",
                        help="Filter by nodes (primary and secondary)",
                        const=_EXPAND_NODES_BOTH, action="store_const")

m_clust_opt = cli_option("--all", dest="multi_mode",
                         help="Select all instances in the cluster",
                         const=_EXPAND_CLUSTER, action="store_const")

m_inst_opt = cli_option("--instance", dest="multi_mode",
                        help="Filter by instance name [default]",
                        const=_EXPAND_INSTANCES, action="store_const")

# tag-based variants of the node/instance selectors above
m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
                             help="Filter by node tag",
                             const=_EXPAND_NODES_BOTH_BY_TAGS,
                             action="store_const")

m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
                                 help="Filter by primary node tag",
                                 const=_EXPAND_NODES_PRI_BY_TAGS,
                                 action="store_const")

m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
                                 help="Filter by secondary node tag",
                                 const=_EXPAND_NODES_SEC_BY_TAGS,
                                 action="store_const")

m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
                             help="Filter by instance tag",
                             const=_EXPAND_INSTANCES_BY_TAGS,
                             action="store_const")
# this is defined separately due to readability only
# (extra options for the "add" command, appended to COMMON_CREATE_OPTS)
add_opts = [
  NOSTART_OPT,
  OS_OPT,
  FORCE_VARIANT_OPT,
  NO_INSTALL_OPT,
  IGNORE_IPOLICY_OPT,
  INSTANCE_COMMUNICATION_OPT,
  HELPER_STARTUP_TIMEOUT_OPT,
  HELPER_SHUTDOWN_TIMEOUT_OPT,
  ]
#: command dispatch table: maps each sub-command name to a tuple of
#: (handler function, argument spec, option list, usage string, description)
commands = {
  "add": (
    AddInstance, [ArgHost(min=1, max=1)],
    COMMON_CREATE_OPTS + add_opts,
    "[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
    "Creates and adds a new instance to the cluster"),
  "batch-create": (
    BatchCreate, [ArgFile(min=1, max=1)],
    [DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT] + SUBMIT_OPTS,
    "<instances.json>",
    "Create a bunch of instances based on specs in the file."),
  "console": (
    ConnectToInstanceConsole, ARGS_ONE_INSTANCE,
    [SHOWCMD_OPT, PRIORITY_OPT],
    "[--show-cmd] <instance>", "Opens a console on the specified instance"),
  "failover": (
    FailoverInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT, IGNORE_CONSIST_OPT] + SUBMIT_OPTS +
    [SHUTDOWN_TIMEOUT_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
     IGNORE_IPOLICY_OPT, CLEANUP_OPT],
    "[-f] <instance>", "Stops the instance, changes its primary node and"
    " (if it was originally running) starts it on the new node"
    " (the secondary for mirrored instances or any node"
    " for shared storage)."),
  "migrate": (
    MigrateInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
     PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
     IGNORE_IPOLICY_OPT, IGNORE_HVVERSIONS_OPT, NORUNTIME_CHGS_OPT]
    + SUBMIT_OPTS,
    "[-f] <instance>", "Migrate instance to its secondary node"
    " (only for mirrored instances)"),
  "move": (
    MoveInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT] + SUBMIT_OPTS +
    [SINGLE_NODE_OPT, COMPRESS_OPT,
     SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT,
     IGNORE_IPOLICY_OPT],
    "[-f] <instance>", "Move instance to an arbitrary node"
    " (only for instances of type file and lv)"),
  "info": (
    ShowInstanceConfig, ARGS_MANY_INSTANCES,
    [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT],
    "[-s] {--all | <instance>...}",
    "Show information on the specified instance(s)"),
  "list": (
    ListInstances, ARGS_MANY_INSTANCES,
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
     FORCE_FILTER_OPT],
    "[<instance>...]",
    "Lists the instances and their status. The available fields can be shown"
    " using the \"list-fields\" command (see the man page for details)."
    " The default field list is (in order): %s." %
    utils.CommaJoin(_LIST_DEF_FIELDS),
    ),
  "list-fields": (
    ListInstanceFields, [ArgUnknown()],
    [NOHDR_OPT, SEP_OPT],
    "[fields...]",
    "Lists all available fields for instances"),
  "reinstall": (
    ReinstallInstance, [ArgInstance()],
    [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
     m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
     m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT]
    + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT,
                     OSPARAMS_PRIVATE_OPT, OSPARAMS_SECRET_OPT],
    "[-f] <instance>", "Reinstall a stopped instance"),
  "remove": (
    RemoveInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT],
    "[-f] <instance>", "Shuts down the instance and removes it"),
  "rename": (
    RenameInstance,
    [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
    [NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT],
    "<instance> <new_name>", "Rename the instance"),
  "replace-disks": (
    ReplaceDisks, ARGS_ONE_INSTANCE,
    [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
     NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
    "[-s|-p|-a|-n NODE|-I NAME] <instance>",
    "Replaces disks for the instance"),
  "modify": (
    SetInstanceParams, ARGS_ONE_INSTANCE,
    [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT] + SUBMIT_OPTS +
    [DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
     OSPARAMS_OPT, OSPARAMS_PRIVATE_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT,
     OFFLINE_INST_OPT, ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
     NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT, HOTPLUG_OPT,
     HOTPLUG_IF_POSSIBLE_OPT, INSTANCE_COMMUNICATION_OPT,
     EXT_PARAMS_OPT, FILESTORE_DRIVER_OPT, FILESTORE_DIR_OPT],
    "<instance>", "Alters the parameters of an instance"),
  "shutdown": (
    GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
    [FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
     m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
     m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
    "<instance>", "Stops an instance"),
  "startup": (
    GenericManyOps("startup", _StartupInstance), [ArgInstance()],
    [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
     m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
     m_inst_tags_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
    [HVOPTS_OPT,
     BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
     NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
    "<instance>", "Starts an instance"),
  "reboot": (
    GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
    [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
     m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
    [m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
     m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "<instance>", "Reboots an instance"),
  "activate-disks": (
    ActivateDisks, ARGS_ONE_INSTANCE,
    SUBMIT_OPTS + [IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
    "<instance>", "Activate an instance's disks"),
  "deactivate-disks": (
    DeactivateDisks, ARGS_ONE_INSTANCE,
    [FORCE_OPT] + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT],
    "[-f] <instance>", "Deactivate an instance's disks"),
  "recreate-disks": (
    RecreateDisks, ARGS_ONE_INSTANCE,
    SUBMIT_OPTS +
    [DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
     IALLOCATOR_OPT],
    "<instance>", "Recreate an instance's disks"),
  "grow-disk": (
    GrowDisk,
    [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
     ArgUnknown(min=1, max=1)],
    SUBMIT_OPTS + [NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
    "<instance> <disk> <size>", "Grow an instance's disk"),
  "change-group": (
    ChangeGroup, ARGS_ONE_INSTANCE,
    [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT]
    + SUBMIT_OPTS,
    "[-I <iallocator>] [--to <group>]", "Change group of instance"),
  "list-tags": (
    ListTags, ARGS_ONE_INSTANCE, [],
    "<instance_name>", "List the tags of the given instance"),
  "add-tags": (
    AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
    [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<instance_name> tag...", "Add tags to the given instance"),
  "remove-tags": (
    RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
    [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<instance_name> tag...", "Remove tags from given instance"),
  }
#: dictionary with aliases for commands (resolved by GenericMain)
aliases = {
  "start": "startup",
  "stop": "shutdown",
  "show": "info",
  }
def Main():
  """Script entry point: dispatch to the instance commands table.

  @rtype: int
  @return: exit code from L{cli.GenericMain}

  """
  return GenericMain(commands, aliases=aliases,
                     override={"tag_type": constants.TAG_INSTANCE},
                     env_override=_ENV_OVERRIDE)
| bsd-2-clause |
gannetson/django | tests/migrate_signals/tests.py | 324 | 3585 | from django.apps import apps
from django.core import management
from django.db.models import signals
from django.test import TestCase, override_settings
from django.utils import six
# App config of the app under test; used as the signal sender throughout.
APP_CONFIG = apps.get_app_config('migrate_signals')
# Keyword arguments every pre_migrate dispatch is expected to carry.
PRE_MIGRATE_ARGS = ['app_config', 'verbosity', 'interactive', 'using']
# Settings passed to every call_command('migrate', ...) in these tests.
MIGRATE_DATABASE = 'default'
MIGRATE_VERBOSITY = 1
MIGRATE_INTERACTIVE = False
class PreMigrateReceiver(object):
    """Signal receiver that records how often it was called and with what
    keyword arguments (most recent call wins)."""

    def __init__(self):
        self.call_counter = 0
        self.call_args = None

    def __call__(self, signal, sender, **kwargs):
        self.call_counter += 1
        self.call_args = kwargs
class OneTimeReceiver(object):
    """
    Special receiver that copes with the test runner calling migrate for
    several databases, and several times for some of them.
    """

    def __init__(self):
        self.call_counter = 0
        self.call_args = None

    def __call__(self, signal, sender, **kwargs):
        # Although the test runner calls migrate for several databases,
        # observing only one of them is quite sufficient.
        if kwargs['using'] != MIGRATE_DATABASE:
            return
        self.call_counter += 1
        self.call_args = kwargs
        # we need to test only one call of migrate
        signals.pre_migrate.disconnect(pre_migrate_receiver,
                                       sender=APP_CONFIG)
# We connect receiver here and not in unit test code because we need to
# connect receiver before test runner creates database. That is, sequence of
# actions would be:
#
# 1. Test runner imports this module.
# 2. We connect receiver.
# 3. Test runner calls migrate to create the default database.
# 4. Test runner executes our unit test code.
# Connect the one-time receiver at import time (see the sequence described
# above) so it observes the migrate call that creates the default database.
pre_migrate_receiver = OneTimeReceiver()
signals.pre_migrate.connect(pre_migrate_receiver, sender=APP_CONFIG)
class MigrateSignalTests(TestCase):
    """Checks that the pre_migrate signal is sent with the expected
    arguments for the app under test.

    The assertion block shared by the two migrate-calling tests is factored
    into ``_check_receiver`` to avoid duplication.
    """

    available_apps = ['migrate_signals']

    def _check_receiver(self, receiver):
        """Assert *receiver* saw exactly one pre_migrate dispatch with the
        standard keyword arguments for the default database."""
        args = receiver.call_args
        self.assertEqual(receiver.call_counter, 1)
        self.assertEqual(set(args), set(PRE_MIGRATE_ARGS))
        self.assertEqual(args['app_config'], APP_CONFIG)
        self.assertEqual(args['verbosity'], MIGRATE_VERBOSITY)
        self.assertEqual(args['interactive'], MIGRATE_INTERACTIVE)
        self.assertEqual(args['using'], 'default')

    def test_pre_migrate_call_time(self):
        # The one-time receiver connected at module import must have seen
        # exactly one call (during test database creation).
        self.assertEqual(pre_migrate_receiver.call_counter, 1)

    def test_pre_migrate_args(self):
        r = PreMigrateReceiver()
        signals.pre_migrate.connect(r, sender=APP_CONFIG)
        management.call_command('migrate', database=MIGRATE_DATABASE,
            verbosity=MIGRATE_VERBOSITY, interactive=MIGRATE_INTERACTIVE,
            stdout=six.StringIO())
        self._check_receiver(r)

    @override_settings(MIGRATION_MODULES={'migrate_signals': 'migrate_signals.custom_migrations'})
    def test_pre_migrate_migrations_only(self):
        """
        If all apps have migrations, pre_migrate should be sent.
        """
        r = PreMigrateReceiver()
        signals.pre_migrate.connect(r, sender=APP_CONFIG)
        management.call_command('migrate', database=MIGRATE_DATABASE,
            verbosity=MIGRATE_VERBOSITY, interactive=MIGRATE_INTERACTIVE,
            stdout=six.StringIO())
        self._check_receiver(r)
| bsd-3-clause |
jwlawson/tensorflow | tensorflow/contrib/timeseries/python/timeseries/model_utils.py | 70 | 4017 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for training and constructing time series Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from tensorflow.contrib.timeseries.python.timeseries import feature_keys
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# TODO(agarwal): Remove and replace with functionality from tf.slim
def fully_connected(inp,
                    inp_size,
                    layer_size,
                    name,
                    activation=nn_ops.relu,
                    dtype=dtypes.float32):
  """Helper method to create a fully connected hidden layer.

  Creates `{name}_weight` and `{name}_bias` variables in the current
  variable scope and applies `inp @ weight + bias`, followed by the
  activation if one is given.
  """
  weights = variable_scope.get_variable(
      name="{}_weight".format(name), shape=[inp_size, layer_size], dtype=dtype)
  biases = variable_scope.get_variable(
      name="{}_bias".format(name),
      shape=[layer_size],
      initializer=init_ops.zeros_initializer())
  pre_activation = nn_ops.xw_plus_b(inp, weights, biases)
  if activation is None:
    return pre_activation
  assert callable(activation)
  return activation(pre_activation)
def parameter_switch(parameter_overrides):
  """Create a function which chooses between overridden and model parameters.

  Args:
    parameter_overrides: A dictionary with explicit overrides of model
        parameters, mapping from Tensors to their overridden values.
  Returns:
    A function which takes a Tensor and returns the override if it is
    specified, or otherwise the evaluated value (given current Variable
    values).
  """
  def get_passed_or_trained_value(parameter):
    # Fall back to the parameter itself when no override was given.
    chosen = parameter_overrides.get(parameter, parameter)
    return ops.convert_to_tensor(chosen).eval()
  return get_passed_or_trained_value
def canonicalize_times_or_steps_from_output(times, steps,
                                            previous_model_output):
  """Canonicalizes either relative or absolute times, with error checking."""
  if steps is not None and times is not None:
    raise ValueError("Only one of `steps` and `times` may be specified.")
  if steps is None and times is None:
    raise ValueError("One of `steps` and `times` must be specified.")
  previous_times = previous_model_output[feature_keys.FilteringResults.TIMES]
  if steps is not None:
    # Relative request: predict `steps` values immediately following the
    # last filtered time of each batch entry.
    return previous_times[:, -1:] + 1 + numpy.arange(steps)[None, ...]
  # Absolute request: normalize to a 2-D batch and validate against the
  # previous model output.
  times = numpy.array(times)
  if len(times.shape) != 2:
    times = times[None, ...]
  if previous_times.shape[0] != times.shape[0]:
    raise ValueError(
        ("`times` must have a batch dimension matching"
         " the previous model output (got a batch dimension of {} for `times`"
         " and {} for the previous model output).").format(
             times.shape[0], previous_times.shape[0]))
  if not (previous_times[:, -1] < times[:, 0]).all():
    raise ValueError("Prediction times must be after the corresponding "
                     "previous model output.")
  return times
| apache-2.0 |
jaredly/pyjamas | library/pyjamas/ui/Tree.py | 4 | 13256 | # Copyright 2006 James Tauber and contributors
# Copyright (C) 2009 Luke Kenneth Casson Leighton <lkcl@lkcl.net>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pyjamas import DOM
from pyjamas import Factory
from __pyjamas__ import console
from sets import Set
import pygwt
from Widget import Widget
from pyjamas.ui import Event
from pyjamas.ui import Focus
from TreeItem import RootTreeItem, TreeItem
from pyjamas.ui import MouseListener
from pyjamas.ui import KeyboardListener
from pyjamas.ui import FocusListener
class Tree(Widget):
    def __init__(self, **kwargs):
        """Create an empty tree widget.

        Keyword arguments are passed through to Widget; 'StyleName'
        defaults to "gwt-Tree", and an existing DOM 'Element' may be
        supplied instead of creating a fresh <div>.
        """
        if not kwargs.has_key('StyleName'): kwargs['StyleName']="gwt-Tree"
        # internal state, initialised before Widget.__init__ runs
        self.root = None
        self.childWidgets = Set()
        self.curSelection = None
        self.focusable = None
        self.focusListeners = []
        self.mouseListeners = []
        self.imageBase = pygwt.getModuleBaseURL()
        self.keyboardListeners = []
        self.listeners = []
        self.lastEventType = ""
        if kwargs.has_key('Element'):
            element = kwargs.pop('Element')
        else:
            element = DOM.createDiv()
        self.setElement(element)
        DOM.setStyleAttribute(self.getElement(), "position", "relative")
        # hidden element that carries keyboard focus for the tree;
        # styled to be visually undetectable (zero font, z-index -1)
        self.focusable = Focus.createFocusable()
        # Hide focus outline in Mozilla/Webkit/Opera
        DOM.setStyleAttribute(self.focusable, "outline", "0px")
        # Hide focus outline in IE 6/7
        DOM.setElemAttribute(self.focusable, "hideFocus", "true");
        DOM.setStyleAttribute(self.focusable, "fontSize", "0")
        DOM.setStyleAttribute(self.focusable, "position", "absolute")
        DOM.setIntStyleAttribute(self.focusable, "zIndex", -1)
        DOM.appendChild(self.getElement(), self.focusable)
        # invisible root item parents all top-level items
        self.root = RootTreeItem()
        self.root.setTree(self)
        Widget.__init__(self, **kwargs)
        self.sinkEvents(Event.ONMOUSEDOWN | Event.ONCLICK | Event.KEYEVENTS)
        #DOM.sinkEvents(self.focusable, Event.FOCUSEVENTS | Event.KEYEVENTS | DOM.getEventsSunk(self.focusable))
        DOM.sinkEvents(self.focusable, Event.FOCUSEVENTS)
    def add(self, widget):
        """Add a widget to the tree (Panel-style API; delegates to addItem)."""
        self.addItem(widget)

    def addFocusListener(self, listener):
        """Register a listener notified of the tree's focus/blur events."""
        self.focusListeners.append(listener)

    def addItem(self, item):
        """Append an item (TreeItem, or str caption) as a root-level child."""
        return self.insertItem(item)
def insertItem(self, item, index=None):
if isinstance(item, str):
item = TreeItem(item)
ret = self.root.addItem(item)
if index is None:
DOM.appendChild(self.getElement(), item.getElement())
else:
DOM.insertChild(self.getElement(), item.getElement(), index)
return ret
    def addKeyboardListener(self, listener):
        """Register a listener notified of keyboard events on the tree."""
        self.keyboardListeners.append(listener)

    def addMouseListener(self, listener):
        """Register a listener notified of mouse events on the tree."""
        self.mouseListeners.append(listener)

    def addTreeListener(self, listener):
        """Register a tree listener (selection/state change notifications)."""
        self.listeners.append(listener)
def clear(self):
size = self.root.getChildCount()
for i in range(size, 0, -1):
self.root.getChild(i-1).remove()
def ensureSelectedItemVisible(self):
if self.curSelection is None:
return
parent = self.curSelection.getParentItem()
while parent is not None:
parent.setState(True)
parent = parent.getParentItem()
    def getImageBase(self):
        """Return the base URL used for this tree's images."""
        return self.imageBase

    def getItem(self, index):
        """Return the root-level item at the given index."""
        return self.root.getChild(index)

    def getItemCount(self):
        """Return the number of root-level items."""
        return self.root.getChildCount()

    def getSelectedItem(self):
        """Return the currently selected item, or None."""
        return self.curSelection

    def getTabIndex(self):
        """Return the tab index of the tree's hidden focusable element."""
        return Focus.getTabIndex(self.focusable)

    def __iter__(self):
        """Iterate over the tree's child widgets (self.childWidgets)."""
        return self.childWidgets.__iter__()
    def onBrowserEvent(self, event):
        """Dispatch browser events to registered listeners and implement
        keyboard navigation (arrow keys move/expand/collapse the selection,
        TAB re-syncs the selection with the focused element)."""
        type = DOM.eventGetType(event)
        if type == "click":
            # clicks focus the tree unless the target element handles
            # focus itself
            e = DOM.eventGetTarget(event)
            if not self.shouldTreeDelegateFocusToElement(e):
                self.setFocus(True)
        elif type == "mousedown":
            MouseListener.fireMouseEvent(self.mouseListeners, self, event)
            self.elementClicked(self.root, DOM.eventGetTarget(event))
        elif type == "mouseup" or type == "mousemove" or type == "mouseover" or type == "mouseout":
            MouseListener.fireMouseEvent(self.mouseListeners, self, event)
        elif type == "blur" or type == "focus":
            FocusListener.fireFocusEvent(self.focusListeners, self, event)
        elif type == "keydown":
            # with no selection yet, select the first root item and stop
            if self.curSelection is None:
                if self.root.getChildCount() > 0:
                    self.onSelection(self.root.getChild(0), True)
                Widget.onBrowserEvent(self, event)
                return
            # suppress auto-repeated keydown events (see lastEventType below)
            if self.lastEventType == "keydown":
                return
            keycode = DOM.eventGetKeyCode(event)
            if keycode == KeyboardListener.KEY_UP:
                self.moveSelectionUp(self.curSelection, True)
                DOM.eventPreventDefault(event)
            elif keycode == KeyboardListener.KEY_DOWN:
                self.moveSelectionDown(self.curSelection, True)
                DOM.eventPreventDefault(event)
            elif keycode == KeyboardListener.KEY_LEFT:
                # collapse the selected item if it is open
                if self.curSelection.getState():
                    self.curSelection.setState(False)
                DOM.eventPreventDefault(event)
            elif keycode == KeyboardListener.KEY_RIGHT:
                # expand the selected item if it is closed
                if not self.curSelection.getState():
                    self.curSelection.setState(True)
                DOM.eventPreventDefault(event)
        elif type == "keyup":
            if DOM.eventGetKeyCode(event) == KeyboardListener.KEY_TAB:
                # TAB moved the focus: find the item owning the newly
                # focused element and select it
                chain = []
                self.collectElementChain(chain, self.getElement(), DOM.eventGetTarget(event))
                item = self.findItemByChain(chain, 0, self.root)
                if item != self.getSelectedItem():
                    self.setSelectedItem(item, True)
        elif type == "keypress":
            KeyboardListener.fireKeyboardEvent(self.keyboardListeners, self, event)
        Widget.onBrowserEvent(self, event)
        # remembered so a following identical keydown can be suppressed
        self.lastEventType = type
    def remove(self, widget):
        """Unsupported: remove items with removeItem(), never via remove()."""
        #throw new UnsupportedOperationException("Widgets should never be directly removed from a tree")
        console.error("Widgets should never be directly removed from a tree")
    def removeFocusListener(self, listener):
        """Unregister a previously added focus listener."""
        self.focusListeners.remove(listener)
    def removeItem(self, item):
        """Remove a top-level item from the tree and detach its DOM element."""
        self.root.removeItem(item)
        DOM.removeChild(self.getElement(), item.getElement())
def removeItems(self):
while self.getItemCount() > 0:
self.removeItem(self.getItem(0))
    def removeKeyboardListener(self, listener):
        """Unregister a previously added keyboard listener."""
        self.keyboardListeners.remove(listener)
    def removeTreeListener(self, listener):
        """Unregister a previously added tree listener."""
        self.listeners.remove(listener)
    def setAccessKey(self, key):
        """Set the keyboard access key on the tree's focusable element."""
        Focus.setAccessKey(self.focusable, key)
def setFocus(self, focus):
if focus:
Focus.focus(self.focusable)
else:
Focus.blur(self.focusable)
    def setImageBase(self, baseUrl):
        """Set the base URL used for tree images and refresh every item."""
        self.imageBase = baseUrl
        self.root.updateStateRecursive()
def setSelectedItem(self, item, fireEvents=True):
if item is None:
if self.curSelection is None:
return
self.curSelection.setSelected(False)
self.curSelection = None
return
self.onSelection(item, fireEvents)
    def setTabIndex(self, index):
        """Set the tab index on the tree's hidden focusable element."""
        Focus.setTabIndex(self.focusable, index)
def treeItemIterator(self):
accum = []
self.root.addTreeItems(accum)
return accum.__iter__()
    def collectElementChain(self, chain, hRoot, hElem):
        """Fill `chain` with the DOM elements from just below `hRoot` down to
        `hElem` inclusive, in top-down order.

        Recursion stops when hElem is None or reaches hRoot itself (hRoot is
        not appended).
        """
        if (hElem is None) or DOM.compare(hElem, hRoot):
            return
        # Append ancestors first so the chain reads root-to-leaf.
        self.collectElementChain(chain, hRoot, DOM.getParent(hElem))
        chain.append(hElem)
    def elementClicked(self, root, hElem):
        """Hit-test a clicked DOM element against the items under `root`.

        A click on an item's expand/collapse image toggles its open state
        (firing events); a click anywhere else inside the item selects it.
        Returns True when some item handled the click, False otherwise.
        """
        chain = []
        self.collectElementChain(chain, self.getElement(), hElem)
        item = self.findItemByChain(chain, 0, root)
        if item is not None:
            if DOM.compare(item.getImageElement(), hElem):
                item.setState(not item.getState(), True)
                return True
            elif DOM.isOrHasChild(item.getElement(), hElem):
                self.onSelection(item, True)
                return True
        return False
def findDeepestOpenChild(self, item):
if not item.getState():
return item
return self.findDeepestOpenChild(item.getChild(item.getChildCount() - 1))
    def findItemByChain(self, chain, idx, root):
        """Map an element chain (see collectElementChain) onto a TreeItem.

        Walks `chain` from position `idx`, matching each element against the
        children of `root`.  Chain elements owned by no child (wrapper markup
        between items) are skipped by recursing with idx + 1.  When the chain
        is exhausted the current `root` is the match; returns None only if the
        initial root itself falls through with an empty match.
        """
        if idx == len(chain):
            return root
        hCurElem = chain[idx]
        for i in range(root.getChildCount()):
            child = root.getChild(i)
            if DOM.compare(child.getElement(), hCurElem):
                # Prefer the deepest item matching the rest of the chain,
                # falling back to this child when nothing deeper matches.
                retItem = self.findItemByChain(chain, idx + 1, root.getChild(i))
                if retItem is None:
                    return child
                return retItem
        # No child owns this element: skip it and keep matching deeper.
        return self.findItemByChain(chain, idx + 1, root)
    def moveFocus(self, selection):
        """Move keyboard focus to `selection`.

        If the item hosts a focusable widget, focus that widget directly.
        Otherwise size and position the tree's hidden focusable element over
        the item's content element, scroll it into view and focus it.
        """
        focusableWidget = selection.getFocusableWidget()
        if focusableWidget is not None:
            focusableWidget.setFocus(True)
            DOM.scrollIntoView(focusableWidget.getElement())
        else:
            # Overlay the hidden focusable element on the selected item,
            # using coordinates relative to the tree container.
            selectedElem = selection.getContentElem()
            containerLeft = self.getAbsoluteLeft()
            containerTop = self.getAbsoluteTop()
            left = DOM.getAbsoluteLeft(selectedElem) - containerLeft
            top = DOM.getAbsoluteTop(selectedElem) - containerTop
            width = DOM.getIntAttribute(selectedElem, "offsetWidth")
            height = DOM.getIntAttribute(selectedElem, "offsetHeight")
            DOM.setIntStyleAttribute(self.focusable, "left", left)
            DOM.setIntStyleAttribute(self.focusable, "top", top)
            DOM.setIntStyleAttribute(self.focusable, "width", width)
            DOM.setIntStyleAttribute(self.focusable, "height", height)
            DOM.scrollIntoView(self.focusable)
            Focus.focus(self.focusable)
    def moveSelectionDown(self, sel, dig):
        """Select the item visually below `sel`.

        With dig=True an open item descends into its first child; otherwise
        (or for a closed item) the next sibling is selected, climbing to the
        parent's next sibling when `sel` is the last child.
        """
        if sel == self.root:
            return
        parent = sel.getParentItem()
        if parent is None:
            parent = self.root
        idx = parent.getChildIndex(sel)
        if not dig or not sel.getState():
            if idx < parent.getChildCount() - 1:
                self.onSelection(parent.getChild(idx + 1), True)
            else:
                # Last child: continue downwards from the parent, without
                # digging back into the subtree we just left.
                self.moveSelectionDown(parent, False)
        elif sel.getChildCount() > 0:
            self.onSelection(sel.getChild(0), True)
def moveSelectionUp(self, sel, climb):
parent = sel.getParentItem()
if parent is None:
parent = self.root
idx = parent.getChildIndex(sel)
if idx > 0:
sibling = parent.getChild(idx - 1)
self.onSelection(self.findDeepestOpenChild(sibling), True)
else:
self.onSelection(parent, True)
    def onSelection(self, item, fireEvents):
        """Make `item` the current selection.

        Un-styles the previous selection, moves focus to the new item, styles
        it as selected and, when fireEvents is true, notifies tree listeners.
        Selecting the internal root item is a no-op.
        """
        if item == self.root:
            return
        if self.curSelection is not None:
            self.curSelection.setSelected(False)
        self.curSelection = item
        if self.curSelection is not None:
            # Focus first, then highlight, then notify listeners.
            self.moveFocus(self.curSelection)
            self.curSelection.setSelected(True)
            if fireEvents and len(self.listeners):
                for listener in self.listeners:
                    listener.onTreeItemSelected(item)
    def doAttachChildren(self):
        """Attach all adopted child widgets and route the focusable element's
        events to this tree."""
        for child in self:
            child.onAttach()
        DOM.setEventListener(self.focusable, self);
    def doDetachChildren(self):
        """Detach all adopted child widgets and drop the focusable element's
        event listener."""
        for child in self:
            child.onDetach()
        DOM.setEventListener(self.focusable, None);
    def onLoad(self):
        """Recursively refresh item state once the tree has been attached."""
        self.root.updateStateRecursive()
    def adopt(self, content):
        """Take logical ownership of `content`, recording it as a child widget."""
        self.childWidgets.add(content)
        content.treeSetParent(self)
    def disown(self, item):
        """Release ownership of `item`, clearing its tree parent."""
        self.childWidgets.remove(item)
        item.treeSetParent(None)
def fireStateChanged(self, item):
for listener in self.listeners:
if hasattr(listener, "onTreeItemStateChanged"):
listener.onTreeItemStateChanged(item)
    def getChildWidgets(self):
        """Return the collection of widgets adopted into this tree."""
        return self.childWidgets
def shouldTreeDelegateFocusToElement(self, elem):
name = str(elem.nodeName)
name = name.lower()
return name == 'select' or\
name == 'input' or\
name == 'checkbox'
Factory.registerClass('pyjamas.ui.Tree', Tree)
| apache-2.0 |
hanselke/erpnext-1 | erpnext/patches/v4_0/countrywise_coa.py | 119 | 1034 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
	"""Patch: reclassify Account records for the country-wise chart of accounts.

	- Renames legacy account_type values ('Bank or Cash' -> 'Cash'/'Bank',
	  'Fixed Asset Account' -> 'Fixed Asset') and tags stock asset accounts.
	- Backfills report_type and balance_must_be from the pre-v4 columns
	  is_pl_account / debit_or_credit when those columns still exist.
	"""
	frappe.reload_doc("setup", 'doctype', "company")
	frappe.reload_doc("accounts", 'doctype', "account")
	frappe.db.sql("""update tabAccount set account_type='Cash'
		where account_type='Bank or Cash' and account_name in ('Cash', 'Cash In Hand')""")
	frappe.db.sql("""update tabAccount set account_type='Stock'
		where account_name = 'Stock Assets'""")
	ac_types = {"Fixed Asset Account": "Fixed Asset", "Bank or Cash": "Bank"}
	for old, new in ac_types.items():
		frappe.db.sql("""update tabAccount set account_type=%s
			where account_type=%s""", (new, old))
	try:
		frappe.db.sql("""update `tabAccount` set report_type =
			if(is_pl_account='Yes', 'Profit and Loss', 'Balance Sheet')""")
		frappe.db.sql("""update `tabAccount` set balance_must_be=debit_or_credit
			where ifnull(allow_negative_balance, 0) = 0""")
	except Exception:
		# Best-effort backfill: the legacy columns may already have been
		# dropped.  Narrowed from a bare `except:` so KeyboardInterrupt and
		# SystemExit still propagate.
		pass
| agpl-3.0 |
taotie12010/bigfour | common/lib/xmodule/xmodule/tests/test_split_test_module.py | 34 | 23231 | """
Tests for the Split Testing Module
"""
import ddt
import lxml
from mock import Mock, patch
from fs.memoryfs import MemoryFS
from xmodule.partitions.tests.test_partitions import StaticPartitionService, PartitionTestCase, MockUserPartitionScheme
from xmodule.tests.xml import factories as xml
from xmodule.tests.xml import XModuleXmlImportTest
from xmodule.tests import get_test_system
from xmodule.x_module import AUTHOR_VIEW, STUDENT_VIEW
from xmodule.validation import StudioValidationMessage
from xmodule.split_test_module import SplitTestDescriptor, SplitTestFields, get_split_user_partitions
from xmodule.partitions.partitions import Group, UserPartition
class SplitTestModuleFactory(xml.XmlImportFactory):
    """
    Factory for generating SplitTestModules for testing purposes
    """
    # XML tag emitted by this factory; parsed as a split_test block.
    tag = 'split_test'
class SplitTestUtilitiesTest(PartitionTestCase):
    """
    Tests for utility methods related to split_test module.
    """
    def test_split_user_partitions(self):
        """
        Tests the get_split_user_partitions helper method.
        """
        first_random_partition = UserPartition(
            0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
            self.random_scheme
        )
        # NOTE(review): this partition reuses id 0; the helper under test
        # filters by scheme only, so the duplicate id does not affect the
        # assertion — confirm whether a distinct id was intended.
        second_random_partition = UserPartition(
            0, 'second_partition', 'Second Partition', [Group("4", 'zeta'), Group("5", 'omega')],
            self.random_scheme
        )
        all_partitions = [
            first_random_partition,
            # Only UserPartitions with scheme "random" will be returned as available options.
            UserPartition(
                1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
                self.non_random_scheme
            ),
            second_random_partition
        ]
        self.assertEqual(
            [first_random_partition, second_random_partition],
            get_split_user_partitions(all_partitions)
        )
class SplitTestModuleTest(XModuleXmlImportTest, PartitionTestCase):
    """
    Base class for all split_module tests.

    Builds a minimal course containing one split_test block with two HTML
    children (one per experiment group) and binds it to a mock runtime/user.
    """
    def setUp(self):
        super(SplitTestModuleTest, self).setUp()
        self.course_id = 'test_org/test_course_number/test_run'
        # construct module
        course = xml.CourseFactory.build()
        sequence = xml.SequenceFactory.build(parent=course)
        split_test = SplitTestModuleFactory(
            parent=sequence,
            attribs={
                'user_partition_id': '0',
                'group_id_to_child': '{"0": "i4x://edX/xml_test_course/html/split_test_cond0", "1": "i4x://edX/xml_test_course/html/split_test_cond1"}'
            }
        )
        # One HTML child per experiment group.
        xml.HtmlFactory(parent=split_test, url_name='split_test_cond0', text='HTML FOR GROUP 0')
        xml.HtmlFactory(parent=split_test, url_name='split_test_cond1', text='HTML FOR GROUP 1')
        self.course = self.process_xml(course)
        self.course_sequence = self.course.get_children()[0]
        self.module_system = get_test_system()
        self.module_system.descriptor_runtime = self.course._runtime # pylint: disable=protected-access
        self.course.runtime.export_fs = MemoryFS()
        user = Mock(username='ma', email='ma@edx.org', is_staff=False, is_active=True)
        # Static partition service exposing the partition under test (id 0)
        # plus a second, unrelated partition.
        self.partitions_service = StaticPartitionService(
            [
                self.user_partition,
                UserPartition(
                    1, 'second_partition', 'Second Partition',
                    [Group("0", 'abel'), Group("1", 'baker'), Group("2", 'charlie')],
                    MockUserPartitionScheme()
                )
            ],
            user=user,
            course_id=self.course.id,
            track_function=Mock(name='track_function'),
        )
        self.module_system._services['partitions'] = self.partitions_service # pylint: disable=protected-access
        self.split_test_module = self.course_sequence.get_children()[0]
        self.split_test_module.bind_for_student(
            self.module_system,
            user.id
        )
@ddt.ddt
class SplitTestModuleLMSTest(SplitTestModuleTest):
    """
    Test the split test module
    """
    @ddt.data((0, 'split_test_cond0'), (1, 'split_test_cond1'))
    @ddt.unpack
    def test_child(self, user_tag, child_url_name):
        # Force the user's group, then verify the matching child is chosen.
        self.user_partition.scheme.current_group = self.user_partition.groups[user_tag] # pylint: disable=no-member
        self.assertEquals(self.split_test_module.child_descriptor.url_name, child_url_name)
    @ddt.data((0, 'HTML FOR GROUP 0'), (1, 'HTML FOR GROUP 1'))
    @ddt.unpack
    def test_get_html(self, user_tag, child_content):
        self.user_partition.scheme.current_group = self.user_partition.groups[user_tag] # pylint: disable=no-member
        self.assertIn(
            child_content,
            self.module_system.render(self.split_test_module, STUDENT_VIEW).content
        )
    @ddt.data(0, 1)
    def test_child_missing_tag_value(self, _user_tag):
        # If user_tag has a missing value, we should still get back a valid child url
        self.assertIn(self.split_test_module.child_descriptor.url_name, ['split_test_cond0', 'split_test_cond1'])
    @ddt.data(100, 200, 300, 400, 500, 600, 700, 800, 900, 1000)
    def test_child_persist_new_tag_value_when_tag_missing(self, _user_tag):
        # If a user_tag has a missing value, a group should be saved/persisted for that user.
        # So, we check that we get the same url_name when we call on the url_name twice.
        # We run the test ten times so that, if our storage is failing, we'll be most likely to notice it.
        self.assertEquals(self.split_test_module.child_descriptor.url_name, self.split_test_module.child_descriptor.url_name)
    # Patch the definition_to_xml for the html children.
    @patch('xmodule.html_module.HtmlDescriptor.definition_to_xml')
    def test_export_import_round_trip(self, def_to_xml):
        # The HtmlDescriptor definition_to_xml tries to write to the filesystem
        # before returning an xml object. Patch this to just return the xml.
        def_to_xml.return_value = lxml.etree.Element('html')
        # Mock out the process_xml
        # Expect it to return a child descriptor for the SplitTestDescriptor when called.
        self.module_system.process_xml = Mock()
        # Write out the xml.
        xml_obj = self.split_test_module.definition_to_xml(MemoryFS())
        self.assertEquals(xml_obj.get('user_partition_id'), '0')
        self.assertIsNotNone(xml_obj.get('group_id_to_child'))
        # Read the xml back in.
        fields, children = SplitTestDescriptor.definition_from_xml(xml_obj, self.module_system)
        self.assertEquals(fields.get('user_partition_id'), '0')
        self.assertIsNotNone(fields.get('group_id_to_child'))
        self.assertEquals(len(children), 2)
class SplitTestModuleStudioTest(SplitTestModuleTest):
    """
    Unit tests for how split test interacts with Studio.
    """
    @patch('xmodule.split_test_module.SplitTestDescriptor.group_configuration_url', return_value='http://example.com')
    def test_render_author_view(self, group_configuration_url):
        """
        Test the rendering of the Studio author view.
        """
        def create_studio_context(root_xblock):
            """
            Context for rendering the studio "author_view".
            """
            return {
                'reorderable_items': set(),
                'root_xblock': root_xblock,
            }
        # The split_test module should render both its groups when it is the root
        context = create_studio_context(self.split_test_module)
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertIn('HTML FOR GROUP 0', html)
        self.assertIn('HTML FOR GROUP 1', html)
        # When rendering as a child, it shouldn't render either of its groups
        context = create_studio_context(self.course_sequence)
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertNotIn('HTML FOR GROUP 0', html)
        self.assertNotIn('HTML FOR GROUP 1', html)
        # The "Create Missing Groups" button should be rendered when groups are missing
        context = create_studio_context(self.split_test_module)
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
        ]
        html = self.module_system.render(self.split_test_module, AUTHOR_VIEW, context).content
        self.assertIn('HTML FOR GROUP 0', html)
        self.assertIn('HTML FOR GROUP 1', html)
    def test_group_configuration_url(self):
        """
        Test creation of correct Group Configuration URL.
        """
        mocked_course = Mock(advanced_modules=['split_test'])
        mocked_modulestore = Mock()
        mocked_modulestore.get_course.return_value = mocked_course
        self.split_test_module.system.modulestore = mocked_modulestore
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        expected_url = '/group_configurations/edX/xml_test_course/101#0'
        self.assertEqual(expected_url, self.split_test_module.group_configuration_url)
    def test_editable_settings(self):
        """
        Test the setting information passed back from editable_metadata_fields.
        """
        editable_metadata_fields = self.split_test_module.editable_metadata_fields
        self.assertIn(SplitTestDescriptor.display_name.name, editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.due.name, editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.user_partitions.name, editable_metadata_fields)
        # user_partition_id will always appear in editable_metadata_settings, regardless
        # of the selected value.
        self.assertIn(SplitTestDescriptor.user_partition_id.name, editable_metadata_fields)
    def test_non_editable_settings(self):
        """
        Test the settings that are marked as "non-editable".
        """
        non_editable_metadata_fields = self.split_test_module.non_editable_metadata_fields
        self.assertIn(SplitTestDescriptor.due, non_editable_metadata_fields)
        self.assertIn(SplitTestDescriptor.user_partitions, non_editable_metadata_fields)
        self.assertNotIn(SplitTestDescriptor.display_name, non_editable_metadata_fields)
    def test_available_partitions(self):
        """
        Tests that the available partitions are populated correctly when editable_metadata_fields are called
        """
        self.assertEqual([], SplitTestDescriptor.user_partition_id.values)
        # user_partitions is empty, only the "Not Selected" item will appear.
        self.split_test_module.user_partition_id = SplitTestFields.no_partition_selected['value']
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(1, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
        # Populate user_partitions and call editable_metadata_fields again
        self.split_test_module.user_partitions = [
            UserPartition(
                0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')],
                self.random_scheme
            ),
            # Only UserPartitions with scheme "random" will be returned as available options.
            UserPartition(
                1, 'non_random_partition', 'Will Not Be Returned', [Group("1", 'apple'), Group("2", 'banana')],
                self.non_random_scheme
            )
        ]
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(2, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
        self.assertEqual(0, partitions[1]['value'])
        self.assertEqual("first_partition", partitions[1]['display_name'])
        # Try again with a selected partition and verify that there is no option for "No Selection"
        self.split_test_module.user_partition_id = 0
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(1, len(partitions))
        self.assertEqual(0, partitions[0]['value'])
        self.assertEqual("first_partition", partitions[0]['display_name'])
        # Finally try again with an invalid selected partition and verify that "No Selection" is an option
        self.split_test_module.user_partition_id = 999
        self.split_test_module.editable_metadata_fields # pylint: disable=pointless-statement
        partitions = SplitTestDescriptor.user_partition_id.values
        self.assertEqual(2, len(partitions))
        self.assertEqual(SplitTestFields.no_partition_selected['value'], partitions[0]['value'])
        self.assertEqual(0, partitions[1]['value'])
        self.assertEqual("first_partition", partitions[1]['display_name'])
    def test_active_and_inactive_children(self):
        """
        Tests the active and inactive children returned for different split test configurations.
        """
        split_test_module = self.split_test_module
        children = split_test_module.get_children()
        # Verify that a split test has no active children if it has no specified user partition.
        split_test_module.user_partition_id = -1
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [])
        self.assertEqual(inactive_children, children)
        # Verify that all the children are returned as active for a correctly configured split_test
        split_test_module.user_partition_id = 0
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, children)
        self.assertEqual(inactive_children, [])
        # Verify that a split_test does not return inactive children in the active children
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [children[0]])
        self.assertEqual(inactive_children, [children[1]])
        # Verify that a split_test ignores misconfigured children
        self.split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("2", 'gamma')])
        ]
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [children[0]])
        self.assertEqual(inactive_children, [children[1]])
        # Verify that a split_test referring to a non-existent user partition has no active children
        self.split_test_module.user_partition_id = 2
        [active_children, inactive_children] = split_test_module.active_and_inactive_children()
        self.assertEqual(active_children, [])
        self.assertEqual(inactive_children, children)
    def test_validation_messages(self):
        """
        Test the validation messages produced for different split test configurations.
        """
        split_test_module = self.split_test_module
        def verify_validation_message(message, expected_message, expected_message_type,
                                      expected_action_class=None, expected_action_label=None,
                                      expected_action_runtime_event=None):
            """
            Verify that the validation message has the expected validation message and type.
            """
            self.assertEqual(message.text, expected_message)
            self.assertEqual(message.type, expected_message_type)
            if expected_action_class:
                self.assertEqual(message.action_class, expected_action_class)
            else:
                self.assertFalse(hasattr(message, "action_class"))
            if expected_action_label:
                self.assertEqual(message.action_label, expected_action_label)
            else:
                self.assertFalse(hasattr(message, "action_label"))
            if expected_action_runtime_event:
                self.assertEqual(message.action_runtime_event, expected_action_runtime_event)
            else:
                self.assertFalse(hasattr(message, "action_runtime_event"))
        def verify_summary_message(general_validation, expected_message, expected_message_type):
            """
            Verify that the general validation message has the expected validation message and type.
            """
            self.assertEqual(general_validation.text, expected_message)
            self.assertEqual(general_validation.type, expected_message_type)
        # Verify the messages for an unconfigured user partition
        split_test_module.user_partition_id = -1
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 0)
        verify_validation_message(
            validation.summary,
            u"The experiment is not associated with a group configuration.",
            StudioValidationMessage.NOT_CONFIGURED,
            'edit-button',
            u"Select a Group Configuration",
        )
        # Verify the messages for a correctly configured split_test
        split_test_module.user_partition_id = 0
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition', [Group("0", 'alpha'), Group("1", 'beta')])
        ]
        validation = split_test_module.validate_split_test()
        self.assertTrue(validation)
        self.assertIsNone(split_test_module.general_validation_message(), None)
        # Verify the messages for a split test with too few groups
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("1", 'beta'), Group("2", 'gamma')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment does not contain all of the groups in the configuration.",
            StudioValidationMessage.ERROR,
            expected_action_runtime_event='add-missing-groups',
            expected_action_label=u"Add Missing Groups"
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )
        # Verify the messages for a split test with children that are not associated with any group
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
            StudioValidationMessage.WARNING
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.WARNING
        )
        # Verify the messages for a split test with both missing and inactive children
        split_test_module.user_partitions = [
            UserPartition(0, 'first_partition', 'First Partition',
                          [Group("0", 'alpha'), Group("2", 'gamma')])
        ]
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 2)
        verify_validation_message(
            validation.messages[0],
            u"The experiment does not contain all of the groups in the configuration.",
            StudioValidationMessage.ERROR,
            expected_action_runtime_event='add-missing-groups',
            expected_action_label=u"Add Missing Groups"
        )
        verify_validation_message(
            validation.messages[1],
            u"The experiment has an inactive group. Move content into active groups, then delete the inactive group.",
            StudioValidationMessage.WARNING
        )
        # With two messages of type error and warning priority given to error.
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )
        # Verify the messages for a split test referring to a non-existent user partition
        split_test_module.user_partition_id = 2
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment uses a deleted group configuration. "
            u"Select a valid group configuration or delete this experiment.",
            StudioValidationMessage.ERROR
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )
        # Verify the message for a split test referring to a non-random user partition
        split_test_module.user_partitions = [
            UserPartition(
                10, 'incorrect_partition', 'Non Random Partition', [Group("0", 'alpha'), Group("2", 'gamma')],
                scheme=self.non_random_scheme
            )
        ]
        split_test_module.user_partition_id = 10
        validation = split_test_module.validate()
        self.assertEqual(len(validation.messages), 1)
        verify_validation_message(
            validation.messages[0],
            u"The experiment uses a group configuration that is not supported for experiments. "
            u"Select a valid group configuration or delete this experiment.",
            StudioValidationMessage.ERROR
        )
        verify_summary_message(
            validation.summary,
            u"This content experiment has issues that affect content visibility.",
            StudioValidationMessage.ERROR
        )
| agpl-3.0 |
bespike/litecoin | test/functional/wallet_groups.py | 4 | 3973 | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import CTransaction, FromHex, ToHex
from test_framework.util import (
assert_equal,
)
def assert_approx(v, vexp, vspan=0.00001):
    """Raise AssertionError unless v lies within vexp +/- vspan."""
    lo = vexp - vspan
    hi = vexp + vspan
    if v < lo:
        raise AssertionError("%s < [%s..%s]" % (str(v), str(lo), str(hi)))
    if v > hi:
        raise AssertionError("%s > [%s..%s]" % (str(v), str(lo), str(hi)))
class WalletGroupTest(BitcoinTestFramework):
    """Functional test for output grouping with -avoidpartialspends."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        # Node 2 enables -avoidpartialspends; nodes 0/1 use default selection.
        self.extra_args = [["-maxtxfee=1.0"], ["-maxtxfee=1.0"], ['-avoidpartialspends', '-maxtxfee=1.0']]
        self.rpc_timeout = 120
    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()
    def run_test(self):
        # Mine some coins
        self.nodes[0].generate(110)
        # Get some addresses from the two nodes
        addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
        addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
        addrs = addr1 + addr2
        # Send 1 + 0.5 coin to each address
        [self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
        [self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
        self.nodes[0].generate(1)
        self.sync_all()
        # For each node, send 0.2 coins back to 0;
        # - node[1] should pick one 0.5 UTXO and leave the rest
        # - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
        # given address, and leave the rest
        txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        tx1 = self.nodes[1].getrawtransaction(txid1, True)
        # txid1 should have 1 input and 2 outputs
        assert_equal(1, len(tx1["vin"]))
        assert_equal(2, len(tx1["vout"]))
        # one output should be 0.2, the other should be ~0.3
        v = [vout["value"] for vout in tx1["vout"]]
        v.sort()
        assert_approx(v[0], 0.2)
        assert_approx(v[1], 0.3, 0.001)
        txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
        tx2 = self.nodes[2].getrawtransaction(txid2, True)
        # txid2 should have 2 inputs and 2 outputs
        assert_equal(2, len(tx2["vin"]))
        assert_equal(2, len(tx2["vout"]))
        # one output should be 0.2, the other should be ~1.3
        v = [vout["value"] for vout in tx2["vout"]]
        v.sort()
        assert_approx(v[0], 0.2)
        assert_approx(v[1], 1.3, 0.001)
        # Empty out node2's wallet
        self.nodes[2].sendtoaddress(address=self.nodes[0].getnewaddress(), amount=self.nodes[2].getbalance(), subtractfeefromamount=True)
        self.sync_all()
        self.nodes[0].generate(1)
        # Fill node2's wallet with 10000 outputs corresponding to the same
        # scriptPubKey
        for i in range(5):
            raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
            tx = FromHex(CTransaction(), raw_tx)
            tx.vin = []
            tx.vout = [tx.vout[0]] * 2000
            funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx))
            signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex'])
            self.nodes[0].sendrawtransaction(signed_tx['hex'])
            self.nodes[0].generate(1)
        self.sync_all()
        # Check that we can create a transaction that only requires ~100 of our
        # utxos, without pulling in all outputs and creating a transaction that
        # is way too big.
        assert self.nodes[2].sendtoaddress(address=addr2[0], amount=5)
if __name__ == '__main__':
    # Script entry point: run the functional test.
    WalletGroupTest().main()
| mit |
danieltt/nox | src/utilities/switch_command.py | 16 | 2438 | #!/usr/bin/python
#
# Send arbitrary command to a switch
#
import getopt,sys,os
import httplib
import simplejson
import urllib
# TODO: need to set the path for this
from nox.webapps.webserviceclient.simple import PersistentLogin, NOXWSClient
def usage():
    """Print the command-line help text for this script to stdout."""
    # NOTE: help text is a runtime string; kept verbatim.
    print """
Usage:
switch_command.py -d <directory name> -s <switch name> -c <command>
[-u <admin username>] [-p <admin passwd>]
[args]
e.g. switch_command -d Built-in -s foo -c restart
Note: accepts mangled switch names
"""
if __name__ == '__main__':
sys.path.append('/opt/nox/bin')
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:s:c:u:p:")
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
directory = None
switch = None
command = None
adminu = "admin"
adminp = "admin"
for o, a in opts:
if o == "-h":
usage()
sys.exit()
elif o == '-d':
directory = a
elif o == '-s':
switch = a
try:
if switch.find(';') != -1:
directory = switch.split(';')[0]
switch = switch.split(';')[0]
except Exception, e:
print 'Format error in mangled name',switch
sys.exit()
elif o == '-c':
command = a
elif o == '-u':
adminu = a
elif o == '-p':
adminp = a
else:
assert False, "unhandled option"
if not directory or not switch or not command:
usage()
sys.exit()
print ' Logging into web service.. ',
loginmgr = PersistentLogin("admin","admin")
# currently only support localhost
wsc = NOXWSClient("127.0.0.1", 443, True, loginmgr)
print 'done'
urlstr = '/ws.v1/switch/'+directory+'/'+switch+'/command'
print ' Issuing:'
print '\t',urlstr
url = urllib.quote(urlstr)
d = {}
d['command'] = command
d['args'] = args
headers = {}
headers["content-type"] = "application/json"
response = wsc.put(url, headers, simplejson.dumps(d))
body = response.getBody()
if body == '0':
print 'Command sent succesfully'
else:
print 'Error: ',body
| gpl-3.0 |
pradeep-gr/mbed-os5-onsemi | tools/test/config/config_test.py | 29 | 4502 | """
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import unittest
from mock import patch
from tools.config import Config
"""
Tests for config.py
"""
class ConfigTests(unittest.TestCase):
    """Unit tests covering how Config resolves its application config file."""

    def setUp(self):
        """Prepare a fresh target name before every test."""
        self.target = "K64F"

    def tearDown(self):
        """No per-test cleanup is needed."""
        pass

    @patch.object(Config, '_process_config_and_overrides')
    @patch('tools.config.json_file_to_dict')
    def test_init_app_config(self, mocked_loader, _):
        """Passing app_config must load exactly that file.

        :param mocked_loader: mock of function json_file_to_dict
        :param _: mock of function _process_config_and_overrides (not tested)
        """
        config_path = "app_config"
        fake_data = {'config': 'test'}
        mocked_loader.return_value = fake_data

        config = Config(self.target, app_config=config_path)

        mocked_loader.assert_called_with(config_path)
        self.assertEqual(config.app_config_data, fake_data,
                         "app_config_data should be set to the returned value")

    @patch.object(Config, '_process_config_and_overrides')
    @patch('tools.config.json_file_to_dict')
    def test_init_no_app_config(self, mocked_loader, _):
        """Without an app config, nothing is loaded and the data is empty.

        :param mocked_loader: mock of function json_file_to_dict
        :param _: mock of function _process_config_and_overrides (not tested)
        """
        config = Config(self.target)

        mocked_loader.assert_not_called()
        self.assertEqual(config.app_config_data, {},
                         "app_config_data should be set an empty dictionary")

    @patch.object(Config, '_process_config_and_overrides')
    @patch('os.path.isfile')
    @patch('tools.config.json_file_to_dict')
    def test_init_no_app_config_with_dir(self, mocked_loader, mocked_isfile, _):
        """With only a top-level directory, mbed_app.json is found there.

        :param mocked_loader: mock of function json_file_to_dict
        :param mocked_isfile: mock of os.path.isfile
        :param _: mock of function _process_config_and_overrides (not tested)
        """
        top_dir = '.'
        expected_path = os.path.join('.', 'mbed_app.json')
        fake_data = {'config': 'test'}
        mocked_loader.return_value = fake_data
        mocked_isfile.return_value = True

        config = Config(self.target, [top_dir])

        mocked_isfile.assert_called_with(expected_path)
        mocked_loader.assert_called_once_with(expected_path)
        self.assertEqual(config.app_config_data, fake_data,
                         "app_config_data should be set to the returned value")

    @patch.object(Config, '_process_config_and_overrides')
    @patch('tools.config.json_file_to_dict')
    def test_init_override_app_config(self, mocked_loader, _):
        """app_config wins over the top-level directory when both are given.

        :param mocked_loader: mock of function json_file_to_dict
        :param _: mock of function _process_config_and_overrides (not tested)
        """
        config_path = "app_config"
        top_dir = '.'
        fake_data = {'config': 'test'}
        mocked_loader.return_value = fake_data

        config = Config(self.target, [top_dir], app_config=config_path)

        mocked_loader.assert_called_once_with(config_path)
        self.assertEqual(config.app_config_data, fake_data,
                         "app_config_data should be set to the returned value")
# Allow running this test module directly (python config_test.py).
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
ilexius/odoo | addons/account/models/account_bank_statement.py | 4 | 46774 | # -*- coding: utf-8 -*-
from openerp import api, fields, models, _
from openerp.osv import expression
from openerp.tools import float_is_zero
from openerp.tools import float_compare, float_round
from openerp.tools.misc import formatLang
from openerp.exceptions import UserError, ValidationError
import time
import math
class AccountCashboxLine(models.Model):
    """ Cash Box Details """
    _name = 'account.cashbox.line'
    _description = 'CashBox Line'
    _rec_name = 'coin_value'
    _order = 'coin_value'

    @api.one
    @api.depends('coin_value', 'number')
    def _sub_total(self):
        """ Calculates Sub total"""
        # subtotal = face value of one coin/bill * how many were counted
        self.subtotal = self.coin_value * self.number

    # Face value of a single coin or bill (e.g. 0.05, 20.00).
    coin_value = fields.Float(string='Coin/Bill Value', required=True, digits=0)
    # How many units of this coin/bill are in the cashbox.
    number = fields.Integer(string='Number of Coins/Bills', help='Opening Unit Numbers')
    # Computed line total (coin_value * number); read-only, not stored.
    subtotal = fields.Float(compute='_sub_total', string='Subtotal', digits=0, readonly=True)
    # Cashbox (account.bank.statement.cashbox) this detail line belongs to.
    cashbox_id = fields.Many2one('account.bank.statement.cashbox')
class AccountBankStmtCashWizard(models.Model):
    """Popup model used to enter the detailed cash count (coins/bills) that
    backs the opening or closing balance of a bank statement."""
    _name = 'account.bank.statement.cashbox'
    _description = 'Account Bank Statement Cashbox Details'

    cashbox_lines_ids = fields.One2many('account.cashbox.line', 'cashbox_id', string='Cashbox Lines')

    @api.multi
    def validate(self):
        """Write the counted total onto the target statement and close the popup.

        The context's 'balance' key selects whether the opening or the closing
        balance of the statement is being set.
        """
        statement_id = self.env.context.get('bank_statement_id', False) or self.env.context.get('active_id', False)
        statement = self.env['account.bank.statement'].browse(statement_id)
        counted_total = sum((detail.subtotal for detail in self.cashbox_lines_ids), 0.0)
        if self.env.context.get('balance', False) == 'start':
            # The wizard was opened to count the starting balance.
            statement.write({'balance_start': counted_total, 'cashbox_start_id': self.id})
        else:
            # The wizard was opened to count the closing balance.
            statement.write({'balance_end_real': counted_total, 'cashbox_end_id': self.id})
        return {'type': 'ir.actions.act_window_close'}
class AccountBankStmtCloseCheck(models.TransientModel):
    """Wizard confirming a bank statement once its closing balance is correct."""
    _name = 'account.bank.statement.closebalance'
    _description = 'Account Bank Statement closing balance'

    @api.multi
    def validate(self):
        """Confirm the active bank statement, then close the wizard window."""
        statement_id = self.env.context.get('active_id', False)
        if statement_id:
            statement = self.env['account.bank.statement'].browse(statement_id)
            statement.button_confirm_bank()
        return {'type': 'ir.actions.act_window_close'}
class AccountBankStatement(models.Model):
    """Bank or cash statement: a dated set of transaction lines on a journal.

    The statement can only be confirmed once the computed ending balance
    (opening balance + transactions subtotal) matches the real ending balance
    entered by the user, and every line has been reconciled or given a
    counterpart account.
    """

    @api.one
    @api.depends('line_ids', 'balance_start', 'line_ids.amount', 'balance_end_real')
    def _end_balance(self):
        """Compute the transactions subtotal, the theoretical ending balance
        and the difference with the real ending balance entered by the user."""
        self.total_entry_encoding = sum([line.amount for line in self.line_ids])
        self.balance_end = self.balance_start + self.total_entry_encoding
        self.difference = self.balance_end_real - self.balance_end

    @api.one
    @api.depends('journal_id')
    def _compute_currency(self):
        """Statement currency: the journal's currency, else the company's."""
        self.currency_id = self.journal_id.currency_id or self.env.user.company_id.currency_id

    @api.one
    @api.depends('line_ids.journal_entry_ids')
    def _check_lines_reconciled(self):
        """True when every line is reconciled or has a counterpart account set."""
        self.all_lines_reconciled = all([line.journal_entry_ids.ids or line.account_id.id for line in self.line_ids])

    @api.model
    def _default_journal(self):
        """Default journal: the first journal matching the context's
        journal_type for the default company, or False if none matches."""
        journal_type = self.env.context.get('journal_type', False)
        company_id = self.env['res.company']._company_default_get('account.bank.statement').id
        if journal_type:
            journals = self.env['account.journal'].search([('type', '=', journal_type), ('company_id', '=', company_id)])
            if journals:
                return journals[0]
        return False

    @api.multi
    def _set_opening_balance(self, journal_id):
        """Set each statement's opening balance to the closing balance of the
        last confirmed statement of the given journal (0 if there is none)."""
        last_bnk_stmt = self.search([('journal_id', '=', journal_id), ('state', '=', 'confirm')], order="date_done desc", limit=1)
        for bank_stmt in self:
            if last_bnk_stmt:
                bank_stmt.balance_start = last_bnk_stmt.balance_end
            else:
                bank_stmt.balance_start = 0

    @api.model
    def _default_opening_balance(self):
        """Default opening balance: the closing balance of the previous
        confirmed statement of the context's journal, 0 otherwise."""
        journal_id = self._context.get('default_journal_id', False) or self._context.get('journal_id', False)
        if journal_id:
            last_bnk_stmt = self.search([('journal_id', '=', journal_id), ('state', '=', 'confirm')], order="date_done desc", limit=1)
            if last_bnk_stmt:
                return last_bnk_stmt.balance_end
            else:
                return 0
        else:
            return 0

    _name = "account.bank.statement"
    _description = "Bank Statement"
    _order = "date desc, id desc"
    _inherit = ['mail.thread']

    name = fields.Char(string='Reference', states={'open': [('readonly', False)]}, copy=False, readonly=True)
    date = fields.Date(required=True, states={'confirm': [('readonly', True)]}, select=True, copy=False, default=fields.Date.context_today)
    date_done = fields.Datetime(string="Closed On")
    balance_start = fields.Monetary(string='Starting Balance', states={'confirm': [('readonly', True)]}, default=_default_opening_balance)
    balance_end_real = fields.Monetary('Ending Balance', states={'confirm': [('readonly', True)]})
    state = fields.Selection([('open', 'New'), ('confirm', 'Validated')], string='Status', required=True, readonly=True, copy=False, default='open')
    currency_id = fields.Many2one('res.currency', compute='_compute_currency', oldname='currency')
    journal_id = fields.Many2one('account.journal', string='Journal', required=True, states={'confirm': [('readonly', True)]}, default=_default_journal)
    journal_type = fields.Selection(related='journal_id.type', help="Technical field used for usability purposes")
    company_id = fields.Many2one('res.company', related='journal_id.company_id', string='Company', store=True, readonly=True,
        default=lambda self: self.env['res.company']._company_default_get('account.bank.statement'))
    total_entry_encoding = fields.Monetary('Transactions Subtotal', compute='_end_balance', store=True, help="Total of transaction lines.")
    balance_end = fields.Monetary('Computed Balance', compute='_end_balance', store=True, help='Balance as calculated based on Opening Balance and transaction lines')
    difference = fields.Monetary(compute='_end_balance', store=True, help="Difference between the computed ending balance and the specified ending balance.")
    line_ids = fields.One2many('account.bank.statement.line', 'statement_id', string='Statement lines', states={'confirm': [('readonly', True)]}, copy=True)
    move_line_ids = fields.One2many('account.move.line', 'statement_id', string='Entry lines', states={'confirm': [('readonly', True)]})
    all_lines_reconciled = fields.Boolean(compute='_check_lines_reconciled')
    user_id = fields.Many2one('res.users', string='Responsible', required=False, default=lambda self: self.env.user)
    cashbox_start_id = fields.Many2one('account.bank.statement.cashbox')
    cashbox_end_id = fields.Many2one('account.bank.statement.cashbox')

    @api.onchange('journal_id')
    def onchange_journal_id(self):
        """Refresh the opening balance when the journal changes in the UI."""
        self._set_opening_balance(self.journal_id.id)

    @api.multi
    def _balance_check(self):
        """Check computed vs. real ending balance before confirmation.

        Cash journals book a non-zero difference as a profit/loss statement
        line; other journals refuse validation with an explicit error.

        :raises UserError: if the cash journal has no profit/loss account, or
            if a non-cash statement does not balance.
        """
        for stmt in self:
            if not stmt.currency_id.is_zero(stmt.difference):
                if stmt.journal_type == 'cash':
                    if stmt.difference < 0.0:
                        account = stmt.journal_id.loss_account_id
                        name = _('Loss')
                    else:
                        # statement.difference > 0.0
                        account = stmt.journal_id.profit_account_id
                        name = _('Profit')
                    if not account:
                        raise UserError(_('There is no account defined on the journal %s for %s involved in a cash difference.') % (stmt.journal_id.name, name))
                    values = {
                        'statement_id': stmt.id,
                        'account_id': account.id,
                        'amount': stmt.difference,
                        'name': _("Cash difference observed during the counting (%s)") % name,
                    }
                    self.env['account.bank.statement.line'].create(values)
                else:
                    balance_end_real = formatLang(self.env, stmt.balance_end_real, currency_obj=stmt.currency_id)
                    balance_end = formatLang(self.env, stmt.balance_end, currency_obj=stmt.currency_id)
                    raise UserError(_('The ending balance is incorrect !\nThe expected balance (%s) is different from the computed one. (%s)')
                        % (balance_end_real, balance_end))
        return True

    @api.model
    def create(self, vals):
        """Create a statement, drawing a name from the journal sequence
        (dated at the statement date) when none is provided."""
        if not vals.get('name'):
            journal_id = vals.get('journal_id', self._context.get('default_journal_id', False))
            journal = self.env['account.journal'].browse(journal_id)
            vals['name'] = journal.sequence_id.with_context(ir_sequence_date=vals.get('date')).next_by_id()
        return super(AccountBankStatement, self).create(vals)

    @api.multi
    def unlink(self):
        """Delete statements, refusing any that is not in the 'open' state."""
        for statement in self:
            if statement.state != 'open':
                raise UserError(_('In order to delete a bank statement, you must first cancel it to delete related journal items.'))
            # Explicitly unlink bank statement lines so it will check that the related journal entries have been deleted first
            statement.line_ids.unlink()
        return super(AccountBankStatement, self).unlink()

    @api.multi
    def open_cashbox_id(self):
        """Return the act_window opening the cashbox detail popup for this
        statement (context key 'cashbox_id' selects the record to edit)."""
        context = dict(self.env.context or {})
        if context.get('cashbox_id'):
            context['active_id'] = self.id
        return {
            'name': _('Cash Control'),
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': 'account.bank.statement.cashbox',
            'view_id': self.env.ref('account.view_account_bnk_stmt_cashbox').id,
            'type': 'ir.actions.act_window',
            'res_id': self.env.context.get('cashbox_id'),
            'context': context,
            'target': 'new'
        }

    @api.multi
    def button_cancel(self):
        """Reset statements to 'open', refusing if any line is reconciled."""
        for statement in self:
            if any(line.journal_entry_ids.ids for line in statement.line_ids):
                raise UserError(_('A statement cannot be canceled when its lines are reconciled.'))
        self.state = 'open'

    @api.multi
    def check_confirm_bank(self):
        """Confirm the statement; for an unbalanced cash statement, open the
        closing-balance check wizard instead."""
        if self.journal_type == 'cash' and not self.currency_id.is_zero(self.difference):
            action_rec = self.env['ir.model.data'].xmlid_to_object('account.action_view_account_bnk_stmt_check')
            if action_rec:
                action = action_rec.read([])[0]
                return action
        return self.button_confirm_bank()

    @api.multi
    def button_confirm_bank(self):
        """Validate open statements: create counterparts for lines with an
        account set, post all journal entries, and mark the statement
        'confirm' with the closing datetime."""
        self._balance_check()
        statements = self.filtered(lambda r: r.state == 'open')
        for statement in statements:
            moves = self.env['account.move']
            for st_line in statement.line_ids:
                if st_line.account_id and not st_line.journal_entry_ids.ids:
                    st_line.fast_counterpart_creation()
                elif not st_line.journal_entry_ids.ids:
                    raise UserError(_('All the account entries lines must be processed in order to close the statement.'))
                moves = (moves | st_line.journal_entry_ids)
            if moves:
                moves.post()
            statement.message_post(body=_('Statement %s confirmed, journal items were created.') % (statement.name,))
        statements.link_bank_to_partner()
        statements.write({'state': 'confirm', 'date_done': time.strftime("%Y-%m-%d %H:%M:%S")})

    @api.multi
    def button_journal_entries(self):
        """Return the act_window listing the journal items of these statements."""
        context = dict(self._context or {})
        context['journal_id'] = self.journal_id.id
        return {
            'name': _('Journal Items'),
            'view_type': 'form',
            'view_mode': 'tree',
            'res_model': 'account.move.line',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'domain': [('statement_id', 'in', self.ids)],
            'context': context,
        }

    @api.multi
    def button_open(self):
        """ Changes statement state to Running."""
        for statement in self:
            if not statement.name:
                # BUG FIX: this used to be written {'ir_sequence_date', statement.date},
                # which builds a SET instead of a dict, so with_context() never
                # received the statement date for the sequence.
                context = {'ir_sequence_date': statement.date}
                if statement.journal_id.sequence_id:
                    st_number = statement.journal_id.sequence_id.with_context(context).next_by_id()
                else:
                    SequenceObj = self.env['ir.sequence']
                    st_number = SequenceObj.with_context(context).next_by_code('account.bank.statement')
                statement.name = st_number
            statement.state = 'open'

    @api.multi
    def reconciliation_widget_preprocess(self):
        """ Get statement lines of the specified statements or all unreconciled statement lines and try to automatically reconcile them / find them a partner.
            Return ids of statement lines left to reconcile and other data for the reconciliation widget.
        """
        statements = self
        bsl_obj = self.env['account.bank.statement.line']
        # NB : The field account_id can be used at the statement line creation/import to avoid the reconciliation process on it later on,
        # this is why we filter out statements lines where account_id is set
        st_lines_filter = [('journal_entry_ids', '=', False), ('account_id', '=', False)]
        if statements:
            st_lines_filter += [('statement_id', 'in', statements.ids)]

        # Try to automatically reconcile statement lines
        automatic_reconciliation_entries = []
        st_lines_left = self.env['account.bank.statement.line']
        for st_line in bsl_obj.search(st_lines_filter):
            res = st_line.auto_reconcile()
            if not res:
                st_lines_left = (st_lines_left | st_line)
            else:
                automatic_reconciliation_entries.append(res.ids)

        # Try to set statement line's partner
        for st_line in st_lines_left:
            if st_line.name and not st_line.partner_id:
                additional_domain = [('ref', '=', st_line.name)]
                match_recs = st_line.get_move_lines_for_reconciliation(limit=1, additional_domain=additional_domain, overlook_partner=True)
                if match_recs and match_recs[0].partner_id:
                    st_line.write({'partner_id': match_recs[0].partner_id.id})

        # Collect various informations for the reconciliation widget
        notifications = []
        num_auto_reconciled = len(automatic_reconciliation_entries)
        if num_auto_reconciled > 0:
            auto_reconciled_message = num_auto_reconciled > 1 \
                and _("%d transactions were automatically reconciled.") % num_auto_reconciled \
                or _("1 transaction was automatically reconciled.")
            notifications += [{
                'type': 'info',
                'message': auto_reconciled_message,
                'details': {
                    'name': _("Automatically reconciled items"),
                    'model': 'account.move',
                    'ids': automatic_reconciliation_entries
                }
            }]
        lines = []
        for el in statements:
            lines.extend(el.line_ids.ids)
        lines = list(set(lines))
        return {
            'st_lines_ids': st_lines_left.ids,
            'notifications': notifications,
            'statement_name': len(statements) == 1 and statements[0].name or False,
            'num_already_reconciled_lines': statements and bsl_obj.search_count([('journal_entry_ids', '!=', False), ('id', 'in', lines)]) or 0,
        }

    @api.multi
    def link_bank_to_partner(self):
        """Propagate each line's partner onto its bank account record when
        they disagree, so future imports match the right partner."""
        for statement in self:
            for st_line in statement.line_ids:
                if st_line.bank_account_id and st_line.partner_id and st_line.bank_account_id.partner_id != st_line.partner_id:
                    st_line.bank_account_id.partner_id = st_line.partner_id
class AccountBankStatementLine(models.Model):
    """A single transaction on a bank statement."""
    _name = "account.bank.statement.line"
    _description = "Bank Statement Line"
    _order = "statement_id desc, sequence"
    _inherit = ['ir.needaction_mixin']

    # Label/memo of the transaction as written on the bank statement.
    name = fields.Char(string='Memo', required=True)
    # Transaction date; defaults from context or today.
    date = fields.Date(required=True, default=lambda self: self._context.get('date', fields.Date.context_today(self)))
    # Signed amount in the statement (journal) currency.
    amount = fields.Monetary(digits=0, currency_field='journal_currency_id')
    journal_currency_id = fields.Many2one('res.currency', related='statement_id.currency_id',
        help='Utility field to express amount currency', readonly=True)
    partner_id = fields.Many2one('res.partner', string='Partner')
    bank_account_id = fields.Many2one('res.partner.bank', string='Bank Account')
    account_id = fields.Many2one('account.account', string='Counterpart Account', domain=[('deprecated', '=', False)],
        help="This technical field can be used at the statement line creation/import time in order to avoid the reconciliation"
             " process on it later on. The statement line will simply create a counterpart on this account")
    # Parent statement; deleting the statement cascades to its lines.
    statement_id = fields.Many2one('account.bank.statement', string='Statement', index=True, required=True, ondelete='cascade')
    journal_id = fields.Many2one('account.journal', related='statement_id.journal_id', string='Journal', store=True, readonly=True)
    partner_name = fields.Char(help="This field is used to record the third party name when importing bank statement in electronic format,"
             " when the partner doesn't exist yet in the database (or cannot be found).")
    ref = fields.Char(string='Reference')
    note = fields.Text(string='Notes')
    sequence = fields.Integer(index=True, help="Gives the sequence order when displaying a list of bank statement lines.", default=1)
    company_id = fields.Many2one('res.company', related='statement_id.company_id', string='Company', store=True, readonly=True)
    # Journal entries created when this line is reconciled.
    journal_entry_ids = fields.One2many('account.move', 'statement_line_id', 'Journal Entries', copy=False, readonly=True)
    amount_currency = fields.Monetary(help="The amount expressed in an optional other currency if it is a multi-currency entry.")
    currency_id = fields.Many2one('res.currency', string='Currency', help="The optional other currency if it is a multi-currency entry.")
@api.one
@api.constrains('amount')
def _check_amount(self):
# This constraint could possibly underline flaws in bank statement import (eg. inability to
# support hacks such as using dummy transactions to give additional informations)
if self.amount == 0:
raise ValidationError(_('A transaction can\'t have a 0 amount.'))
@api.one
@api.constrains('amount', 'amount_currency')
def _check_amount_currency(self):
if self.amount_currency != 0 and self.amount == 0:
raise ValidationError(_('If "Amount Currency" is specified, then "Amount" must be as well.'))
@api.multi
def unlink(self):
for line in self:
if line.journal_entry_ids.ids:
raise UserError(_('In order to delete a bank statement line, you must first cancel it to delete related journal items.'))
return super(AccountBankStatementLine, self).unlink()
@api.model
def _needaction_domain_get(self):
return [('journal_entry_ids', '=', False), ('account_id', '=', False)]
    @api.multi
    def button_cancel_reconciliation(self):
        """Undo the reconciliation of these lines: unreconcile, detach, cancel
        and delete the journal entries that were created for them."""
        # TOCKECK : might not behave as expected in case of reconciliations (match statement line with already
        # registered payment) or partial reconciliations : it will completely remove the existing payment.
        move_recs = self.env['account.move']
        for st_line in self:
            move_recs = (move_recs | st_line.journal_entry_ids)
        if move_recs:
            # Order matters: unreconcile first, then detach from the statement
            # line, then cancel so the posted moves become deletable.
            for move in move_recs:
                move.line_ids.remove_move_reconcile()
            move_recs.write({'statement_line_id': False})
            move_recs.button_cancel()
            move_recs.unlink()
####################################################
# Reconciliation interface methods
####################################################
    @api.multi
    def get_data_for_reconciliation_widget(self, excluded_ids=None):
        """ Returns the data required to display a reconciliation widget, for each statement line in self """
        excluded_ids = excluded_ids or []
        ret = []
        for st_line in self:
            aml_recs = st_line.get_reconciliation_proposition(excluded_ids=excluded_ids)
            # Display currency: line currency, else journal, else company.
            target_currency = st_line.currency_id or st_line.journal_id.currency_id or st_line.journal_id.company_id.currency_id
            rp = aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=st_line.date)
            # Grow the exclusion list so the same move line is not proposed
            # for two different statement lines of this batch.
            excluded_ids += [move_line['id'] for move_line in rp]
            ret.append({
                'st_line': st_line.get_statement_line_for_reconciliation_widget(),
                'reconciliation_proposition': rp
            })
        return ret
    def get_statement_line_for_reconciliation_widget(self):
        """ Returns the data required by the bank statement reconciliation widget to display a statement line """
        statement_currency = self.journal_id.currency_id or self.journal_id.company_id.currency_id
        if self.amount_currency and self.currency_id:
            # NOTE(review): the swap below looks intentional — the widget
            # displays the foreign-currency amount as the main amount and the
            # journal-currency amount as the secondary string; confirm against
            # the widget's JS before "fixing".
            amount = self.amount_currency
            amount_currency = self.amount
            amount_currency_str = amount_currency > 0 and amount_currency or -amount_currency
            amount_currency_str = formatLang(self.env, amount_currency_str, currency_obj=statement_currency)
        else:
            amount = self.amount
            amount_currency_str = ""
        amount_str = formatLang(self.env, abs(amount), currency_obj=self.currency_id or statement_currency)

        data = {
            'id': self.id,
            'ref': self.ref,
            'note': self.note or "",
            'name': self.name,
            'date': self.date,
            'amount': amount,
            'amount_str': amount_str, # Amount in the statement line currency
            'currency_id': self.currency_id.id or statement_currency.id,
            'partner_id': self.partner_id.id,
            'journal_id': self.journal_id.id,
            'statement_id': self.statement_id.id,
            'account_code': self.journal_id.default_debit_account_id.code,
            'account_name': self.journal_id.default_debit_account_id.name,
            'partner_name': self.partner_id.name,
            'communication_partner_name': self.partner_name,
            'amount_currency_str': amount_currency_str, # Amount in the statement currency
            'has_no_partner': not self.partner_id.id,
        }
        if self.partner_id:
            # Receivable for money in, payable for money out.
            if amount > 0:
                data['open_balance_account_id'] = self.partner_id.property_account_receivable_id.id
            else:
                data['open_balance_account_id'] = self.partner_id.property_account_payable_id.id
        return data
@api.multi
def get_move_lines_for_reconciliation_widget(self, excluded_ids=None, str=False, offset=0, limit=None):
""" Returns move lines for the bank statement reconciliation widget, formatted as a list of dicts
"""
aml_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str, offset=offset, limit=limit)
target_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
return aml_recs.prepare_move_lines_for_reconciliation_widget(target_currency=target_currency, target_date=self.date)
####################################################
# Reconciliation methods
####################################################
    def get_move_lines_for_reconciliation(self, excluded_ids=None, str=False, offset=0, limit=None, additional_domain=None, overlook_partner=False):
        """ Return account.move.line records which can be used for bank statement reconciliation.

            :param excluded_ids: move line ids to exclude from the result
            :param str: search string typed by the user in the widget
            :param offset: search offset (pagination)
            :param limit: maximum number of records to return
            :param additional_domain: extra domain ANDed to the computed one
            :param overlook_partner: if True, search payable/receivable lines
                even when the statement line has no partner
        """
        # Domain to fetch registered payments (use case where you encode the payment before you get the bank statement)
        reconciliation_aml_accounts = [self.journal_id.default_credit_account_id.id, self.journal_id.default_debit_account_id.id]
        domain_reconciliation = ['&', ('statement_id', '=', False), ('account_id', 'in', reconciliation_aml_accounts)]

        # Domain to fetch unreconciled payables/receivables (use case where you close invoices/refunds by reconciling your bank statements)
        domain_matching = [('reconciled', '=', False)]
        if self.partner_id.id or overlook_partner:
            domain_matching = expression.AND([domain_matching, [('account_id.internal_type', 'in', ['payable', 'receivable'])]])
        else:
            # TODO : find out what use case this permits (match a check payment, registered on a journal whose account type is other instead of liquidity)
            domain_matching = expression.AND([domain_matching, [('account_id.reconcile', '=', True)]])

        # Let's add what applies to both
        domain = expression.OR([domain_reconciliation, domain_matching])
        if self.partner_id.id and not overlook_partner:
            domain = expression.AND([domain, [('partner_id', '=', self.partner_id.id)]])

        # Domain factorized for all reconciliation use cases
        ctx = dict(self._context or {})
        ctx['bank_statement_line'] = self
        generic_domain = self.env['account.move.line'].with_context(ctx).domain_move_lines_for_reconciliation(excluded_ids=excluded_ids, str=str)
        domain = expression.AND([domain, generic_domain])

        # Domain from caller
        if additional_domain is None:
            additional_domain = []
        else:
            additional_domain = expression.normalize_domain(additional_domain)
        domain = expression.AND([domain, additional_domain])

        return self.env['account.move.line'].search(domain, offset=offset, limit=limit, order="date_maturity asc, id asc")
    def _get_domain_maker_move_line_amount(self):
        """ Returns a function that can create the appropriate domain to search on move.line amount based on statement.line currency/amount """
        company_currency = self.journal_id.company_id.currency_id
        st_line_currency = self.currency_id or self.journal_id.currency_id
        # Only use a foreign currency when it differs from the company's.
        currency = (st_line_currency and st_line_currency != company_currency) and st_line_currency.id or False
        # Compare against the residual in the relevant currency.
        field = currency and 'amount_residual_currency' or 'amount_residual'
        precision = st_line_currency and st_line_currency.decimal_places or company_currency.decimal_places

        def ret(comparator, amount, p=precision, f=field, c=currency):
            # p/f/c are bound as defaults so the closure captures the values
            # computed above, not late-bound names.
            if comparator == '<':
                if amount < 0:
                    # Negative target: residual strictly between amount and 0.
                    domain = [(f, '<', 0), (f, '>', amount)]
                else:
                    # Positive target: residual strictly between 0 and amount.
                    domain = [(f, '>', 0), (f, '<', amount)]
            elif comparator == '=':
                domain = [(f, '=', float_round(amount, precision_digits=p))]
            else:
                raise UserError(_("Programmation error : domain_maker_move_line_amount requires comparator '=' or '<'"))
            domain += [('currency_id', '=', c)]
            return domain
        return ret
    def get_reconciliation_proposition(self, excluded_ids=None):
        """ Returns move lines that constitute the best guess to reconcile a statement line
            Note: it only looks for move lines in the same currency as the statement line.
        """
        # Look for structured communication match
        if self.name:
            overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
            domain = [('ref', '=', self.name)]
            # limit=2 so we can tell "exactly one match" apart from "several".
            match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=2, additional_domain=domain, overlook_partner=overlook_partner)
            if match_recs and len(match_recs) == 1:
                return match_recs

        # How to compare statement line amount and move lines amount
        amount_domain_maker = self._get_domain_maker_move_line_amount()
        amount = self.amount_currency or self.amount

        # Look for a single move line with the same amount
        match_recs = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=1, additional_domain=amount_domain_maker('=', amount))
        if match_recs:
            return match_recs

        if not self.partner_id:
            return self.env['account.move.line']

        # Select move lines until their total amount is greater than the statement line amount
        domain = [('reconciled', '=', False)]
        domain += [('account_id.user_type_id.type', '=', amount > 0 and 'receivable' or 'payable')] # Make sure we can't mix receivable and payable
        domain += amount_domain_maker('<', amount) # Will also enforce > 0
        mv_lines = self.get_move_lines_for_reconciliation(excluded_ids=excluded_ids, limit=5, additional_domain=domain)
        st_line_currency = self.currency_id or self.journal_id.currency_id or self.journal_id.company_id.currency_id
        ret = self.env['account.move.line']
        total = 0
        for line in mv_lines:
            total += line.currency_id and line.amount_residual_currency or line.amount_residual
            # Stop once the accumulated residual reaches the line amount
            # (float_compare != -1 means total >= abs(amount) at the
            # currency's rounding).
            if float_compare(total, abs(amount), precision_digits=st_line_currency.rounding) != -1:
                break
            ret = (ret | line)
        return ret
    def _get_move_lines_for_auto_reconcile(self):
        """ Returns the move lines that the method auto_reconcile can use to try to reconcile the statement line """
        # Extension hook: intentionally a no-op here (returns None).
        pass
    @api.multi
    def auto_reconcile(self):
        """ Try to automatically reconcile the statement line; return the
            counterpart journal entry/ies if the automatic reconciliation
            succeeded, False otherwise.

            Matching strategy (ambiguity aborts instead of guessing):
            1. a move line whose ``ref`` equals the statement line's name and
               whose amount matches,
            2. failing that, a move line of the same partner with a matching
               amount.

            TODO : this method could be greatly improved and made extensible
        """
        self.ensure_one()
        match_recs = self.env['account.move.line']
        # How to compare statement line amount and move lines amount
        amount_domain_maker = self._get_domain_maker_move_line_amount()
        equal_amount_domain = amount_domain_maker('=', self.amount_currency or self.amount)
        # Look for structured communication match
        if self.name:
            overlook_partner = not self.partner_id # If the transaction has no partner, look for match in payable and receivable account anyway
            domain = equal_amount_domain + [('ref', '=', self.name)]
            # limit=2 so that a second candidate can be detected as ambiguous below
            match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=domain, overlook_partner=overlook_partner)
            if match_recs and len(match_recs) != 1:
                # More than one candidate: refuse to auto-reconcile
                return False
        # Look for a single move line with the same partner, the same amount
        if not match_recs:
            if self.partner_id:
                match_recs = self.get_move_lines_for_reconciliation(limit=2, additional_domain=equal_amount_domain)
                if match_recs and len(match_recs) != 1:
                    return False
        if not match_recs:
            return False
        # Now reconcile
        counterpart_aml_dicts = []
        payment_aml_rec = self.env['account.move.line']
        for aml in match_recs:
            if aml.account_id.internal_type == 'liquidity':
                # Liquidity lines are existing payments: they are linked, not re-reconciled
                payment_aml_rec = (payment_aml_rec | aml)
            else:
                # Residual amount, expressed in the move line's own currency when it has one
                amount = aml.currency_id and aml.amount_residual_currency or aml.amount_residual
                counterpart_aml_dicts.append({
                    'name': aml.name if aml.name != '/' else aml.move_id.name,
                    'debit': amount < 0 and -amount or 0,
                    'credit': amount > 0 and amount or 0,
                    'move_line': aml
                })
        try:
            # Savepoint so a failed reconciliation leaves no partial journal entries behind
            with self._cr.savepoint():
                counterpart = self.process_reconciliation(counterpart_aml_dicts=counterpart_aml_dicts, payment_aml_rec=payment_aml_rec)
            return counterpart
        except UserError:
            # A configuration / business logic error that makes it impossible to auto-reconcile should not be raised
            # since automatic reconciliation is just an amenity and the user will get the same exception when manually
            # reconciling. Other types of exception are (hopefully) programmation errors and should cause a stacktrace.
            self.invalidate_cache()
            self.env['account.move'].invalidate_cache()
            self.env['account.move.line'].invalidate_cache()
            return False
def _prepare_reconciliation_move(self, move_name):
""" Prepare the dict of values to create the move from a statement line. This method may be overridden to adapt domain logic
through model inheritance (make sure to call super() to establish a clean extension chain).
:param char st_line_number: will be used as the name of the generated account move
:return: dict of value to create() the account.move
"""
return {
'statement_line_id': self.id,
'journal_id': self.statement_id.journal_id.id,
'date': self.date,
'name': move_name,
'ref': self.ref,
}
    def _prepare_reconciliation_move_line(self, move, amount):
        """ Prepare the dict of values to create the move line from a statement line.

            :param recordset move: the account.move to link the move line
            :param float amount: the amount of transaction that wasn't already reconciled,
                expressed in the journal (statement) currency
        """
        company_currency = self.journal_id.company_id.currency_id
        statement_currency = self.journal_id.currency_id or company_currency
        st_line_currency = self.currency_id or statement_currency
        amount_currency = False
        if statement_currency != company_currency or st_line_currency != company_currency:
            # First get the ratio total amount / amount not already reconciled
            # NOTE(review): assumes `amount` is never zero here, otherwise this divides by
            # zero — confirm callers only pass a non-zero residual.
            if statement_currency == company_currency:
                total_amount = self.amount
            elif st_line_currency == company_currency:
                total_amount = self.amount_currency
            else:
                # Neither amount is in company currency: convert at the statement date rate
                total_amount = statement_currency.with_context({'date': self.date}).compute(self.amount, company_currency)
            ratio = total_amount / amount
            # Then use it to adjust the statement.line field that correspond to the move.line amount_currency
            if statement_currency != company_currency:
                amount_currency = self.amount * ratio
            elif st_line_currency != company_currency:
                amount_currency = self.amount_currency * ratio
        return {
            'name': self.name,
            'date': self.date,
            'ref': self.ref,
            'move_id': move.id,
            'partner_id': self.partner_id and self.partner_id.id or False,
            # Money in (amount >= 0) hits the journal's credit account, money out the debit one
            'account_id': amount >= 0 \
                and self.statement_id.journal_id.default_credit_account_id.id \
                or self.statement_id.journal_id.default_debit_account_id.id,
            'credit': amount < 0 and -amount or 0.0,
            'debit': amount > 0 and amount or 0.0,
            'statement_id': self.statement_id.id,
            'journal_id': self.statement_id.journal_id.id,
            # Secondary currency: the statement currency when it differs from the company
            # currency, else the transaction currency when that one differs, else none
            'currency_id': statement_currency != company_currency and statement_currency.id or (st_line_currency != company_currency and st_line_currency.id or False),
            'amount_currency': amount_currency,
        }
    @api.v7
    def process_reconciliations(self, cr, uid, ids, data, context=None):
        """ Handles data sent from the bank statement reconciliation widget (and can otherwise serve as an old-API bridge)

            :param list of dicts data: must contains the keys 'counterpart_aml_dicts', 'payment_aml_ids' and 'new_aml_dicts',
                whose value is the same as described in process_reconciliation except that ids are used instead of recordsets.
        """
        aml_obj = self.pool['account.move.line']
        # `ids` and `data` are expected to be parallel lists (one datum per statement line)
        for id, datum in zip(ids, data):
            st_line = self.browse(cr, uid, id, context)
            payment_aml_rec = aml_obj.browse(cr, uid, datum.get('payment_aml_ids', []), context)
            for aml_dict in datum.get('counterpart_aml_dicts', []):
                # Replace the id sent by the widget with the corresponding recordset,
                # mutating the caller-supplied dict in place
                aml_dict['move_line'] = aml_obj.browse(cr, uid, aml_dict['counterpart_aml_id'], context)
                del aml_dict['counterpart_aml_id']
            st_line.process_reconciliation(datum.get('counterpart_aml_dicts', []), payment_aml_rec, datum.get('new_aml_dicts', []))
def fast_counterpart_creation(self):
for st_line in self:
# Technical functionality to automatically reconcile by creating a new move line
vals = {
'name': st_line.name,
'debit': st_line.amount < 0 and -st_line.amount or 0.0,
'credit': st_line.amount > 0 and st_line.amount or 0.0,
'account_id': st_line.account_id.id,
}
st_line.process_reconciliation(new_aml_dicts=[vals])
def process_reconciliation(self, counterpart_aml_dicts=None, payment_aml_rec=None, new_aml_dicts=None):
""" Match statement lines with existing payments (eg. checks) and/or payables/receivables (eg. invoices and refunds) and/or new move lines (eg. write-offs).
If any new journal item needs to be created (via new_aml_dicts or counterpart_aml_dicts), a new journal entry will be created and will contain those
items, as well as a journal item for the bank statement line.
Finally, mark the statement line as reconciled by putting the matched moves ids in the column journal_entry_ids.
:param (list of dicts) counterpart_aml_dicts: move lines to create to reconcile with existing payables/receivables.
The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'move_line'
# The move line to reconcile (partially if specified debit/credit is lower than move line's credit/debit)
:param (list of recordsets) payment_aml_rec: recordset move lines representing existing payments (which are already fully reconciled)
:param (list of dicts) new_aml_dicts: move lines to create. The expected keys are :
- 'name'
- 'debit'
- 'credit'
- 'account_id'
- (optional) 'tax_ids'
- (optional) Other account.move.line fields like analytic_account_id or analytics_id
:returns: The journal entries with which the transaction was matched. If there was at least an entry in counterpart_aml_dicts or new_aml_dicts, this list contains
the move created by the reconciliation, containing entries for the statement.line (1), the counterpart move lines (0..*) and the new move lines (0..*).
"""
counterpart_aml_dicts = counterpart_aml_dicts or []
payment_aml_rec = payment_aml_rec or self.env['account.move.line']
new_aml_dicts = new_aml_dicts or []
aml_obj = self.env['account.move.line']
company_currency = self.journal_id.company_id.currency_id
statement_currency = self.journal_id.currency_id or company_currency
st_line_currency = self.currency_id or statement_currency
counterpart_moves = self.env['account.move']
# Check and prepare received data
if self.journal_entry_ids.ids:
raise UserError(_('The bank statement line was already reconciled.'))
if any(rec.statement_id for rec in payment_aml_rec):
raise UserError(_('A selected move line was already reconciled.'))
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].reconciled:
raise UserError(_('A selected move line was already reconciled.'))
if isinstance(aml_dict['move_line'], (int, long)):
aml_dict['move_line'] = aml_obj.browse(aml_dict['move_line'])
for aml_dict in (counterpart_aml_dicts + new_aml_dicts):
if aml_dict.get('tax_ids') and aml_dict['tax_ids'] and isinstance(aml_dict['tax_ids'][0], (int, long)):
# Transform the value in the format required for One2many and Many2many fields
aml_dict['tax_ids'] = map(lambda id: (4, id, None), aml_dict['tax_ids'])
# Fully reconciled moves are just linked to the bank statement
for aml_rec in payment_aml_rec:
aml_rec.write({'statement_id': self.statement_id.id})
aml_rec.move_id.write({'statement_line_id': self.id})
counterpart_moves = (counterpart_moves | aml_rec.move_id)
# Create move line(s). Either matching an existing journal entry (eg. invoice), in which
# case we reconcile the existing and the new move lines together, or being a write-off.
if counterpart_aml_dicts or new_aml_dicts:
st_line_currency = self.currency_id or statement_currency
st_line_currency_rate = self.currency_id and (self.amount_currency / self.amount) or False
# Create the move
move_name = (self.statement_id.name or self.name) + "/" + str(self.sequence)
move_vals = self._prepare_reconciliation_move(move_name)
move = self.env['account.move'].create(move_vals)
move.post()
counterpart_moves = (counterpart_moves | move)
# Complete dicts to create both counterpart move lines and write-offs
to_create = (counterpart_aml_dicts + new_aml_dicts)
ctx = dict(self._context, date=self.date)
for aml_dict in to_create:
aml_dict['move_id'] = move.id
aml_dict['date'] = self.statement_id.date
aml_dict['partner_id'] = self.partner_id.id
aml_dict['journal_id'] = self.journal_id.id
aml_dict['company_id'] = self.company_id.id
aml_dict['statement_id'] = self.statement_id.id
if st_line_currency.id != company_currency.id:
aml_dict['amount_currency'] = aml_dict['debit'] - aml_dict['credit']
aml_dict['currency_id'] = st_line_currency.id
if self.currency_id and statement_currency.id == company_currency.id and st_line_currency_rate:
# Statement is in company currency but the transaction is in foreign currency
aml_dict['debit'] = company_currency.round(aml_dict['debit'] / st_line_currency_rate)
aml_dict['credit'] = company_currency.round(aml_dict['credit'] / st_line_currency_rate)
elif self.currency_id and st_line_currency_rate:
# Statement is in foreign currency and the transaction is in another one
aml_dict['debit'] = statement_currency.with_context(ctx).compute(aml_dict['debit'] / st_line_currency_rate, company_currency)
aml_dict['credit'] = statement_currency.with_context(ctx).compute(aml_dict['credit'] / st_line_currency_rate, company_currency)
else:
# Statement is in foreign currency and no extra currency is given for the transaction
aml_dict['debit'] = st_line_currency.with_context(ctx).compute(aml_dict['debit'], company_currency)
aml_dict['credit'] = st_line_currency.with_context(ctx).compute(aml_dict['credit'], company_currency)
elif statement_currency.id != company_currency.id:
# Statement is in foreign currency but the transaction is in company currency
prorata_factor = (aml_dict['debit'] - aml_dict['credit']) / self.amount_currency
aml_dict['amount_currency'] = prorata_factor * self.amount
aml_dict['currency_id'] = statement_currency.id
# Create the move line for the statement line using the total credit/debit of the counterpart
# This leaves out the amount already reconciled and avoids rounding errors from currency conversion
st_line_amount = sum(aml_dict['credit'] - aml_dict['debit'] for aml_dict in to_create)
aml_obj.with_context(check_move_validity=False).create(self._prepare_reconciliation_move_line(move, st_line_amount))
# Create write-offs
for aml_dict in new_aml_dicts:
aml_obj.with_context(check_move_validity=False).create(aml_dict)
# Create counterpart move lines and reconcile them
for aml_dict in counterpart_aml_dicts:
if aml_dict['move_line'].partner_id.id:
aml_dict['partner_id'] = aml_dict['move_line'].partner_id.id
aml_dict['account_id'] = aml_dict['move_line'].account_id.id
counterpart_move_line = aml_dict.pop('move_line')
if counterpart_move_line.currency_id and counterpart_move_line.currency_id != company_currency and not aml_dict.get('currency_id'):
aml_dict['currency_id'] = counterpart_move_line.currency_id.id
aml_dict['amount_currency'] = company_currency.with_context(ctx).compute(aml_dict['debit'] - aml_dict['credit'], counterpart_move_line.currency_id)
new_aml = aml_obj.with_context(check_move_validity=False).create(aml_dict)
(new_aml | counterpart_move_line).reconcile()
counterpart_moves.assert_balanced()
return counterpart_moves
| gpl-3.0 |
waheedahmed/edx-platform | common/lib/xmodule/xmodule/tests/test_lti_unit.py | 51 | 22199 | # -*- coding: utf-8 -*-
"""Test for LTI Xmodule functional logic."""
import datetime
from django.utils.timezone import UTC
from mock import Mock, patch, PropertyMock
import textwrap
from lxml import etree
from webob.request import Request
from copy import copy
import urllib
from xmodule.fields import Timedelta
from xmodule.lti_module import LTIDescriptor
from xmodule.lti_2_util import LTIError
from . import LogicTest
class LTIModuleTest(LogicTest):
"""Logic tests for LTI module."""
descriptor_class = LTIDescriptor
def setUp(self):
super(LTIModuleTest, self).setUp()
self.environ = {'wsgi.url_scheme': 'http', 'REQUEST_METHOD': 'POST'}
self.request_body_xml_template = textwrap.dedent("""
<?xml version = "1.0" encoding = "UTF-8"?>
<imsx_POXEnvelopeRequest xmlns = "{namespace}">
<imsx_POXHeader>
<imsx_POXRequestHeaderInfo>
<imsx_version>V1.0</imsx_version>
<imsx_messageIdentifier>{messageIdentifier}</imsx_messageIdentifier>
</imsx_POXRequestHeaderInfo>
</imsx_POXHeader>
<imsx_POXBody>
<{action}>
<resultRecord>
<sourcedGUID>
<sourcedId>{sourcedId}</sourcedId>
</sourcedGUID>
<result>
<resultScore>
<language>en-us</language>
<textString>{grade}</textString>
</resultScore>
</result>
</resultRecord>
</{action}>
</imsx_POXBody>
</imsx_POXEnvelopeRequest>
""")
self.system.get_real_user = Mock()
self.system.publish = Mock()
self.system.rebind_noauth_module_to_user = Mock()
self.user_id = self.xmodule.runtime.anonymous_student_id
self.lti_id = self.xmodule.lti_id
self.unquoted_resource_link_id = u'{}-i4x-2-3-lti-31de800015cf4afb973356dbe81496df'.format(
self.xmodule.runtime.hostname
)
sourced_id = u':'.join(urllib.quote(i) for i in (self.lti_id, self.unquoted_resource_link_id, self.user_id))
self.defaults = {
'namespace': "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0",
'sourcedId': sourced_id,
'action': 'replaceResultRequest',
'grade': 0.5,
'messageIdentifier': '528243ba5241b',
}
self.xmodule.due = None
self.xmodule.graceperiod = None
def get_request_body(self, params=None):
"""Fetches the body of a request specified by params"""
if params is None:
params = {}
data = copy(self.defaults)
data.update(params)
return self.request_body_xml_template.format(**data)
def get_response_values(self, response):
"""Gets the values from the given response"""
parser = etree.XMLParser(ns_clean=True, recover=True, encoding='utf-8')
root = etree.fromstring(response.body.strip(), parser=parser)
lti_spec_namespace = "http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0"
namespaces = {'def': lti_spec_namespace}
code_major = root.xpath("//def:imsx_codeMajor", namespaces=namespaces)[0].text
description = root.xpath("//def:imsx_description", namespaces=namespaces)[0].text
message_identifier = root.xpath("//def:imsx_messageIdentifier", namespaces=namespaces)[0].text
imsx_pox_body = root.xpath("//def:imsx_POXBody", namespaces=namespaces)[0]
try:
action = imsx_pox_body.getchildren()[0].tag.replace('{' + lti_spec_namespace + '}', '')
except Exception: # pylint: disable=broad-except
action = None
return {
'code_major': code_major,
'description': description,
'messageIdentifier': message_identifier,
'action': action
}
@patch(
'xmodule.lti_module.LTIModule.get_client_key_secret',
return_value=('test_client_key', u'test_client_secret')
)
def test_authorization_header_not_present(self, _get_key_secret):
"""
Request has no Authorization header.
This is an unknown service request, i.e., it is not a part of the original service specification.
"""
request = Request(self.environ)
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'OAuth verification error: Malformed authorization header',
'messageIdentifier': self.defaults['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
@patch(
'xmodule.lti_module.LTIModule.get_client_key_secret',
return_value=('test_client_key', u'test_client_secret')
)
def test_authorization_header_empty(self, _get_key_secret):
"""
Request Authorization header has no value.
This is an unknown service request, i.e., it is not a part of the original service specification.
"""
request = Request(self.environ)
request.authorization = "bad authorization header"
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'OAuth verification error: Malformed authorization header',
'messageIdentifier': self.defaults['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_real_user_is_none(self):
"""
If we have no real user, we should send back failure response.
"""
self.xmodule.verify_oauth_body_sign = Mock()
self.xmodule.has_score = True
self.system.get_real_user = Mock(return_value=None)
request = Request(self.environ)
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'User not found.',
'messageIdentifier': self.defaults['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_grade_past_due(self):
"""
Should fail if we do not accept past due grades, and it is past due.
"""
self.xmodule.accept_grades_past_due = False
self.xmodule.due = datetime.datetime.now(UTC())
self.xmodule.graceperiod = Timedelta().from_json("0 seconds")
request = Request(self.environ)
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'Grade is past due',
'messageIdentifier': 'unknown',
}
self.assertEqual(response.status_code, 200)
self.assertEqual(expected_response, real_response)
def test_grade_not_in_range(self):
"""
Grade returned from Tool Provider is outside the range 0.0-1.0.
"""
self.xmodule.verify_oauth_body_sign = Mock()
request = Request(self.environ)
request.body = self.get_request_body(params={'grade': '10'})
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'Request body XML parsing error: score value outside the permitted range of 0-1.',
'messageIdentifier': 'unknown',
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_bad_grade_decimal(self):
"""
Grade returned from Tool Provider doesn't use a period as the decimal point.
"""
self.xmodule.verify_oauth_body_sign = Mock()
request = Request(self.environ)
request.body = self.get_request_body(params={'grade': '0,5'})
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'failure',
'description': 'Request body XML parsing error: invalid literal for float(): 0,5',
'messageIdentifier': 'unknown',
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_unsupported_action(self):
"""
Action returned from Tool Provider isn't supported.
`replaceResultRequest` is supported only.
"""
self.xmodule.verify_oauth_body_sign = Mock()
request = Request(self.environ)
request.body = self.get_request_body({'action': 'wrongAction'})
response = self.xmodule.grade_handler(request, '')
real_response = self.get_response_values(response)
expected_response = {
'action': None,
'code_major': 'unsupported',
'description': 'Target does not support the requested operation.',
'messageIdentifier': self.defaults['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
def test_good_request(self):
"""
Response from Tool Provider is correct.
"""
self.xmodule.verify_oauth_body_sign = Mock()
self.xmodule.has_score = True
request = Request(self.environ)
request.body = self.get_request_body()
response = self.xmodule.grade_handler(request, '')
description_expected = 'Score for {sourcedId} is now {score}'.format(
sourcedId=self.defaults['sourcedId'],
score=self.defaults['grade'],
)
real_response = self.get_response_values(response)
expected_response = {
'action': 'replaceResultResponse',
'code_major': 'success',
'description': description_expected,
'messageIdentifier': self.defaults['messageIdentifier'],
}
self.assertEqual(response.status_code, 200)
self.assertDictEqual(expected_response, real_response)
self.assertEqual(self.xmodule.module_score, float(self.defaults['grade']))
def test_user_id(self):
expected_user_id = unicode(urllib.quote(self.xmodule.runtime.anonymous_student_id))
real_user_id = self.xmodule.get_user_id()
self.assertEqual(real_user_id, expected_user_id)
def test_outcome_service_url(self):
mock_url_prefix = 'https://hostname/'
test_service_name = "test_service"
def mock_handler_url(block, handler_name, **kwargs): # pylint: disable=unused-argument
"""Mock function for returning fully-qualified handler urls"""
return mock_url_prefix + handler_name
self.xmodule.runtime.handler_url = Mock(side_effect=mock_handler_url)
real_outcome_service_url = self.xmodule.get_outcome_service_url(service_name=test_service_name)
self.assertEqual(real_outcome_service_url, mock_url_prefix + test_service_name)
def test_resource_link_id(self):
with patch('xmodule.lti_module.LTIModule.location', new_callable=PropertyMock):
self.xmodule.location.html_id = lambda: 'i4x-2-3-lti-31de800015cf4afb973356dbe81496df'
expected_resource_link_id = unicode(urllib.quote(self.unquoted_resource_link_id))
real_resource_link_id = self.xmodule.get_resource_link_id()
self.assertEqual(real_resource_link_id, expected_resource_link_id)
def test_lis_result_sourcedid(self):
expected_sourced_id = u':'.join(urllib.quote(i) for i in (
self.system.course_id.to_deprecated_string(),
self.xmodule.get_resource_link_id(),
self.user_id
))
real_lis_result_sourcedid = self.xmodule.get_lis_result_sourcedid()
self.assertEqual(real_lis_result_sourcedid, expected_sourced_id)
def test_client_key_secret(self):
"""
LTI module gets client key and secret provided.
"""
#this adds lti passports to system
mocked_course = Mock(lti_passports=['lti_id:test_client:test_secret'])
modulestore = Mock()
modulestore.get_course.return_value = mocked_course
runtime = Mock(modulestore=modulestore)
self.xmodule.descriptor.runtime = runtime
self.xmodule.lti_id = "lti_id"
key, secret = self.xmodule.get_client_key_secret()
expected = ('test_client', 'test_secret')
self.assertEqual(expected, (key, secret))
def test_client_key_secret_not_provided(self):
"""
LTI module attempts to get client key and secret provided in cms.
There are key and secret but not for specific LTI.
"""
# this adds lti passports to system
mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])
modulestore = Mock()
modulestore.get_course.return_value = mocked_course
runtime = Mock(modulestore=modulestore)
self.xmodule.descriptor.runtime = runtime
# set another lti_id
self.xmodule.lti_id = "another_lti_id"
key_secret = self.xmodule.get_client_key_secret()
expected = ('', '')
self.assertEqual(expected, key_secret)
def test_bad_client_key_secret(self):
"""
LTI module attempts to get client key and secret provided in cms.
There are key and secret provided in wrong format.
"""
# this adds lti passports to system
mocked_course = Mock(lti_passports=['test_id_test_client_test_secret'])
modulestore = Mock()
modulestore.get_course.return_value = mocked_course
runtime = Mock(modulestore=modulestore)
self.xmodule.descriptor.runtime = runtime
self.xmodule.lti_id = 'lti_id'
with self.assertRaises(LTIError):
self.xmodule.get_client_key_secret()
@patch('xmodule.lti_module.signature.verify_hmac_sha1', Mock(return_value=True))
@patch(
'xmodule.lti_module.LTIModule.get_client_key_secret',
Mock(return_value=('test_client_key', u'test_client_secret'))
)
def test_successful_verify_oauth_body_sign(self):
"""
Test if OAuth signing was successful.
"""
self.xmodule.verify_oauth_body_sign(self.get_signed_grade_mock_request())
@patch('xmodule.lti_module.LTIModule.get_outcome_service_url', Mock(return_value=u'https://testurl/'))
@patch('xmodule.lti_module.LTIModule.get_client_key_secret',
Mock(return_value=(u'__consumer_key__', u'__lti_secret__')))
def test_failed_verify_oauth_body_sign_proxy_mangle_url(self):
"""
Oauth signing verify fail.
"""
request = self.get_signed_grade_mock_request_with_correct_signature()
self.xmodule.verify_oauth_body_sign(request)
# we should verify against get_outcome_service_url not
# request url proxy and load balancer along the way may
# change url presented to the method
request.url = 'http://testurl/'
self.xmodule.verify_oauth_body_sign(request)
def get_signed_grade_mock_request_with_correct_signature(self):
"""
Generate a proper LTI request object
"""
mock_request = Mock()
mock_request.headers = {
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': (
u'OAuth realm="https://testurl/", oauth_body_hash="wwzA3s8gScKD1VpJ7jMt9b%2BMj9Q%3D",'
'oauth_nonce="18821463", oauth_timestamp="1409321145", '
'oauth_consumer_key="__consumer_key__", oauth_signature_method="HMAC-SHA1", '
'oauth_version="1.0", oauth_signature="fHsE1hhIz76/msUoMR3Lyb7Aou4%3D"'
)
}
mock_request.url = u'https://testurl'
mock_request.http_method = u'POST'
mock_request.method = mock_request.http_method
mock_request.body = (
'<?xml version=\'1.0\' encoding=\'utf-8\'?>\n'
'<imsx_POXEnvelopeRequest xmlns="http://www.imsglobal.org/services/ltiv1p1/xsd/imsoms_v1p0">'
'<imsx_POXHeader><imsx_POXRequestHeaderInfo><imsx_version>V1.0</imsx_version>'
'<imsx_messageIdentifier>edX_fix</imsx_messageIdentifier></imsx_POXRequestHeaderInfo>'
'</imsx_POXHeader><imsx_POXBody><replaceResultRequest><resultRecord><sourcedGUID>'
'<sourcedId>MITxLTI/MITxLTI/201x:localhost%3A8000-i4x-MITxLTI-MITxLTI-lti-3751833a214a4f66a0d18f63234207f2:363979ef768ca171b50f9d1bfb322131</sourcedId>' # pylint: disable=line-too-long
'</sourcedGUID><result><resultScore><language>en</language><textString>0.32</textString></resultScore>'
'</result></resultRecord></replaceResultRequest></imsx_POXBody></imsx_POXEnvelopeRequest>'
)
return mock_request
def test_wrong_xml_namespace(self):
"""
Test wrong XML Namespace.
Tests that tool provider returned grade back with wrong XML Namespace.
"""
with self.assertRaises(IndexError):
mocked_request = self.get_signed_grade_mock_request(namespace_lti_v1p1=False)
self.xmodule.parse_grade_xml_body(mocked_request.body)
def test_parse_grade_xml_body(self):
"""
Test XML request body parsing.
Tests that xml body was parsed successfully.
"""
mocked_request = self.get_signed_grade_mock_request()
message_identifier, sourced_id, grade, action = self.xmodule.parse_grade_xml_body(mocked_request.body)
self.assertEqual(self.defaults['messageIdentifier'], message_identifier)
self.assertEqual(self.defaults['sourcedId'], sourced_id)
self.assertEqual(self.defaults['grade'], grade)
self.assertEqual(self.defaults['action'], action)
@patch('xmodule.lti_module.signature.verify_hmac_sha1', Mock(return_value=False))
@patch(
'xmodule.lti_module.LTIModule.get_client_key_secret',
Mock(return_value=('test_client_key', u'test_client_secret'))
)
def test_failed_verify_oauth_body_sign(self):
"""
Oauth signing verify fail.
"""
with self.assertRaises(LTIError):
req = self.get_signed_grade_mock_request()
self.xmodule.verify_oauth_body_sign(req)
def get_signed_grade_mock_request(self, namespace_lti_v1p1=True):
"""
Example of signed request from LTI Provider.
When `namespace_v1p0` is set to True then the default namespase from
LTI 1.1 will be used. Otherwise fake namespace will be added to XML.
"""
mock_request = Mock()
mock_request.headers = {
'X-Requested-With': 'XMLHttpRequest',
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': u'OAuth oauth_nonce="135685044251684026041377608307", \
oauth_timestamp="1234567890", oauth_version="1.0", \
oauth_signature_method="HMAC-SHA1", \
oauth_consumer_key="test_client_key", \
oauth_signature="my_signature%3D", \
oauth_body_hash="JEpIArlNCeV4ceXxric8gJQCnBw="'
}
mock_request.url = u'http://testurl'
mock_request.http_method = u'POST'
params = {}
if not namespace_lti_v1p1:
params = {
'namespace': "http://www.fakenamespace.com/fake"
}
mock_request.body = self.get_request_body(params)
return mock_request
def test_good_custom_params(self):
"""
Custom parameters are presented in right format.
"""
self.xmodule.custom_parameters = ['test_custom_params=test_custom_param_value']
self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))
self.xmodule.oauth_params = Mock()
self.xmodule.get_input_fields()
self.xmodule.oauth_params.assert_called_with(
{u'custom_test_custom_params': u'test_custom_param_value'},
'test_client_key', 'test_client_secret'
)
def test_bad_custom_params(self):
"""
Custom parameters are presented in wrong format.
"""
bad_custom_params = ['test_custom_params: test_custom_param_value']
self.xmodule.custom_parameters = bad_custom_params
self.xmodule.get_client_key_secret = Mock(return_value=('test_client_key', 'test_client_secret'))
self.xmodule.oauth_params = Mock()
with self.assertRaises(LTIError):
self.xmodule.get_input_fields()
def test_max_score(self):
self.xmodule.weight = 100.0
self.assertFalse(self.xmodule.has_score)
self.assertEqual(self.xmodule.max_score(), None)
self.xmodule.has_score = True
self.assertEqual(self.xmodule.max_score(), 100.0)
def test_context_id(self):
    """
    Tests that LTI parameter context_id is equal to course_id.
    """
    self.assertEqual(self.system.course_id.to_deprecated_string(), self.xmodule.context_id)
| agpl-3.0 |
koomik/CouchPotatoServer | libs/requests/packages/charade/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Module-wide debug flag (0 = off).
_debug = 0

# Overall probing states of the universal detector.
eDetecting = 0
eFoundIt = 1
eNotMe = 2

# Per-charset coding state machine states.
eStart = 0
eError = 1
eItsMe = 2

# Confidence above which a prober may shortcut to a final answer.
SHORTCUT_THRESHOLD = 0.95
| gpl-3.0 |
KSanthanam/rethinkdb | external/v8_3.30.33.16/build/gyp/test/msvs/list_excluded/gyptest-all.py | 347 | 1292 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that msvs_list_excluded_files=0 doesn't list files that would
normally be in _excluded_files, and that if that flag is not set, then they
are still listed.
"""
import os

import TestGyp

# Generate MSVS projects only, in a scratch working directory.
test = TestGyp.TestGyp(formats=['msvs'], workdir='workarea_all')

# with the flag set to 0
try:
    os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=0'
    test.run_gyp('hello_exclude.gyp')
finally:
    # Always restore the environment so later gyp runs see default behaviour.
    del os.environ['GYP_GENERATOR_FLAGS']
if test.uses_msbuild:
    test.must_not_contain('hello.vcxproj', 'hello_mac')
else:
    test.must_not_contain('hello.vcproj', 'hello_mac')

# with the flag not set (listing excluded files is the default)
test.run_gyp('hello_exclude.gyp')
if test.uses_msbuild:
    test.must_contain('hello.vcxproj', 'hello_mac')
else:
    test.must_contain('hello.vcproj', 'hello_mac')

# with the flag explicitly set to 1 (same result as unset)
try:
    os.environ['GYP_GENERATOR_FLAGS'] = 'msvs_list_excluded_files=1'
    test.run_gyp('hello_exclude.gyp')
finally:
    del os.environ['GYP_GENERATOR_FLAGS']
if test.uses_msbuild:
    test.must_contain('hello.vcxproj', 'hello_mac')
else:
    test.must_contain('hello.vcproj', 'hello_mac')

test.pass_test()
| agpl-3.0 |
rafalcieslak/mimiker | launcher/outputs.py | 3 | 2037 | from .common import *
class ServerOutput(Launchable):
    # This is a no-op class: the simulator already exposes the UART
    # server itself, so there is nothing to launch or configure.
    def __init__(self):
        Launchable.__init__(self, 'server')

    def probe(self):
        # Always available since nothing external is required.
        return True

    def configure(self, **kwargs):
        pass
class XTermOutput(Launchable):
    """UART output shown in an xterm window."""

    def __init__(self):
        Launchable.__init__(self, 'xterm')

    def probe(self):
        # Usable only when the xterm binary is found on PATH.
        self.cmd = shutil.which('xterm')
        return self.cmd is not None

    def configure(self, **kwargs):
        port = kwargs['uart_port']
        # The simulator will only open the server after some time has
        # passed (OVPsim needs as much as 1 second).  To minimize the
        # delay, keep reconnecting with socat until success.
        socat_cmd = 'socat STDIO tcp:localhost:%d,retry,forever' % port
        self.options = ['-title', 'UART2', '-e', socat_cmd]
class UrxvtOutput(Launchable):
    """UART output shown in a urxvt window."""

    def __init__(self):
        Launchable.__init__(self, 'urxvt')

    def probe(self):
        # Usable only when the urxvt binary is found on PATH.
        self.cmd = shutil.which('urxvt')
        return self.cmd is not None

    def configure(self, **kwargs):
        port = kwargs['uart_port']
        # The simulator will only open the server after some time has
        # passed (OVPsim needs as much as 1 second).  To minimize the
        # delay, keep reconnecting with socat until success.  urxvt's
        # -e expects a program plus arguments, hence the explicit shell.
        socat_cmd = 'socat STDIO tcp:localhost:%d,retry,forever' % port
        self.options = ['-title', 'UART2', '-e', 'sh', '-c', socat_cmd]
class StdIOOutput(Launchable):
    """UART output wired straight to the launcher's own stdio."""

    def __init__(self):
        Launchable.__init__(self, 'stdio')

    def probe(self):
        # socat availability is assumed; this backend is always offered.
        return True

    def configure(self, **kwargs):
        port = kwargs['uart_port']
        # The simulator will only open the server after some time has
        # passed (OVPsim needs as much as 1 second).  To minimize the
        # delay, keep reconnecting until success.
        self.cmd = 'socat'
        self.options = ['STDIO', 'tcp:localhost:%d,retry,forever' % port]
# Output backends in order of preference; the launcher uses the first
# whose probe() succeeds.
OUTPUTS = [UrxvtOutput(),
           XTermOutput(),
           StdIOOutput(),
           ServerOutput()]
| bsd-3-clause |
bmazin/ARCONS-pipeline | fluxcal/fluxCal.py | 1 | 29931 | #!/bin/python
'''
fluxCal.py
Created by Seth Meeker on 11-21-2012
Modified on 02-16-2015 to perform absolute fluxCal with point sources
Opens ARCONS observation of a spectrophotometric standard star and
associated wavelength cal file, reads in all photons and converts to energies.
Bins photons to generate a spectrum, then divides this into the known spectrum
of the object to create a Sensitivity curve. This curve is then written out to
h5 file.
Flags are associated with each pixel - see headers/pipelineFlags
for descriptions. Note some flags are set here, others are set
later on when creating photon lists.
'''
import sys,os
import tables
import numpy as np
from scipy import interpolate
from scipy.optimize.minpack import curve_fit
import matplotlib.pyplot as plt
from photometry import LightCurve
from util.FileName import FileName
from util.ObsFile import ObsFile
from util import MKIDStd
from util.readDict import readDict
from util.utils import rebin
from util.utils import gaussianConvolution
from util.utils import makeMovie
from util.utils import fitBlackbody
import hotpix.hotPixels as hp
from scipy.optimize.minpack import curve_fit
from scipy import interpolate
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from headers import pipelineFlags
import figureHeader
class FluxCal:
def __init__(self,paramFile,plots=False,verbose=False):
    """
    Opens flux file, prepares standard spectrum, and calculates flux factors for the file.
    Method is provided in param file. If 'relative' is selected, an obs file with standard star defocused over
    the entire array is expected, with accompanying sky file to do sky subtraction.
    If any other method is provided, 'absolute' will be done by default, wherein a point source is assumed
    to be present. The obs file is then broken into spectral frames with photometry (psf or aper) performed
    on each frame to generate the ARCONS observed spectrum.

    NOTE(review): this constructor runs the entire calibration pipeline
    (load, photometry, sensitivity curve, write-out) as a side effect.
    """
    self.verbose=verbose
    self.plots = plots

    # Read all pipeline parameters from the supplied dictionary file.
    self.params = readDict()
    self.params.read_from_file(paramFile)

    run = self.params['run']
    sunsetDate = self.params['fluxSunsetLocalDate']
    self.fluxTstamp = self.params['fluxTimestamp']
    skyTstamp = self.params['skyTimestamp']
    wvlSunsetDate = self.params['wvlCalSunsetLocalDate']
    wvlTimestamp = self.params['wvlCalTimestamp']
    flatCalFileName = self.params['flatCalFileName']
    needTimeAdjust = self.params['needTimeAdjust']
    self.deadtime = float(self.params['deadtime']) #from firmware pulse detection
    self.timeSpacingCut = self.params['timeSpacingCut']
    bLoadBeammap = self.params.get('bLoadBeammap',False)
    self.method = self.params['method']
    self.objectName = self.params['object']
    self.r = float(self.params['energyResolution'])
    self.photometry = self.params['photometry']
    self.centroidRow = self.params['centroidRow']
    self.centroidCol = self.params['centroidCol']
    self.aperture = self.params['apertureRad']
    self.annulusInner = self.params['annulusInner']
    self.annulusOuter = self.params['annulusOuter']
    self.collectingArea = self.params['collectingArea']
    self.startTime = self.params['startTime']
    self.intTime = self.params['integrationTime']

    fluxFN = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp)
    self.fluxFileName = fluxFN.obs()
    self.fluxFile = ObsFile(self.fluxFileName)

    if self.plots:
        self.plotSavePath = os.environ['MKID_PROC_PATH']+os.sep+'fluxCalSolnFiles'+os.sep+run+os.sep+sunsetDate+os.sep+'plots'+os.sep
        if not os.path.exists(self.plotSavePath): os.mkdir(self.plotSavePath)
        if self.verbose: print "Created directory %s"%self.plotSavePath

    obsFNs = [fluxFN]
    self.obsList = [self.fluxFile]

    # Default to the full exposure when start/integration time are unset.
    if self.startTime in ['',None]: self.startTime=0
    if self.intTime in ['',None]: self.intTime=-1

    if self.method=="relative":
        try:
            print "performing Relative Flux Calibration"
            skyFN = FileName(run=run,date=sunsetDate,tstamp=skyTstamp)
            self.skyFileName = skyFN.obs()
            self.skyFile = ObsFile(self.skyFileName)
            obsFNs.append(skyFN)
            self.obsList.append(self.skyFile)
        except:
            print "For relative flux calibration a sky file must be provided in param file"
            self.__del__()
    else:
        # Anything other than 'relative' falls back to absolute calibration.
        self.method='absolute'
        print "performing Absolute Flux Calibration"

    if self.photometry not in ['aperture','PSF']: self.photometry='PSF' #default to PSF fitting if no valid photometry selected

    timeMaskFileNames = [fn.timeMask() for fn in obsFNs]
    timeAdjustFileName = FileName(run=run).timeAdjustments()

    #make filename for output fluxCalSoln file
    self.fluxCalFileName = FileName(run=run,date=sunsetDate,tstamp=self.fluxTstamp).fluxSoln()
    print "Creating flux cal: %s"%self.fluxCalFileName

    if wvlSunsetDate != '':
        wvlCalFileName = FileName(run=run,date=wvlSunsetDate,tstamp=wvlTimestamp).calSoln()
    if flatCalFileName =='':
        flatCalFileName=FileName(obsFile=self.fluxFile).flatSoln()

    #load cal files for flux file and, if necessary, sky file
    for iObs,obs in enumerate(self.obsList):
        if bLoadBeammap:
            print 'loading beammap',os.environ['MKID_BEAMMAP_PATH']
            obs.loadBeammapFile(os.environ['MKID_BEAMMAP_PATH'])
        if wvlSunsetDate != '':
            obs.loadWvlCalFile(wvlCalFileName)
        else:
            obs.loadBestWvlCalFile()
        obs.loadFlatCalFile(flatCalFileName)
        obs.setWvlCutoffs(-1,-1)
        if needTimeAdjust:
            obs.loadTimeAdjustmentFile(timeAdjustFileName)
        timeMaskFileName = timeMaskFileNames[iObs]
        print timeMaskFileName
        # Build a hot-pixel mask on the fly if one does not already exist.
        if not os.path.exists(timeMaskFileName):
            print 'Running hotpix for ',obs
            hp.findHotPixels(obsFile=obs,outputFileName=timeMaskFileName,fwhm=np.inf,useLocalStdDev=True)
            print "Flux cal/sky file pixel mask saved to %s"%(timeMaskFileName)
        obs.loadHotPixCalFile(timeMaskFileName)
        if self.verbose: print "Loaded hot pixel file %s"%timeMaskFileName

    #get flat cal binning information since flux cal will need to match it
    self.wvlBinEdges = self.fluxFile.flatCalFile.root.flatcal.wavelengthBins.read()
    self.nWvlBins = self.fluxFile.flatWeights.shape[2]
    self.binWidths = np.empty((self.nWvlBins),dtype=float)
    self.binCenters = np.empty((self.nWvlBins),dtype=float)
    for i in xrange(self.nWvlBins):
        self.binWidths[i] = self.wvlBinEdges[i+1]-self.wvlBinEdges[i]
        self.binCenters[i] = (self.wvlBinEdges[i]+(self.binWidths[i]/2.0))

    if self.method=='relative':
        print "Extracting ARCONS flux and sky spectra"
        self.loadRelativeSpectrum()
        print "Flux Spectrum loaded"
        self.loadSkySpectrum()
        print "Sky Spectrum loaded"
    elif self.method=='absolute':
        print "Extracting ARCONS point source spectrum"
        self.loadAbsoluteSpectrum()

    print "Loading standard spectrum"
    try:
        self.loadStdSpectrum(self.objectName)
    except KeyError:
        print "Invalid spectrum object name"
        self.__del__()
        sys.exit()

    print "Generating sensitivity curve"
    self.calculateFactors()
    print "Sensitivity Curve calculated"
    print "Writing fluxCal to file %s"%self.fluxCalFileName
    self.writeFactors(self.fluxCalFileName)

    if self.plots: self.makePlots()

    print "Done"
def __del__(self):
    # Close any files that were opened.  Attributes may be missing when
    # __init__ bailed out early, hence the AttributeError guard.
    try:
        self.fluxFile.close()
        self.calFile.close()
    except AttributeError:#fluxFile was never defined
        pass
def getDeadTimeCorrection(self, obs): #WRONG RIGHT NOW. NEEDS TO HAVE RAW COUNTS SUMMED, NOT CUBE WHICH EXCLUDES NOISE TAIL
    """
    Compute per-pixel dead-time correction factors for `obs`.

    Returns an (nRow, nCol, 1) array of multiplicative factors; frames
    should be multiplied by it to recover their true count rates.
    NOTE(review): per the author's comment above, this should really sum
    raw counts (including the noise tail), not the wavelength cube.
    """
    if self.verbose: print "Making raw cube to get dead time correction"
    cubeDict = obs.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=False, fluxWeighted=False)

    cube= np.array(cubeDict['cube'], dtype=np.double)
    wvlBinEdges= cubeDict['wvlBinEdges']
    effIntTime= cubeDict['effIntTime']
    if self.verbose: print "median effective integration time = ", np.median(effIntTime)

    nWvlBins=len(wvlBinEdges)-1
    if self.verbose: print "cube shape ", np.shape(cube)
    if self.verbose: print "effIntTime shape ", np.shape(effIntTime)

    #add third dimension to effIntTime for broadcasting
    effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
    #put cube into counts/s in each pixel
    cube /= effIntTime

    #CALCULATE DEADTIME CORRECTION
    #NEED TOTAL COUNTS PER SECOND FOR EACH PIXEL TO DO PROPERLY
    #ASSUMES SAME CORRECTION FACTOR APPLIED FOR EACH WAVELENGTH, MEANING NO WL DEPENDANCE ON DEAD TIME EFFECT
    DTCorr = np.zeros((np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
    for f in range(0,np.shape(cube)[2]):
        # Sum over wavelength to get total counts/s per pixel.
        DTCorr += cube[:,:,f]

    #Correct for firmware dead time (100us in 2012 ARCONS firmware)
    DTCorrNew=DTCorr/(1-DTCorr*self.deadtime)
    CorrFactors = DTCorrNew/DTCorr #This is what the frames need to be multiplied by to get their true values
    if self.verbose: print "Dead time correction factors: ", CorrFactors
    #add third dimension to CorrFactors for broadcasting
    CorrFactors = np.reshape(CorrFactors,np.shape(CorrFactors)+(1,))
    return CorrFactors
def loadAbsoluteSpectrum(self):
    '''
    extract the ARCONS measured spectrum of the spectrophotometric standard by breaking data into spectral cube
    and performing photometry (aper or psf) on each spectral frame

    Sets self.fluxSpectrum and self.skySpectrum (counts/s/Angs/cm^2)
    and also returns them.
    '''
    if self.verbose:print "Making spectral cube"
    cubeDict = self.fluxFile.getSpectralCube(firstSec=self.startTime, integrationTime=self.intTime, weighted=True, fluxWeighted=False)
    cube= np.array(cubeDict['cube'], dtype=np.double)
    effIntTime= cubeDict['effIntTime']
    if self.verbose: print "median effective integration time in flux file cube = ", np.median(effIntTime)
    if self.verbose: print "cube shape ", np.shape(cube)
    if self.verbose: print "effIntTime shape ", np.shape(effIntTime)

    #add third dimension to effIntTime for broadcasting
    effIntTime = np.reshape(effIntTime,np.shape(effIntTime)+(1,))
    #put cube into counts/s in each pixel
    cube /= effIntTime
    #get dead time correction factors
    DTCorr = self.getDeadTimeCorrection(self.fluxFile)
    cube*=DTCorr #cube now in units of counts/s and corrected for dead time

    # Optional diagnostic movie; skipped when figureHeader (paper-plot
    # formatting) has been imported, since the two are incompatible.
    if self.plots and not 'figureHeader' in sys.modules:
        if self.verbose: print "Saving spectral frames as movie..."
        movieCube = np.zeros((self.nWvlBins,np.shape(cube)[0],np.shape(cube)[1]),dtype=float)
        for i in xrange(self.nWvlBins):
            movieCube[i,:,:] = cube[:,:,i]
        makeMovie(movieCube,frameTitles=self.binCenters,cbar=True,outName=self.plotSavePath+'FluxCal_Cube_%s.gif'%(self.objectName), normMin=0, normMax=50)
        if self.verbose: print "Movie saved in %s"%self.plotSavePath

    LCplot=False #light curve pop-ups not compatible with FLuxCal plotting 2/18/15
    #if self.photometry=='PSF': LCplot = False
    LC = LightCurve.LightCurve(verbose=self.verbose, showPlot=LCplot)

    self.fluxSpectrum=np.empty((self.nWvlBins),dtype=float)
    self.skySpectrum=np.zeros((self.nWvlBins),dtype=float)

    # Photometry on each spectral frame; aperture photometry also yields
    # a per-bin sky estimate, PSF fitting does not.
    for i in xrange(self.nWvlBins):
        frame = cube[:,:,i]
        if self.verbose: print "%s photometry on frame %i of cube, central wvl = %f Angstroms"%(self.photometry,i,self.binCenters[i])
        if self.photometry == 'aperture':
            fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture, annulus_inner = self.annulusInner, annulus_outer = self.annulusOuter, interpolation="linear")
            self.fluxSpectrum[i] = fDict['flux']
            self.skySpectrum[i] = fDict['skyFlux']
            print "Sky estimate = ", fDict['skyFlux']
        else:
            fDict = LC.performPhotometry(self.photometry,frame,[[self.centroidCol,self.centroidRow]],expTime=None,aper_radius = self.aperture)
            self.fluxSpectrum[i] = fDict['flux']

    self.fluxSpectrum=self.fluxSpectrum/self.binWidths/self.collectingArea #spectrum now in counts/s/Angs/cm^2
    self.skySpectrum=self.skySpectrum/self.binWidths/self.collectingArea

    return self.fluxSpectrum, self.skySpectrum
def loadRelativeSpectrum(self):
    """
    Extract a per-pixel flat-cal'd spectrum from the (defocused) flux
    file and set self.fluxSpectrum to the array-wide median.

    NOTE(review): relies on self.nRow/self.nCol, which are not set in the
    visible __init__ — confirm they are defined before this is called.
    """
    self.fluxSpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
    self.fluxEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
    for iRow in xrange(self.nRow):
        for iCol in xrange(self.nCol):
            count = self.fluxFile.getPixelCount(iRow,iCol)
            fluxDict = self.fluxFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
            self.fluxSpectra[iRow][iCol],self.fluxEffTime[iRow][iCol] = fluxDict['spectrum'],fluxDict['effIntTime']
    self.fluxSpectra = np.array(self.fluxSpectra)
    self.fluxEffTime = np.array(self.fluxEffTime)
    DTCorr = self.getDeadTimeCorrection(self.fluxFile)
    #print "Bin widths = ",self.binWidths
    # Convert to counts/s/Angs and apply the dead-time correction.
    self.fluxSpectra = self.fluxSpectra/self.binWidths/self.fluxEffTime*DTCorr
    self.fluxSpectrum = self.calculateMedian(self.fluxSpectra) #find median of subtracted spectra across whole array
    return self.fluxSpectrum
def loadSkySpectrum(self):
    """
    Mirror of loadRelativeSpectrum for the sky file; sets and returns
    self.skySpectrum (array-wide median of per-pixel sky spectra).
    """
    self.skySpectra = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
    self.skyEffTime = [[[] for i in xrange(self.nCol)] for j in xrange(self.nRow)]
    for iRow in xrange(self.nRow):
        for iCol in xrange(self.nCol):
            count = self.skyFile.getPixelCount(iRow,iCol)
            skyDict = self.skyFile.getPixelSpectrum(iRow,iCol,weighted=True,firstSec=0,integrationTime=-1)
            self.skySpectra[iRow][iCol],self.skyEffTime[iRow][iCol] = skyDict['spectrum'],skyDict['effIntTime']
    self.skySpectra = np.array(self.skySpectra)
    self.skyEffTime = np.array(self.skyEffTime)
    DTCorr = self.getDeadTimeCorrection(self.skyFile)
    # Convert to counts/s/Angs and apply the dead-time correction.
    self.skySpectra = self.skySpectra/self.binWidths/self.skyEffTime*DTCorr
    self.skySpectrum = self.calculateMedian(self.skySpectra) #find median of subtracted spectra across whole array
    return self.skySpectrum
def loadStdSpectrum(self, objectName="G158-100"):
    """
    Load the known calibrator spectrum, clean/extend it, and rebin it to
    the flat cal's wavelength bins.  Sets self.stdWvls, self.stdFlux and
    self.binnedSpectrum (counts/s/Angs/cm^2).  Raises KeyError for an
    unknown object name (propagated from MKIDStd.load).
    """
    #import the known spectrum of the calibrator and rebin to the histogram parameters given
    #must be imported into array with dtype float so division later does not have error
    std = MKIDStd.MKIDStd()
    a = std.load(objectName)
    a = std.countsToErgs(a) #convert std spectrum to ergs/s/Angs/cm^2 for BB fitting and cleaning
    self.stdWvls = np.array(a[:,0])
    self.stdFlux = np.array(a[:,1]) #std object spectrum in ergs/s/Angs/cm^2

    if self.plots:
        #create figure for plotting standard spectrum modifications
        self.stdFig = plt.figure()
        self.stdAx = self.stdFig.add_subplot(111)
        plt.xlim(3500,12000)
        plt.plot(self.stdWvls,self.stdFlux*1E15,linewidth=1,color='grey',alpha=0.75)

    convX_rev,convY_rev = self.cleanSpectrum(self.stdWvls,self.stdFlux)
    convX = convX_rev[::-1] #convolved spectrum comes back sorted backwards, from long wvls to low which screws up rebinning
    convY = convY_rev[::-1]

    #rebin cleaned spectrum to flat cal's wvlBinEdges
    newa = rebin(convX,convY,self.wvlBinEdges)
    rebinnedWvl = np.array(newa[:,0])
    rebinnedFlux = np.array(newa[:,1])

    if self.plots:
        #plot final resampled spectrum
        plt.plot(convX,convY*1E15,color='blue')
        plt.step(rebinnedWvl,rebinnedFlux*1E15,color = 'black',where='mid')
        plt.legend(['%s Spectrum'%self.objectName,'Blackbody Fit','Gaussian Convolved Spectrum','Rebinned Spectrum'],'upper right', numpoints=1)
        plt.xlabel(ur"Wavelength (\r{A})")
        plt.ylabel(ur"Flux (10$^{-15}$ ergs s$^{-1}$ cm$^{-2}$ \r{A}$^{-1}$)")
        plt.ylim(0.9*min(rebinnedFlux)*1E15, 1.1*max(rebinnedFlux)*1E15)
        plt.savefig(self.plotSavePath+'FluxCal_StdSpectrum_%s.eps'%self.objectName,format='eps')

    #convert standard spectrum back into counts/s/angstrom/cm^2
    newa = std.ergsToCounts(newa)
    self.binnedSpectrum = np.array(newa[:,1])
def cleanSpectrum(self,x,y):
    """
    Extend the spectrum (x, y) into the NIR with a blackbody fit and
    smooth it to the detector's median energy resolution self.r.
    Returns (newX, newY); note the result is ordered long-to-short
    wavelength (callers reverse it).
    """
    ##=============== BB Fit to extend spectrum beyond 11000 Angstroms ==================
    # Fit the blackbody to the reddest third of the data only.
    fraction = 1.0/3.0
    nirX = np.arange(int(x[(1.0-fraction)*len(x)]),20000)
    T, nirY = fitBlackbody(x,y,fraction=fraction,newWvls=nirX,tempGuess=5600)

    if self.plots: plt.plot(nirX,nirY*1E15,linestyle='--',linewidth=2, color="black",alpha=0.5)

    # Append only the fitted points beyond the measured wavelength range.
    extendedWvl = np.concatenate((x,nirX[nirX>max(x)]))
    extendedFlux = np.concatenate((y,nirY[nirX>max(x)]))

    ##======= Gaussian convolution to smooth std spectrum to MKIDs median resolution ========
    newX, newY = gaussianConvolution(extendedWvl,extendedFlux,xEnMin=0.005,xEnMax=6.0,xdE=0.001,fluxUnits = "lambda",r=self.r,plots=False)
    return newX, newY
def calculateFactors(self):
    """
    Calculate the sensitivity spectrum: the weighting factors that correct the flat calibrated spectra to the real spectra

    For relative calibration:
    First subtract sky spectrum from ARCONS observed spectrum. Then take median of this spectrum as it should be identical
    across the array, assuming the flat cal has done its job. Then divide this into the known spectrum of the object.

    For absolute calibration:
    self.fluxSpectra already has sky subtraction included. Simply divide this spectrum into the known standard spectrum.

    Sets self.fluxFactors and matching self.fluxFlags (pipelineFlags).
    """
    self.subtractedSpectrum = self.fluxSpectrum - self.skySpectrum
    self.subtractedSpectrum = np.array(self.subtractedSpectrum,dtype=float) #cast as floats so division does not fail later

    if self.method=='relative':
        normWvl = 5500 #Angstroms. Choose an arbitrary wvl to normalize the relative correction at
        ind = np.where(self.wvlBinEdges >= normWvl)[0][0]-1
        self.subtractedSpectrum = self.subtractedSpectrum/(self.subtractedSpectrum[ind]) #normalize
        self.binnedSpectrum = self.binnedSpectrum/(self.binnedSpectrum[ind]) #normalize treated Std spectrum while we are at it

    #Calculate FluxCal factors
    self.fluxFactors = self.binnedSpectrum/self.subtractedSpectrum

    #self.fluxFlags = np.zeros(np.shape(self.fluxFactors),dtype='int')
    self.fluxFlags = np.empty(np.shape(self.fluxFactors),dtype='int')
    self.fluxFlags.fill(pipelineFlags.fluxCal['good']) #Initialise flag array filled with 'good' flags. JvE 5/1/2013.

    #set factors that will cause trouble to 1
    # Each bad bin is flagged and its factor neutralised to 1.0.
    self.fluxFlags[self.fluxFactors == np.inf] = pipelineFlags.fluxCal['infWeight'] #Modified to use flag dictionary - JvE 5/1/2013
    self.fluxFactors[self.fluxFactors == np.inf]=1.0
    self.fluxFlags[np.isnan(self.fluxFactors)] = pipelineFlags.fluxCal['nanWeight'] #Modified to use flag dictionary - JvE 5/1/2013
    self.fluxFactors[np.isnan(self.fluxFactors)]=1.0
    self.fluxFlags[self.fluxFactors <= 0]=pipelineFlags.fluxCal['LEzeroWeight'] #Modified to use flag dictionary - JvE 5/1/2013
    self.fluxFactors[self.fluxFactors <= 0]=1.0
def calculateMedian(self, spectra):
    """
    Return, for each wavelength bin, the median spectrum value over all
    pixels in the array, ignoring zero entries.
    """
    # Flatten the pixel grid so each column holds one wavelength bin.
    flattened = np.reshape(spectra, [self.nRow * self.nCol, self.nWvlBins])
    medians = np.empty(self.nWvlBins, dtype=float)
    for binIdx in range(self.nWvlBins):
        column = flattened[:, binIdx]
        # Dead pixels report 0 and must not bias the median.
        medians[binIdx] = np.median(column[column != 0])
    return medians
def makePlots(self):
    """
    Output all debugging plots of ARCONS sky and object spectra, known calibrator spectrum, and sensitivity curve

    Writes an EPS sensitivity plot, a .npz throughput file and a
    multi-page PDF of diagnostics into self.plotSavePath.
    """
    scratchDir = os.getenv('MKID_PROC_PATH')
    fluxDir = self.plotSavePath
    fluxCalBase = 'FluxCal_%s'%self.objectName
    plotFileName = fluxCalBase+".pdf"
    fullFluxPlotFileName = os.path.join(fluxDir,plotFileName)

    #uncomment to make some plots for the paper. Proper formatting Will also require figureheader to be imported and for movie making to be turned off
    self.paperFig = plt.figure()
    self.paperAx = self.paperFig.add_subplot(111)
    plt.xlim(4000,11000)
    plt.plot(self.binCenters,self.fluxFactors,linewidth=3,color='black')
    plt.xlabel(ur"Wavelength (\r{A})")
    plt.ylabel(ur"Spectral Calibration Curve")
    plt.ylim(0,150)
    plt.savefig(self.plotSavePath+'FluxCal_Sensitivity_%s.eps'%self.objectName,format='eps')

    #save throughput as a .npz file that other code uses when making paper plots
    np.savez(self.plotSavePath+'%s_%s_throughput.npz'%(self.objectName.strip(),self.fluxTstamp),throughput=1.0/self.fluxFactors,wvls=self.binCenters)

    pp = PdfPages(fullFluxPlotFileName)
    #plt.rcParams['font.size'] = 2

    wvls = self.binCenters

    # One diagnostic figure per pipeline stage, appended to the PDF.
    plt.figure()
    ax1 = plt.subplot(111)
    ax1.set_title('ARCONS median flat cal\'d flux in counts')
    plt.plot(wvls,self.fluxSpectrum)
    pp.savefig()

    plt.figure()
    ax2 = plt.subplot(111)
    ax2.set_title('ARCONS median flat cal\'d sky in counts')
    plt.plot(wvls,self.skySpectrum)
    pp.savefig()

    plt.figure()
    ax3 = plt.subplot(111)
    ax3.set_title('Flux data minus sky in counts')
    plt.plot(wvls,self.subtractedSpectrum)
    pp.savefig()

    plt.figure()
    ax4 = plt.subplot(111)
    ax4.set_title('Std Spectrum of %s'%(self.objectName))
    plt.plot(self.stdWvls,self.stdFlux)
    pp.savefig()

    plt.figure()
    ax5 = plt.subplot(111)
    ax5.set_title('Binned Std Spectrum')
    plt.plot(wvls,self.binnedSpectrum)
    pp.savefig()

    plt.figure()
    ax6 = plt.subplot(111)
    ax6.set_title('Median Sensitivity Spectrum')
    ax6.set_xlim((3500,12000))
    #ax6.set_ylim((0,5))
    plt.plot(wvls,self.fluxFactors)
    pp.savefig()

    plt.figure()
    ax7 = plt.subplot(111)
    ax7.set_title('1/Sensitivity (Throughput)')
    ax7.set_xlim((3500,12000))
    ax7.set_ylim((0,.04))
    plt.plot(wvls,1.0/self.fluxFactors)
    pp.savefig()

    plt.figure()
    ax8 = plt.subplot(111)
    ax8.set_title('Flux Cal\'d ARCONS Spectrum of Std')
    plt.plot(wvls,self.fluxFactors*self.subtractedSpectrum)
    pp.savefig()

    pp.close()
    print "Saved Flux Cal plots to %s"%(fullFluxPlotFileName)
def writeFactors(self,fluxCalFileName):
"""
Write flux cal weights to h5 file
"""
if os.path.isabs(fluxCalFileName) == True:
fullFluxCalFileName = fluxCalFileName
else:
scratchDir = os.getenv('MKID_PROC_PATH')
fluxDir = os.path.join(scratchDir,'fluxCalSolnFiles')
fullFluxCalFileName = os.path.join(fluxDir,fluxCalFileName)
try:
fluxCalFile = tables.openFile(fullFluxCalFileName,mode='w')
except:
print 'Error: Couldn\'t create flux cal file, ',fullFluxCalFileName
return
calgroup = fluxCalFile.createGroup(fluxCalFile.root,'fluxcal','Table of flux calibration weights by wavelength')
caltable = tables.Array(calgroup,'weights',object=self.fluxFactors,title='Flux calibration Weights indexed by wavelengthBin')
flagtable = tables.Array(calgroup,'flags',object=self.fluxFlags,title='Flux cal flags indexed by wavelengthBin. 0 is Good')
bintable = tables.Array(calgroup,'wavelengthBins',object=self.wvlBinEdges,title='Wavelength bin edges corresponding to third dimension of weights array')
fluxCalFile.flush()
fluxCalFile.close()
print "Finished Flux Cal, written to %s"%(fullFluxCalFileName)
def cleanSpectrum_old(self,x,y,objectName):
    '''
    function to take high resolution spectrum of standard star, extend IR coverage with
    an exponential tail, then rebin down to ARCONS resolution. This function has since been
    deprecated with the current cleanSpectrum which uses a BB fit to extend IR coverage,
    and does the rebinning using a gaussian convolution. This is left in for reference.
    '''
    #locations and widths of absorption features in Angstroms
    #features = [3890,3970,4099,4340,4860,6564,6883,7619]
    #widths = [50,50,50,50,50,50,50,50]
    #for i in xrange(len(features)):
    #    #check for absorption feature in std spectrum
    #    ind = np.where((x<(features[i]+15)) & (x>(features[i]-15)))[0]
    #    if len(ind)!=0:
    #        ind = ind[len(ind)/2]
    #    #if feature is found (flux is higher on both sides of the specified wavelength where the feature should be)
    #    if y[ind]<y[ind+1] and y[ind]<y[ind-1]:
    #        #cut out width[i] around feature[i]
    #        inds = np.where((x >= features[i]+widths[i]) | (x <= features[i]-widths[i]))
    #        x = x[inds]
    #        y = y[inds]

    #fit a tail to the end of the spectrum to interpolate out to desired wavelength in angstroms
    fraction = 3.0/4.0
    newx = np.arange(int(x[fraction*len(x)]),20000)

    slopeguess = (np.log(y[-1])-np.log(y[fraction*len(x)]))/(x[-1]-x[fraction*len(x)])
    print "Guess at exponential slope is %f"%(slopeguess)
    guess_a, guess_b, guess_c = float(y[fraction*len(x)]), x[fraction*len(x)], slopeguess
    guess = [guess_a, guess_b, guess_c]

    fitx = x[fraction*len(x):]
    fity = y[fraction*len(x):]

    exp_decay = lambda fx, A, x0, t: A * np.exp((fx-x0) * t)

    params, cov = curve_fit(exp_decay, fitx, fity, p0=guess, maxfev=2000)
    A, x0, t= params
    print "A = %s\nx0 = %s\nt = %s\n"%(A, x0, t)
    best_fit = lambda fx: A * np.exp((fx-x0)*t)

    calcx = np.array(newx,dtype=float)
    newy = best_fit(calcx)

    #func = interpolate.splrep(x[fration*len(x):],y[fraction*len(x):],s=smooth)
    #newx = np.arange(int(x[fraction*len(x)]),self.wvlBinEdges[-1])
    #newy = interpolate.splev(newx,func)

    # Append the exponential tail beyond the measured wavelengths.
    wl = np.concatenate((x,newx[newx>max(x)]))
    flux = np.concatenate((y,newy[newx>max(x)]))

    #new method, rebin data to grid of wavelengths generated from a grid of evenly spaced energy bins
    #R=7.0 at 4500
    #R=E/dE -> dE = R/E
    dE = 0.3936 #eV
    start = 1000 #Angs
    stop = 20000 #Angs
    enBins = ObsFile.makeWvlBins(dE,start,stop)
    rebinned = rebin(wl,flux,enBins)
    re_wl = rebinned[:,0]
    re_flux = rebinned[:,1]
    #plt.plot(re_wl,re_flux,color='r')

    re_wl = re_wl[np.isnan(re_flux)==False]
    re_flux = re_flux[np.isnan(re_flux)==False]

    start1 = self.wvlBinEdges[0]
    stop1 = self.wvlBinEdges[-1]
    #regrid downsampled data
    new_wl = np.arange(start1,stop1)

    #print re_wl
    #print re_flux
    #print new_wl

    #weight=1.0/(re_flux)**(2/1.00)
    print len(re_flux)
    weight = np.ones(len(re_flux))
    #decrease weights near peak
    ind = np.where(re_flux == max(re_flux))[0]
    weight[ind] = 0.3
    # Taper weights over the three bins on either side of the peak.
    for p in [1,2,3]:
        if p==1:
            wt = 0.3
        elif p==2:
            wt = 0.6
        elif p==3:
            wt = 0.7
        try:
            weight[ind+p] = wt
        except IndexError:
            pass
        try:
            if ind-p >= 0:
                weight[ind-p] = wt
        except IndexError:
            pass
    weight[-4:] = 1.0
    #weight = [0.7,1,0.3,0.3,0.5,0.7,1,1,1]
    #print len(weight)
    #weight = re_flux/min(re_flux)
    #weight = 1.0/weight
    #weight = weight/max(weight)
    #print weight
    f = interpolate.splrep(re_wl,re_flux,w=weight,k=3,s=max(re_flux)**1.71)
    new_flux = interpolate.splev(new_wl,f,der=0)
    return new_wl, new_flux
if __name__ == '__main__':
    # Allow the parameter file to be passed on the command line; fall
    # back to a developer-specific default path otherwise.
    try:
        paramFile = sys.argv[1]
    except:
        paramFile = '/home/srmeeker/ARCONS-pipeline/params/fluxCal.dict'
    fc = FluxCal(paramFile, plots=True, verbose=True)
| gpl-2.0 |
emc-openstack/storops | storops_test/lib/test_tasks.py | 1 | 3524 | # coding=utf-8
# Copyright (c) 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
import shutil
from unittest import TestCase
import tempfile
from hamcrest import assert_that, equal_to, raises
import persistqueue
from storops.lib import tasks
from storops_test.vnx.cli_mock import patch_cli, t_vnx
import time
class TestPQueue(TestCase):
    """Tests for tasks.PQueue, a persistent on-disk task queue."""

    def setUp(self):
        # Each test gets a fresh scratch directory so queues don't collide.
        self.path = tempfile.mkdtemp(suffix='storops')
        self.q = tasks.PQueue(self.path)

    def tearDown(self):
        self.q.stop()
        self.q = None
        # Give the worker a moment to stop before removing its files.
        time.sleep(0.1)
        shutil.rmtree(self.path, ignore_errors=True)

    def test_queue_path(self):
        assert_that(self.q.path, equal_to(self.path))

    def test_put(self):
        fake_vnx = t_vnx()
        self.q.put(fake_vnx.delete_lun, name='l1')

    def test_get(self):
        fake_vnx = t_vnx()
        self.q.put(fake_vnx.delete_lun, name='l1')
        pickled_item = self.q.get()
        assert_that(pickled_item['object']._ip, equal_to(fake_vnx._ip))
        assert_that(pickled_item['method'], equal_to('delete_lun'))
        assert_that(pickled_item['params']['name'], equal_to('l1'))
        self.q.task_done()
        self.q = None
        # Re-open the queue from disk: the consumed item must be gone.
        self.q = tasks.PQueue(self.path)
        assert_that(self.q.get, raises(persistqueue.Empty))

    def test_run_empty_queue(self):
        self.q.set_interval(0.01)
        self.q.start()
        # Make sure restart is fine
        self.q.start()

    @patch_cli
    def test_run_tasks(self):
        self.q.set_interval(0.01)
        fake_vnx = t_vnx()
        self.q.put(fake_vnx.delete_lun, name='l1')
        self.q.start()

    def test_re_enqueue(self):
        fake_vnx = t_vnx()
        item = {'object': fake_vnx, 'method': 'delete_lun',
                'params': {'name': 'l1'}}
        self.q.re_enqueue(item)
        assert_that(item['retries'], equal_to(1))

    def test_re_enqueue_max_retries(self):
        fake_vnx = t_vnx()
        item = {'object': fake_vnx, 'method': 'delete_lun', 'params': 'l1'}
        # Retries are capped at 100; re-enqueueing beyond that is a no-op.
        for i in range(100):
            self.q.re_enqueue(item)
            self.q.get()
        self.q.re_enqueue(item)
        assert_that(item['retries'], equal_to(100))

    @patch_cli
    def test_enqueue_expected_error(self):
        self.q.set_interval(0.1)
        fake_vnx = t_vnx()
        uid = '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:01'
        self.q.put(fake_vnx.delete_hba, hba_uid=uid)
        self.q.start()
        time.sleep(0.2)
        # Expected errors are swallowed, not re-enqueued.
        assert_that(self.q.get, raises(persistqueue.Empty))

    @patch_cli
    def test_enqueue_storops_error(self):
        self.q.set_interval(0.1)
        fake_vnx = t_vnx()
        self.q.put(fake_vnx.create_block_user,
                   name='b', password='b', role='operator')
        self.q.start()
        time.sleep(0.2)
        # Storops errors cause the task to be re-enqueued for retry.
        reenqueued_item = self.q.get()
        assert_that('b', equal_to(reenqueued_item['params']['name']))
| apache-2.0 |
diagramsoftware/odoo | addons/l10n_ma/__openerp__.py | 260 | 2074 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (c) 2010 kazacube (http://kazacube.com).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: declarative metadata only; this dict is
# evaluated by the module loader, no other code runs from this file.
{
    'name' : 'Maroc - Accounting',
    'version' : '1.0',
    'author' : 'kazacube',
    'category' : 'Localization/Account Charts',
    'description': """
This is the base module to manage the accounting chart for Maroc.
=================================================================
Ce Module charge le modèle du plan de comptes standard Marocain et permet de
générer les états comptables aux normes marocaines (Bilan, CPC (comptes de
produits et charges), balance générale à 6 colonnes, Grand livre cumulatif...).
L'intégration comptable a été validé avec l'aide du Cabinet d'expertise comptable
Seddik au cours du troisième trimestre 2010.""",
    'website': 'http://www.kazacube.com',
    'depends' : ['base', 'account'],
    # Data files are loaded in order: access rules first, then account
    # types, the Moroccan chart of accounts, wizard, taxes and journals.
    'data' : [
        'security/ir.model.access.csv',
        'account_type.xml',
        'account_pcg_morocco.xml',
        'l10n_ma_wizard.xml',
        'l10n_ma_tax.xml',
        'l10n_ma_journal.xml',
    ],
    'demo' : [],
    'auto_install': False,
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jetskijoe/headphones | lib/unidecode/x0a2.py | 253 | 4503 | data = (
'kax', # 0x00
'ka', # 0x01
'kap', # 0x02
'kuox', # 0x03
'kuo', # 0x04
'kuop', # 0x05
'kot', # 0x06
'kox', # 0x07
'ko', # 0x08
'kop', # 0x09
'ket', # 0x0a
'kex', # 0x0b
'ke', # 0x0c
'kep', # 0x0d
'kut', # 0x0e
'kux', # 0x0f
'ku', # 0x10
'kup', # 0x11
'kurx', # 0x12
'kur', # 0x13
'ggit', # 0x14
'ggix', # 0x15
'ggi', # 0x16
'ggiex', # 0x17
'ggie', # 0x18
'ggiep', # 0x19
'ggat', # 0x1a
'ggax', # 0x1b
'gga', # 0x1c
'ggap', # 0x1d
'gguot', # 0x1e
'gguox', # 0x1f
'gguo', # 0x20
'gguop', # 0x21
'ggot', # 0x22
'ggox', # 0x23
'ggo', # 0x24
'ggop', # 0x25
'gget', # 0x26
'ggex', # 0x27
'gge', # 0x28
'ggep', # 0x29
'ggut', # 0x2a
'ggux', # 0x2b
'ggu', # 0x2c
'ggup', # 0x2d
'ggurx', # 0x2e
'ggur', # 0x2f
'mgiex', # 0x30
'mgie', # 0x31
'mgat', # 0x32
'mgax', # 0x33
'mga', # 0x34
'mgap', # 0x35
'mguox', # 0x36
'mguo', # 0x37
'mguop', # 0x38
'mgot', # 0x39
'mgox', # 0x3a
'mgo', # 0x3b
'mgop', # 0x3c
'mgex', # 0x3d
'mge', # 0x3e
'mgep', # 0x3f
'mgut', # 0x40
'mgux', # 0x41
'mgu', # 0x42
'mgup', # 0x43
'mgurx', # 0x44
'mgur', # 0x45
'hxit', # 0x46
'hxix', # 0x47
'hxi', # 0x48
'hxip', # 0x49
'hxiet', # 0x4a
'hxiex', # 0x4b
'hxie', # 0x4c
'hxiep', # 0x4d
'hxat', # 0x4e
'hxax', # 0x4f
'hxa', # 0x50
'hxap', # 0x51
'hxuot', # 0x52
'hxuox', # 0x53
'hxuo', # 0x54
'hxuop', # 0x55
'hxot', # 0x56
'hxox', # 0x57
'hxo', # 0x58
'hxop', # 0x59
'hxex', # 0x5a
'hxe', # 0x5b
'hxep', # 0x5c
'ngiex', # 0x5d
'ngie', # 0x5e
'ngiep', # 0x5f
'ngat', # 0x60
'ngax', # 0x61
'nga', # 0x62
'ngap', # 0x63
'nguot', # 0x64
'nguox', # 0x65
'nguo', # 0x66
'ngot', # 0x67
'ngox', # 0x68
'ngo', # 0x69
'ngop', # 0x6a
'ngex', # 0x6b
'nge', # 0x6c
'ngep', # 0x6d
'hit', # 0x6e
'hiex', # 0x6f
'hie', # 0x70
'hat', # 0x71
'hax', # 0x72
'ha', # 0x73
'hap', # 0x74
'huot', # 0x75
'huox', # 0x76
'huo', # 0x77
'huop', # 0x78
'hot', # 0x79
'hox', # 0x7a
'ho', # 0x7b
'hop', # 0x7c
'hex', # 0x7d
'he', # 0x7e
'hep', # 0x7f
'wat', # 0x80
'wax', # 0x81
'wa', # 0x82
'wap', # 0x83
'wuox', # 0x84
'wuo', # 0x85
'wuop', # 0x86
'wox', # 0x87
'wo', # 0x88
'wop', # 0x89
'wex', # 0x8a
'we', # 0x8b
'wep', # 0x8c
'zit', # 0x8d
'zix', # 0x8e
'zi', # 0x8f
'zip', # 0x90
'ziex', # 0x91
'zie', # 0x92
'ziep', # 0x93
'zat', # 0x94
'zax', # 0x95
'za', # 0x96
'zap', # 0x97
'zuox', # 0x98
'zuo', # 0x99
'zuop', # 0x9a
'zot', # 0x9b
'zox', # 0x9c
'zo', # 0x9d
'zop', # 0x9e
'zex', # 0x9f
'ze', # 0xa0
'zep', # 0xa1
'zut', # 0xa2
'zux', # 0xa3
'zu', # 0xa4
'zup', # 0xa5
'zurx', # 0xa6
'zur', # 0xa7
'zyt', # 0xa8
'zyx', # 0xa9
'zy', # 0xaa
'zyp', # 0xab
'zyrx', # 0xac
'zyr', # 0xad
'cit', # 0xae
'cix', # 0xaf
'ci', # 0xb0
'cip', # 0xb1
'ciet', # 0xb2
'ciex', # 0xb3
'cie', # 0xb4
'ciep', # 0xb5
'cat', # 0xb6
'cax', # 0xb7
'ca', # 0xb8
'cap', # 0xb9
'cuox', # 0xba
'cuo', # 0xbb
'cuop', # 0xbc
'cot', # 0xbd
'cox', # 0xbe
'co', # 0xbf
'cop', # 0xc0
'cex', # 0xc1
'ce', # 0xc2
'cep', # 0xc3
'cut', # 0xc4
'cux', # 0xc5
'cu', # 0xc6
'cup', # 0xc7
'curx', # 0xc8
'cur', # 0xc9
'cyt', # 0xca
'cyx', # 0xcb
'cy', # 0xcc
'cyp', # 0xcd
'cyrx', # 0xce
'cyr', # 0xcf
'zzit', # 0xd0
'zzix', # 0xd1
'zzi', # 0xd2
'zzip', # 0xd3
'zziet', # 0xd4
'zziex', # 0xd5
'zzie', # 0xd6
'zziep', # 0xd7
'zzat', # 0xd8
'zzax', # 0xd9
'zza', # 0xda
'zzap', # 0xdb
'zzox', # 0xdc
'zzo', # 0xdd
'zzop', # 0xde
'zzex', # 0xdf
'zze', # 0xe0
'zzep', # 0xe1
'zzux', # 0xe2
'zzu', # 0xe3
'zzup', # 0xe4
'zzurx', # 0xe5
'zzur', # 0xe6
'zzyt', # 0xe7
'zzyx', # 0xe8
'zzy', # 0xe9
'zzyp', # 0xea
'zzyrx', # 0xeb
'zzyr', # 0xec
'nzit', # 0xed
'nzix', # 0xee
'nzi', # 0xef
'nzip', # 0xf0
'nziex', # 0xf1
'nzie', # 0xf2
'nziep', # 0xf3
'nzat', # 0xf4
'nzax', # 0xf5
'nza', # 0xf6
'nzap', # 0xf7
'nzuox', # 0xf8
'nzuo', # 0xf9
'nzox', # 0xfa
'nzop', # 0xfb
'nzex', # 0xfc
'nze', # 0xfd
'nzux', # 0xfe
'nzu', # 0xff
)
| gpl-3.0 |
oliciv/youtube-dl | youtube_dl/extractor/heise.py | 176 | 2864 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
determine_ext,
int_or_none,
parse_iso8601,
)
class HeiseIE(InfoExtractor):
    """Extractor for video articles on heise.de.

    The article page embeds a JW-player ``<div class="videoplayerjw">``
    carrying ``data-container`` and ``data-sequenz`` ids; those are
    exchanged for an XML feed that lists the downloadable formats.
    """
    _VALID_URL = r'''(?x)
        https?://(?:www\.)?heise\.de/video/artikel/
        .+?(?P<id>[0-9]+)\.html(?:$|[?#])
    '''
    _TEST = {
        'url': (
            'http://www.heise.de/video/artikel/Podcast-c-t-uplink-3-3-Owncloud-Tastaturen-Peilsender-Smartphone-2404147.html'
        ),
        'md5': 'ffed432483e922e88545ad9f2f15d30e',
        'info_dict': {
            'id': '2404147',
            'ext': 'mp4',
            'title': (
                "Podcast: c't uplink 3.3 – Owncloud / Tastaturen / Peilsender Smartphone"
            ),
            'format_id': 'mp4_720p',
            'timestamp': 1411812600,
            'upload_date': '20140927',
            'description': 'In uplink-Episode 3.3 geht es darum, wie man sich von Cloud-Anbietern emanzipieren kann, worauf man beim Kauf einer Tastatur achten sollte und was Smartphones über uns verraten.',
            'thumbnail': 're:^https?://.*\.jpe?g$',
        }
    }

    def _real_extract(self, url):
        """Return the info dict (id, title, formats, metadata) for ``url``."""
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        # The two player ids are needed to build the format-feed URL below.
        container_id = self._search_regex(
            r'<div class="videoplayerjw".*?data-container="([0-9]+)"',
            webpage, 'container ID')
        sequenz_id = self._search_regex(
            r'<div class="videoplayerjw".*?data-sequenz="([0-9]+)"',
            webpage, 'sequenz ID')
        data_url = 'http://www.heise.de/videout/feed?container=%s&sequenz=%s' % (container_id, sequenz_id)
        doc = self._download_xml(data_url, video_id)
        info = {
            'id': video_id,
            'thumbnail': self._og_search_thumbnail(webpage),
            'timestamp': parse_iso8601(
                self._html_search_meta('date', webpage)),
            'description': self._og_search_description(webpage),
        }
        # Prefer the 'fulltitle' meta tag; fall back to the OpenGraph title.
        title = self._html_search_meta('fulltitle', webpage)
        if title:
            info['title'] = title
        else:
            info['title'] = self._og_search_title(webpage)
        formats = []
        for source_node in doc.findall('.//{http://rss.jwpcdn.com/}source'):
            label = source_node.attrib['label']
            # Labels look like '720p' or 'mp4_720p'; extract the height part.
            height = int_or_none(self._search_regex(
                r'^(.*?_)?([0-9]+)p$', label, 'height', default=None))
            video_url = source_node.attrib['file']
            ext = determine_ext(video_url, '')
            formats.append({
                'url': video_url,
                'format_note': label,
                'format_id': '%s_%s' % (ext, label),
                'height': height,
            })
        self._sort_formats(formats)
        info['formats'] = formats
        return info
| unlicense |
induktio/LamaTrainer | falib/PdaSimulator.py | 1 | 2691 | import sys,os,json,pprint
class PDA():
    """
    Represents a (non-deterministic) Push-Down Automaton.

    A PDA is described by a plain dict with the keys:

    * ``start``: name of the initial state
    * ``accepting``: list of accepting state names
    * ``transitions``: mapping ``state -> {"input,pop,push": [next states]}``
      where any of input/pop/push may be ``'_e'`` (epsilon).

    Acceptance is by final state: an input is accepted when an accepting
    state is reached with the whole input consumed (the stack may be
    non-empty at that point).
    """
    # Symbol used in transition keys for "no input" / "no pop" / "no push".
    EPSILON = '_e'
    # Safety bound on explored configurations so that epsilon cycles with a
    # growing stack cannot loop forever.
    MAX_STEPS = 10000

    def __init__(self):
        pass

    def testAutomata(self, pda, testcases):
        """
        Tests whether the given PDA accepts or rejects the given testcases
        appropriately.

        Args:
            * pda: PDA description dict to be tested
            * testcases: dict mapping input string -> expected bool

        Returns:
            The result from simulating the PDA with the given inputs::

                {'result': True} -- The PDA ran correctly with every input
                {'result': False, 'minimal': 'aabb'} -- failed on input aabb
        """
        if not isinstance(testcases, dict) or len(testcases) == 0:
            return {'result': False, 'minimal': 'No testcases defined'}
        # Order by length, then alphabetically, so the reported failure is
        # a shortest (then lexicographically smallest) failing input.
        cases = sorted(sorted(testcases.keys()), key=len)
        for string in cases:
            wanted = testcases[string]
            simulate = self.simulate(pda, string)
            if wanted != simulate:
                return {'result': False, 'minimal': string}
        return {'result': True}

    def simulate(self, pda, string):
        """
        Simulates the execution of the PDA on a given input with a BFS over
        (state, stack, input index) configurations.

        Args:
            * pda: PDA description dict to be executed
            * string: input to be simulated

        Returns:
            | True, if the PDA accepts the string
            | False, if not (or if MAX_STEPS configurations were explored)
        """
        visited = set()
        queue = [(pda['start'], "", 0)]  # (state, stack, input index)
        n = 0
        while len(queue) != 0:
            current = queue.pop(0)
            visited.add(current)
            curState, curStack, curIndex = current
            if curState in pda['accepting'] and curIndex == len(string):
                return True
            if curIndex < len(string):
                nextInput = string[curIndex]
            else:
                nextInput = None
            n += 1
            if n > PDA.MAX_STEPS:
                break
            for transition in pda['transitions'][curState]:
                inputSymbol, popStack, pushStack = transition.split(',')
                nextStates = pda['transitions'][curState][transition]
                nextStack = curStack
                nextIndex = curIndex
                valid = True
                if inputSymbol == PDA.EPSILON or inputSymbol == nextInput:
                    if popStack != PDA.EPSILON:
                        # Bug fix: popping with an *empty* stack used to
                        # raise IndexError (curStack[-1] on ""); an empty
                        # stack simply means the transition does not apply.
                        if curStack and curStack[-1] == popStack:
                            nextStack = curStack[:-1]
                        else:
                            valid = False
                    if pushStack != PDA.EPSILON:
                        nextStack += pushStack
                    if inputSymbol != PDA.EPSILON:
                        nextIndex = curIndex + 1
                    if valid:
                        for state in nextStates:
                            nextItem = (state, nextStack, nextIndex)
                            if nextItem not in visited:
                                queue.append(nextItem)
        return False
if __name__ == '__main__' and len(sys.argv) > 2:
    # Usage: PdaSimulator.py <pda-definition.json> <input-string>
    # len() guard instead of indexing argv, so missing arguments no longer
    # raise IndexError before the condition is even evaluated.
    comp = PDA()
    with open(sys.argv[1], 'r') as fh:
        automata = json.load(fh)
    # print(...) with a single argument works under both Python 2 and 3;
    # the previous "print expr" statement was Python-2-only syntax.
    print(comp.simulate(automata, sys.argv[2]))
| gpl-3.0 |
rossumai/keras-multi-gpu | keras_tf_multigpu/examples/kuza55/cifar10_cnn_functional_multigpu.py | 1 | 4556 | '''Train a simple deep CNN on the CIFAR10 small images dataset.
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatx=float32 python cifar10_cnn.py
It gets down to 0.65 test logloss in 25 epochs, and down to 0.55 after 50 epochs.
(it's still underfitting at that point, though).
'''
from __future__ import print_function
import keras
from keras import backend as K
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.layers import Input, Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.layers.merge import concatenate
from keras.layers.core import Lambda
import os
import tensorflow as tf
from keras_tf_multigpu.kuza55 import make_parallel
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
sess = tf.Session()
# Hand the session to Keras so all model ops are created in it.
K.set_session(sess)
# Parameter-server device handed to make_parallel() below.
ps_device = '/gpu:0'
# Number of usable GPUs, derived from CUDA_VISIBLE_DEVICES (0 when unset).
gpu_count = len([dev for dev in os.environ.get('CUDA_VISIBLE_DEVICES', '').split(',') if len(dev.strip()) > 0])
batch_size = 128
num_classes = 10
epochs = 6
data_augmentation = True
# The data, shuffled and split between train and test sets:
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
def basic_model():
    """Build the single-tower CIFAR10 CNN: two conv blocks plus a dense head.

    Reads the module-level ``x_train`` (for the input shape) and
    ``num_classes`` globals; returns an uncompiled Keras ``Model``.
    """
    # NOTE(review): ``input`` shadows the builtin of the same name.
    input = Input(shape=x_train.shape[1:])
    x = Conv2D(32, (3, 3), padding='same')(input)
    x = Activation('relu')(x)
    x = Conv2D(32, (3, 3))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Conv2D(64, (3, 3), padding='same')(x)
    x = Activation('relu')(x)
    x = Conv2D(64, (3, 3))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D(pool_size=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Flatten()(x)
    x = Dense(512)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(num_classes)(x)
    output = Activation('softmax')(x)
    model = Model(inputs=input, outputs=output)
    print('Single tower model:')
    model.summary()
    return model
# Build the single-tower model on the parameter-server device, then
# replicate it across the visible GPUs.
with tf.device(ps_device):
    serial_model = basic_model()

print('Serial model:')
serial_model.summary()

# Bug fix: the original called make_parallel(tower, ...) but ``tower`` was
# never defined (NameError at import time); the single-tower model built
# above is what must be replicated.
model = make_parallel(serial_model, gpu_count, ps_device)
print('Multi-GPU model:')
model.summary()

# initiate RMSprop optimizer
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
# Scale pixel values from [0, 255] to [0, 1].
x_train /= 255
x_test /= 255

if not data_augmentation:
    print('Not using data augmentation.')
    model.fit(x_train, y_train,
              batch_size=batch_size,
              epochs=epochs,
              validation_data=(x_test, y_test),
              shuffle=True)
else:
    print('Using real-time data augmentation.')
    # This will do preprocessing and realtime data augmentation:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
        width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images

    # Compute quantities required for feature-wise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)

    # Fit the model on the batches generated by datagen.flow().
    model.fit_generator(datagen.flow(x_train, y_train,
                                     batch_size=batch_size),
                        steps_per_epoch=x_train.shape[0] // batch_size,
                        epochs=epochs,
                        validation_data=(x_test, y_test))
| mit |
nomadcube/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
# NOTE(review): n_neighbors is never used below -- presumably a leftover
# from the KNN example this demo was adapted from; safe to delete.
n_neighbors = 15

# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh

# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])

# One plot without shrinkage and one with a shrink threshold of 0.1.
for shrinkage in [None, 0.1]:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = NearestCentroid(shrink_threshold=shrinkage)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # Training-set accuracy for this shrinkage setting.
    print(shrinkage, np.mean(y == y_pred))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)

    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.title("3-Class classification (shrink_threshold=%r)"
              % shrinkage)
    plt.axis('tight')

plt.show()
| bsd-3-clause |
max00xam/service.maxxam.teamwatch | lib/engineio/async_aiohttp.py | 3 | 3824 | import asyncio
import sys
from urllib.parse import urlsplit
from aiohttp.web import Response, WebSocketResponse
import six
def create_route(app, engineio_server, engineio_endpoint):
    """This function sets up the engine.io endpoint as a route for the
    application.

    Note that both GET and POST requests must be hooked up on the engine.io
    endpoint.
    """
    app.router.add_get(engineio_endpoint, engineio_server.handle_request)
    app.router.add_post(engineio_endpoint, engineio_server.handle_request)
    # OPTIONS is routed as well -- presumably so CORS preflight requests
    # reach the engine.io server; confirm against its CORS handling.
    app.router.add_route('OPTIONS', engineio_endpoint,
                         engineio_server.handle_request)
def translate_request(request):
    """Build a WSGI-compatible ``environ`` dictionary from the aiohttp
    request object passed to the request handler.
    """
    message = request._message
    split_uri = urlsplit(message.path)

    # Fixed entries plus everything derivable from the request line.
    environ = {
        'wsgi.input': request._payload,
        'wsgi.errors': sys.stderr,
        'wsgi.version': (1, 0),
        'wsgi.async': True,
        'wsgi.multithread': False,
        'wsgi.multiprocess': False,
        'wsgi.run_once': False,
        'SERVER_SOFTWARE': 'aiohttp',
        'REQUEST_METHOD': message.method,
        'QUERY_STRING': split_uri.query or '',
        'RAW_URI': message.path,
        'SERVER_PROTOCOL': 'HTTP/%s.%s' % message.version,
        'REMOTE_ADDR': '127.0.0.1',
        'REMOTE_PORT': '0',
        'SERVER_NAME': 'aiohttp',
        'SERVER_PORT': '0',
        'aiohttp.request': request,
    }

    # Fold the HTTP headers in, CGI-style; repeated headers are joined
    # with commas.
    for raw_name, value in message.headers.items():
        name = raw_name.upper()
        if name == 'CONTENT-TYPE':
            environ['CONTENT_TYPE'] = value
        elif name == 'CONTENT-LENGTH':
            environ['CONTENT_LENGTH'] = value
        else:
            key = 'HTTP_' + name.replace('-', '_')
            if key in environ:
                value = '%s,%s' % (environ[key], value)
            environ[key] = value

    environ['wsgi.url_scheme'] = environ.get('HTTP_X_FORWARDED_PROTO', 'http')
    environ['PATH_INFO'] = split_uri.path
    environ['SCRIPT_NAME'] = ''
    return environ
def make_response(status, headers, payload, environ):
    """This function generates an appropriate response object for this async
    mode.  ``environ`` is accepted for interface compatibility and unused.
    """
    # ``status`` is a WSGI status line such as '200 OK'.
    status_code = int(status.split()[0])
    return Response(body=payload, headers=headers, status=status_code)
class WebSocket(object):  # pragma: no cover
    """
    This wrapper class provides a aiohttp WebSocket interface that is
    somewhat compatible with eventlet's implementation.
    """
    def __init__(self, handler):
        # ``handler`` is the coroutine invoked once the handshake completes.
        self.handler = handler
        self._sock = None

    async def __call__(self, environ):
        # Upgrade the wrapped aiohttp request to a WebSocket, then hand the
        # whole conversation over to the stored handler.
        request = environ['aiohttp.request']
        self._sock = WebSocketResponse()
        await self._sock.prepare(request)
        self.environ = environ
        await self.handler(self)
        return self._sock

    async def close(self):
        await self._sock.close()

    async def send(self, message):
        # Pick the binary or text send method based on the payload type.
        if isinstance(message, bytes):
            f = self._sock.send_bytes
        else:
            f = self._sock.send_str
        # send_* may be a coroutine function or a plain callable depending
        # on the aiohttp version; support both.
        if asyncio.iscoroutinefunction(f):
            await f(message)
        else:
            f(message)

    async def wait(self):
        msg = await self._sock.receive()
        # Non-data frames (close, ping, errors) surface as IOError so the
        # caller can treat the connection as gone.
        if not isinstance(msg.data, six.binary_type) and \
                not isinstance(msg.data, six.text_type):
            raise IOError()
        return msg.data
# Capability table exposing this module's aiohttp-specific pieces --
# presumably looked up by the engine.io server core when it selects an
# async mode (note 'websocket' points back at this very module).
_async = {
    'asyncio': True,
    'create_route': create_route,
    'translate_request': translate_request,
    'make_response': make_response,
    'websocket': sys.modules[__name__],
    'websocket_class': 'WebSocket'
}
| gpl-3.0 |
Vova23/limbo-android | jni/glib/glib/update-pcre/make_utt.py | 47 | 1330 | #! /usr/bin/env python
# Reduce the number of relocations using a single string for the
# keys in the _pcre_utt table.
import re
# Rewrite pcre_tables.c in place: replace the per-row string keys of the
# _pcre_utt table with offsets into one shared _pcre_ucp_names string,
# which reduces the number of relocations.
with open('pcre_tables.c') as fin:
    data = fin.read()

mo = re.search(r'const ucp_type_table _pcre_utt\[] = {', data)
assert mo, '_pcre_utt not found'

before = data[:mo.start()]
table_decl = data[mo.start():mo.end()]
table_start = mo.end()

mo = re.compile('};').search(data, table_start)
assert mo, 'end of _pcre_utt not found'

after = data[mo.end():]
table_end = mo.start()
table = data[table_start:table_end].strip()

# One table row, e.g.:  { "Any", PT_ANY, 0 },
# (raw string: the original non-raw literal relied on '\s' surviving as-is,
# which is a DeprecationWarning/SyntaxWarning on modern Python)
rs = r'\s*\{\s*"(?P<name>[^"]*)",\s*(?P<type>PT_[A-Z]*),\s*(?P<value>(?:0|ucp_[A-Za-z_]*))\s*},?\s*$'
r = re.compile(rs)

lines = []
names = []
pos = 0
for line in table.split('\n'):
    mo = r.match(line)
    assert mo, 'line not recognized'

    # 'type' renamed to avoid shadowing the builtin.
    name, entry_type, value = mo.groups()
    # Each rewritten row stores the offset of its name in _pcre_ucp_names.
    lines.append(' { %d, %s, %s }' % (pos, entry_type, value))
    names.append(name)
    # +1 for the '\0'
    pos += len(name) + 1

table = ',\n'.join(lines)

names = [' "%s\\0"' % n for n in names]
names_string = ' \n'.join(names) + ';'

data = before + \
       'const char _pcre_ucp_names[] =\n' + \
       names_string + \
       '\n\n' + \
       table_decl + \
       '\n' + \
       table + \
       '\n};' + \
       after

with open('pcre_tables.c', 'w') as fout:
    fout.write(data)
| gpl-2.0 |
drpngx/tensorflow | tensorflow/contrib/opt/python/training/lazy_adam_optimizer_test.py | 104 | 6037 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for LazyAdamOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.opt.python.training import lazy_adam_optimizer
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """NumPy reference implementation of a single Adam update.

  Returns the tuple ``(param_t, m_t, v_t)`` -- the updated parameter and
  the updated first/second moment estimates for (1-based) timestep ``t``.
  """
  # Bias-corrected step size for this timestep.
  lr_t = alpha * np.sqrt(1 - beta2**t) / (1 - beta1**t)
  # Exponential moving averages of the gradient and its square.
  m_t = m * beta1 + g_t * (1 - beta1)
  v_t = v * beta2 + np.square(g_t) * (1 - beta2)
  param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return param_t, m_t, v_t
class AdamOptimizerTest(test.TestCase):
def testSparse(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
# Initialize variables for numpy implementation.
m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
var0 = variables.Variable(var0_np)
var1 = variables.Variable(var1_np)
grads0_np_indices = np.array([0, 1], dtype=np.int32)
grads0 = ops.IndexedSlices(
constant_op.constant(grads0_np),
constant_op.constant(grads0_np_indices), constant_op.constant([2]))
grads1_np_indices = np.array([0, 1], dtype=np.int32)
grads1 = ops.IndexedSlices(
constant_op.constant(grads1_np),
constant_op.constant(grads1_np_indices), constant_op.constant([2]))
opt = lazy_adam_optimizer.LazyAdamOptimizer()
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
variables.global_variables_initializer().run()
# Fetch params to validate initial values
self.assertAllClose([1.0, 2.0], var0.eval())
self.assertAllClose([3.0, 4.0], var1.eval())
beta1_power, beta2_power = opt._get_beta_accumulators()
# Run 3 steps of Adam
for t in range(1, 4):
self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
update.run()
var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
# Validate updated params
self.assertAllCloseAccordingToType(var0_np, var0.eval())
self.assertAllCloseAccordingToType(var1_np, var1.eval())
def testSparseDevicePlacement(self):
for index_dtype in [dtypes.int32, dtypes.int64]:
with self.test_session(force_gpu=test.is_gpu_available()):
# If a GPU is available, tests that all optimizer ops can be placed on
# it (i.e. they have GPU kernels).
var = variables.Variable([[1.0], [2.0]])
indices = constant_op.constant([0, 1], dtype=index_dtype)
gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
minimize_op = optimizer.minimize(gathered_sum)
variables.global_variables_initializer().run()
minimize_op.run()
def testSparseRepeatedIndices(self):
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with self.test_session():
repeated_index_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
aggregated_update_var = variables.Variable(
[[1.0], [2.0]], dtype=dtype)
grad_repeated_index = ops.IndexedSlices(
constant_op.constant(
[0.1, 0.1], shape=[2, 1], dtype=dtype),
constant_op.constant([1, 1]),
constant_op.constant([2, 1]))
grad_aggregated = ops.IndexedSlices(
constant_op.constant(
[0.2], shape=[1, 1], dtype=dtype),
constant_op.constant([1]),
constant_op.constant([2, 1]))
repeated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
repeated_update = repeated_update_opt.apply_gradients(
[(grad_repeated_index, repeated_index_update_var)])
aggregated_update_opt = lazy_adam_optimizer.LazyAdamOptimizer()
aggregated_update = aggregated_update_opt.apply_gradients(
[(grad_aggregated, aggregated_update_var)])
variables.global_variables_initializer().run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
for _ in range(3):
repeated_update.run()
aggregated_update.run()
self.assertAllClose(aggregated_update_var.eval(),
repeated_index_update_var.eval())
if __name__ == "__main__":
test.main()
| apache-2.0 |
karlito40/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/tokenizertotree.py | 483 | 1965 | from __future__ import absolute_import, division, unicode_literals
import sys
import os
import json
import re
import html5lib
from . import support
from . import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\|\s*)<html ([^>]+)>", re.M).sub
def main(out_path):
    """Convert every tokenizer ``*.test`` file into a tree-construction
    ``.dat`` file written under ``out_path``."""
    if not os.path.exists(out_path):
        # Consistency fix: the message previously lacked the trailing
        # newline that every other stderr write in this module has.
        sys.stderr.write("Path %s does not exist\n" % out_path)
        sys.exit(1)

    for filename in support.get_data_files('tokenizer', '*.test'):
        run_file(filename, out_path)
def run_file(filename, out_path):
    """Convert one tokenizer test file; files with unparsable JSON are
    skipped with a message on stderr."""
    try:
        # Close the input promptly instead of leaking the handle, as the
        # original json.load(open(...)) did.
        with open(filename, "r") as input_file:
            tests_data = json.load(input_file)
    except ValueError:
        sys.stderr.write("Failed to load %s\n" % filename)
        return
    name = os.path.splitext(os.path.split(filename)[1])[0]
    # The output file is (intentionally) created even when there are no
    # tests, matching the previous behaviour.
    with open(os.path.join(out_path, "tokenizer_%s.dat" % name), "w") as output_file:
        if 'tests' in tests_data:
            for test_data in tests_data['tests']:
                if 'initialStates' not in test_data:
                    test_data["initialStates"] = ["Data state"]
                for initial_state in test_data["initialStates"]:
                    if initial_state != "Data state":
                        # don't support this yet
                        continue
                    test = make_test(test_data)
                    output_file.write(test)
def make_test(test_data):
    """Render a single tokenizer test as one tree-construction .dat section.

    NOTE(review): the appended values mix str and utf-8 encoded bytes and
    are later joined with "\\n" -- this only works on Python 2, where
    bytes is str; confirm before running under Python 3.
    """
    if 'doubleEscaped' in test_data:
        test_data = test_tokenizer.unescape_test(test_data)
    rv = []
    rv.append("#data")
    rv.append(test_data["input"].encode("utf8"))
    rv.append("#errors")
    # Parse with the module-level html5lib parser and serialize the tree.
    tree = p.parse(test_data["input"])
    output = p.tree.testSerializer(tree)
    # Strip one level of the serializer's indentation.
    output = "\n".join(("| " + line[3:]) if line.startswith("| ") else line
                       for line in output.split("\n"))
    # Drop the xmlns attribute from the expected <html> line.
    output = unnamespaceExpected(r"\1<\2>", output)
    rv.append(output.encode("utf8"))
    rv.append("")
    return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
| mpl-2.0 |
j-carpentier/nova | nova/vnc/__init__.py | 44 | 2390 | #!/usr/bin/env python
# Copyright (c) 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for VNC Proxying."""
from oslo_config import cfg
# Options registered below under the "[vnc]" group.  Every option keeps a
# deprecated alias in the [DEFAULT] group (deprecated_group/deprecated_name)
# so legacy configuration files keep working.
vnc_opts = [
    cfg.StrOpt('novncproxy_base_url',
               default='http://127.0.0.1:6080/vnc_auto.html',
               help='Location of VNC console proxy, in the form '
                    '"http://127.0.0.1:6080/vnc_auto.html"',
               deprecated_group='DEFAULT',
               deprecated_name='novncproxy_base_url'),
    cfg.StrOpt('xvpvncproxy_base_url',
               default='http://127.0.0.1:6081/console',
               help='Location of nova xvp VNC console proxy, in the form '
                    '"http://127.0.0.1:6081/console"',
               deprecated_group='DEFAULT',
               deprecated_name='xvpvncproxy_base_url'),
    cfg.StrOpt('vncserver_listen',
               default='127.0.0.1',
               help='IP address on which instance vncservers should listen',
               deprecated_group='DEFAULT',
               deprecated_name='vncserver_listen'),
    cfg.StrOpt('vncserver_proxyclient_address',
               default='127.0.0.1',
               help='The address to which proxy clients '
                    '(like nova-xvpvncproxy) should connect',
               deprecated_group='DEFAULT',
               deprecated_name='vncserver_proxyclient_address'),
    cfg.BoolOpt('enabled',
                default=True,
                help='Enable VNC related features',
                deprecated_group='DEFAULT',
                deprecated_name='vnc_enabled'),
    cfg.StrOpt('keymap',
               default='en-us',
               help='Keymap for VNC',
               deprecated_group='DEFAULT',
               deprecated_name='vnc_keymap'),
    ]

CONF = cfg.CONF
# Register at import time so [vnc] options are available to any consumer.
CONF.register_opts(vnc_opts, group='vnc')
| apache-2.0 |
lumig242/Video-Share-System | video/views.py | 1 | 4497 | from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render_to_response
from django.template import RequestContext
from video.form import *
from video.models import Video,Comment
from django.contrib.auth.decorators import login_required
import json
@login_required
def upload(request):
    """Show the upload form (GET) and create a Video from a valid POST.

    On success redirects to the relative 'success/' URL; a GET renders a
    fresh form, and an invalid POST falls through to re-render the bound
    form (with its errors) in upload.html via locals().
    """
    uploadFlag = True  # handed to the template through locals()
    if request.method == 'POST':
        form = UploadFileForm(request.POST, request.FILES)
        if form.is_valid():
            video = Video()
            video.owner = request.user
            video.title = form.cleaned_data['title']
            video.file = request.FILES['file']
            video.description = form.cleaned_data["description"]
            video.save()
            return HttpResponseRedirect('success/')
    else:
        form = UploadFileForm()
    return render_to_response('upload.html', locals(),context_instance=RequestContext(request))
def uploadSuccess(request):
    """Static confirmation page shown after a successful upload."""
    return render_to_response('upload_Success.html',context_instance=RequestContext(request))
def homepage_video_list(request):
    """Render the homepage with the five top-rated and five newest videos.

    The rating key uses (1 + rating_person) in the denominator so unrated
    videos do not divide by zero.
    """
    all_videos = Video.objects.all()
    # Bug fix: the list was sorted ascending, so ``highscore`` actually
    # held the five *lowest*-rated videos; sort descending instead.
    highscore = sorted(all_videos,
                       key=lambda x: 1. * x.rating_sum / (1 + x.rating_person),
                       reverse=True)[0:5]
    # NOTE(review): relies on the model's default ordering meaning
    # "newest first" -- confirm Video.Meta.ordering.
    latest = Video.objects.all()[0:5]
    return render_to_response('homepage.html', locals(), context_instance=RequestContext(request))
def video_play(request, video_id):
    """Render the playback page for a single video.

    Template context comes from ``locals()``: video_object, video_path, own,
    points and commentList.
    """
    video_object = Video.objects.get(id=video_id)
    video_path = video_object.file.url
    # Whether the viewer owns this clip (controls edit/delete UI).
    own = (request.user == video_object.owner)
    if not video_object.rating_person:
        points = "Not rated"
    else:
        points = round(1.0 * video_object.rating_sum / video_object.rating_person, 1)
    # Newest comments first.
    commentList = Comment.objects.filter(video=video_object).order_by('-time')
    return render_to_response('videoplay.html', locals(),
                              context_instance=RequestContext(request))
def rate_video(request, video_id):
    """Record one rating vote for a video (AJAX) and return the new average.

    Returns the rounded average as the response body on success, or HTTP 400
    for a non-POST request or an invalid form.  (The original fell off the
    end in those cases, returning None, which crashes Django's response
    handling; debug prints and the unused ``HasRated`` local were removed.)
    """
    if request.method == 'POST':
        form = RatingForm(request.POST)
        if form.is_valid():
            video_object = Video.objects.get(id=video_id)
            # NOTE(review): read-modify-write is racy under concurrent votes;
            # Django F() expressions would make the increments atomic.
            video_object.rating_person += 1
            video_object.rating_sum += form.cleaned_data['rate']
            video_object.save()
            points = round(1.0 * video_object.rating_sum / video_object.rating_person, 1)
            return HttpResponse(points)
    return HttpResponse(status=400)
def comment_video(request, video_id):
    """Store a comment posted via AJAX and return it as an HTML snippet.

    Returns HTTP 400 for a non-POST request or an invalid form (the original
    implicitly returned None there, which crashes Django's response handling).
    """
    if request.method == 'POST':
        form = SendCommentForm(request.POST)
        if form.is_valid():
            comment = Comment()
            comment.author = request.user
            comment.video = Video.objects.filter(id=video_id)[0]
            comment.content = form.cleaned_data['content']
            comment.save()
            # Escape user-controlled text before embedding it in HTML -- the
            # original interpolated it raw (stored-XSS risk).
            from django.utils.html import escape
            # strftime fix: %M is minutes; the original used %m (the month),
            # so "12:03 PM" rendered with the month number instead of minutes.
            s = ('<p>' + escape(comment.author.username)
                 + comment.time.strftime(" %b. %d, %Y, %I:%M %p ")
                 + escape(comment.content) + '</p>')
            return HttpResponse(s)
    return HttpResponse(status=400)
def video_modify(request, video_id):
    """Edit the title and description of an existing video.

    Template context is passed via ``locals()``: modifyFlag, video_object,
    form (and uploadFlag on POST).
    NOTE(review): the GET form is not pre-filled with the current values --
    confirm whether that is intentional.
    """
    modifyFlag = True
    video_object = Video.objects.get(id=video_id)
    if request.method != 'POST':
        form = ModifyVideoForm()
        return render_to_response('upload.html', locals(),
                                  context_instance=RequestContext(request))
    uploadFlag = True
    form = ModifyVideoForm(request.POST)
    if form.is_valid():
        video_object.title = form.cleaned_data['title']
        video_object.description = form.cleaned_data['description']
        video_object.save()
        return HttpResponseRedirect('/videoplay/{}'.format(video_id))
    # Invalid POST: re-render with the bound form.
    return render_to_response('upload.html', locals(),
                              context_instance=RequestContext(request))
def video_delete(request, video_id):
    """Delete the given video and send the user back to the timeline."""
    Video.objects.get(id=video_id).delete()
    return HttpResponseRedirect('/timeline')
def video_share(request, video_id):
    """Re-post an existing video under the current user's account.

    The new record reuses the original's stored file reference; afterwards
    the user is redirected back to the *original* video's page.
    """
    source = Video.objects.get(id=video_id)
    shared = Video()
    shared.owner = request.user
    shared.title = source.title
    shared.file = source.file
    shared.description = source.description
    shared.save()
    return HttpResponseRedirect('/videoplay/{}'.format(video_id))
| mit |
hrishioa/Aviato | flask/Lib/site-packages/osgeo/ogr.py | 1 | 171430 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
# SWIG-generated import shim: on Python >= 2.6, locate the compiled _ogr
# extension module *next to this file* via imp.find_module, so the Python
# proxy and its C extension stay paired even with unusual sys.path setups.
# Older interpreters fall back to a plain import.
if version_info >= (2,6,0):
    def swig_import_helper():
        from os.path import dirname
        import imp
        fp = None
        try:
            # Search only this package's directory for the extension.
            fp, pathname, description = imp.find_module('_ogr', [dirname(__file__)])
        except ImportError:
            # Not found beside this file -- fall back to the normal path.
            import _ogr
            return _ogr
        if fp is not None:
            try:
                _mod = imp.load_module('_ogr', fp, pathname, description)
            finally:
                # Always release the file handle opened by find_module.
                fp.close()
            return _mod
    _ogr = swig_import_helper()
    del swig_import_helper
else:
    import _ogr
del version_info  # keep the module namespace clean
# Alias the builtin 'property' for use by the generated proxy classes.
try:
    _swig_property = property
except NameError:
    pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
    """Assign *value* to *name* under SWIG's non-dynamic attribute rules.

    "thisown" and "this" are routed to the underlying SWIG pointer object;
    any other name first tries a generated setter in ``__swig_setmethods__``.
    When *static* is true, names with no setter raise AttributeError instead
    of being stored in the instance dict.
    """
    if name == "thisown":
        return self.this.own(value)
    if name == "this":
        if type(value).__name__ == 'SwigPyObject':
            self.__dict__[name] = value
            return
    setter = class_type.__swig_setmethods__.get(name, None)
    if setter:
        return setter(self, value)
    if static:
        raise AttributeError("You cannot add attributes to %s" % self)
    self.__dict__[name] = value
def _swig_setattr(self, class_type, name, value):
    """Non-static attribute assignment: unknown names go into __dict__."""
    return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
    """Resolve *name* through SWIG's generated getters.

    "thisown" reflects ownership of the underlying C object; other names are
    looked up in ``__swig_getmethods__``.  Unknown names raise AttributeError,
    matching normal attribute semantics.
    """
    if name == "thisown":
        return self.this.own()
    getter = class_type.__swig_getmethods__.get(name, None)
    if getter:
        return getter(self)
    raise AttributeError(name)
def _swig_repr(self):
    """repr() helper for SWIG proxies: names the class and, when present,
    the wrapped C pointer object."""
    try:
        strthis = "proxy of " + self.this.__repr__()
    except:  # no underlying C object attached yet
        strthis = ""
    cls = self.__class__
    return "<%s.%s; %s >" % (cls.__module__, cls.__name__, strthis)
# Use new-style classes where available (Python >= 2.2); _newclass records
# whether that succeeded so generated code can pick the right idioms.
try:
    _object = object
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
def _swig_setattr_nondynamic_method(set):
    """Wrap *set* so that only pre-existing attributes (plus "this" and
    "thisown") may be assigned; new names raise AttributeError."""
    def set_attr(self, name, value):
        if name == "thisown":
            return self.this.own(value)
        if name == "this" or hasattr(self, name):
            set(self, name, value)
        else:
            raise AttributeError("You cannot add attributes to %s" % self)
    return set_attr
# --- Constants re-exported from the compiled _ogr extension ---

# Bit flags marking 2.5D (Z-aware) geometry type codes.
wkb25DBit = _ogr.wkb25DBit
wkb25Bit = _ogr.wkb25Bit

# Well-known-binary geometry type codes.
wkbUnknown = _ogr.wkbUnknown
wkbPoint = _ogr.wkbPoint
wkbLineString = _ogr.wkbLineString
wkbPolygon = _ogr.wkbPolygon
wkbMultiPoint = _ogr.wkbMultiPoint
wkbMultiLineString = _ogr.wkbMultiLineString
wkbMultiPolygon = _ogr.wkbMultiPolygon
wkbGeometryCollection = _ogr.wkbGeometryCollection
wkbNone = _ogr.wkbNone
wkbLinearRing = _ogr.wkbLinearRing

# 2.5D variants of the geometry type codes above.
wkbPoint25D = _ogr.wkbPoint25D
wkbLineString25D = _ogr.wkbLineString25D
wkbPolygon25D = _ogr.wkbPolygon25D
wkbMultiPoint25D = _ogr.wkbMultiPoint25D
wkbMultiLineString25D = _ogr.wkbMultiLineString25D
wkbMultiPolygon25D = _ogr.wkbMultiPolygon25D
wkbGeometryCollection25D = _ogr.wkbGeometryCollection25D

# Attribute field types (OGRFieldType).
OFTInteger = _ogr.OFTInteger
OFTIntegerList = _ogr.OFTIntegerList
OFTReal = _ogr.OFTReal
OFTRealList = _ogr.OFTRealList
OFTString = _ogr.OFTString
OFTStringList = _ogr.OFTStringList
OFTWideString = _ogr.OFTWideString
OFTWideStringList = _ogr.OFTWideStringList
OFTBinary = _ogr.OFTBinary
OFTDate = _ogr.OFTDate
OFTTime = _ogr.OFTTime
OFTDateTime = _ogr.OFTDateTime

# Field justification (OGRJustification).
OJUndefined = _ogr.OJUndefined
OJLeft = _ogr.OJLeft
OJRight = _ogr.OJRight

# Well-known-binary byte orders.
wkbXDR = _ogr.wkbXDR
wkbNDR = _ogr.wkbNDR

# Sentinel feature id for "no FID".
NullFID = _ogr.NullFID

# Flags for Layer.AlterFieldDefn().
ALTER_NAME_FLAG = _ogr.ALTER_NAME_FLAG
ALTER_TYPE_FLAG = _ogr.ALTER_TYPE_FLAG
ALTER_WIDTH_PRECISION_FLAG = _ogr.ALTER_WIDTH_PRECISION_FLAG
ALTER_ALL_FLAG = _ogr.ALTER_ALL_FLAG

# Layer capability names for Layer.TestCapability().
OLCRandomRead = _ogr.OLCRandomRead
OLCSequentialWrite = _ogr.OLCSequentialWrite
OLCRandomWrite = _ogr.OLCRandomWrite
OLCFastSpatialFilter = _ogr.OLCFastSpatialFilter
OLCFastFeatureCount = _ogr.OLCFastFeatureCount
OLCFastGetExtent = _ogr.OLCFastGetExtent
OLCCreateField = _ogr.OLCCreateField
OLCDeleteField = _ogr.OLCDeleteField
OLCReorderFields = _ogr.OLCReorderFields
OLCAlterFieldDefn = _ogr.OLCAlterFieldDefn
OLCTransactions = _ogr.OLCTransactions
OLCDeleteFeature = _ogr.OLCDeleteFeature
OLCFastSetNextByIndex = _ogr.OLCFastSetNextByIndex
OLCStringsAsUTF8 = _ogr.OLCStringsAsUTF8
OLCIgnoreFields = _ogr.OLCIgnoreFields
OLCCreateGeomField = _ogr.OLCCreateGeomField

# DataSource capability names for DataSource.TestCapability().
ODsCCreateLayer = _ogr.ODsCCreateLayer
ODsCDeleteLayer = _ogr.ODsCDeleteLayer
ODsCCreateGeomFieldAfterCreateLayer = _ogr.ODsCCreateGeomFieldAfterCreateLayer

# Driver capability names for Driver.TestCapability().
ODrCCreateDataSource = _ogr.ODrCCreateDataSource
ODrCDeleteDataSource = _ogr.ODrCDeleteDataSource
def GetUseExceptions(*args):
    """GetUseExceptions() -> int"""
    # Report whether OGR errors are currently raised as Python exceptions.
    return _ogr.GetUseExceptions(*args)
def UseExceptions(*args):
    """UseExceptions()"""
    # Make OGR errors raise Python exceptions instead of returning error codes.
    return _ogr.UseExceptions(*args)
def DontUseExceptions(*args):
    """DontUseExceptions()"""
    # Revert to OGR's default error-code style of error reporting.
    return _ogr.DontUseExceptions(*args)
import osr
# SWIG proxy for OGRStyleTable: a named collection of feature style strings.
# All methods delegate to the compiled _ogr extension.
class StyleTable(object):
    """Proxy of C++ OGRStyleTableShadow class"""
    # Ownership flag for the underlying C object, managed by SWIG.
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, *args):
        """__init__(OGRStyleTableShadow self) -> StyleTable"""
        this = _ogr.new_StyleTable(*args)
        # SWIG idiom: attach the new C pointer to this proxy instance.
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _ogr.delete_StyleTable
    __del__ = lambda self : None;
    def AddStyle(self, *args):
        """AddStyle(StyleTable self, char const * pszName, char const * pszStyleString) -> int"""
        return _ogr.StyleTable_AddStyle(self, *args)
    def LoadStyleTable(self, *args):
        """LoadStyleTable(StyleTable self, char const * utf8_path) -> int"""
        return _ogr.StyleTable_LoadStyleTable(self, *args)
    def SaveStyleTable(self, *args):
        """SaveStyleTable(StyleTable self, char const * utf8_path) -> int"""
        return _ogr.StyleTable_SaveStyleTable(self, *args)
    def Find(self, *args):
        """Find(StyleTable self, char const * pszName) -> char const *"""
        return _ogr.StyleTable_Find(self, *args)
    def ResetStyleStringReading(self, *args):
        """ResetStyleStringReading(StyleTable self)"""
        return _ogr.StyleTable_ResetStyleStringReading(self, *args)
    def GetNextStyle(self, *args):
        """GetNextStyle(StyleTable self) -> char const *"""
        return _ogr.StyleTable_GetNextStyle(self, *args)
    def GetLastStyleName(self, *args):
        """GetLastStyleName(StyleTable self) -> char const *"""
        return _ogr.StyleTable_GetLastStyleName(self, *args)
# Register the proxy class with the SWIG runtime.
StyleTable_swigregister = _ogr.StyleTable_swigregister
StyleTable_swigregister(StyleTable)
# SWIG proxy for OGRSFDriver: one registered vector format driver
# (e.g. "ESRI Shapefile").  Instances come from the driver registry only.
class Driver(object):
    """Proxy of C++ OGRDriverShadow class"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    # Drivers cannot be constructed from Python.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    name = _swig_property(_ogr.Driver_name_get)
    def CreateDataSource(self, *args, **kwargs):
        """
        CreateDataSource(Driver self, char const * utf8_path, char ** options=None) -> DataSource

        OGRDataSourceH
        OGR_Dr_CreateDataSource(OGRSFDriverH hDriver, const char *pszName,
        char **papszOptions)

        This function attempts to create a new data source based on the passed
        driver.

        The papszOptions argument can be used to control driver specific
        creation options. These options are normally documented in the format
        specific documentation.

        It is important to call OGR_DS_Destroy() when the datasource is no
        longer used to ensure that all data has been properly flushed to disk.

        This function is the same as the C++ method
        OGRSFDriver::CreateDataSource().

        Parameters:
        -----------
        hDriver: handle to the driver on which data source creation is based.
        pszName: the name for the new data source. UTF-8 encoded.
        papszOptions: a StringList of name=value options. Options are driver
        specific, and driver information can be found at the following
        url: http://www.gdal.org/ogr/ogr_formats.html

        NULL is returned on failure, or a new OGRDataSource handle on success.
        """
        return _ogr.Driver_CreateDataSource(self, *args, **kwargs)
    def CopyDataSource(self, *args, **kwargs):
        """
        CopyDataSource(Driver self, DataSource copy_ds, char const * utf8_path, char ** options=None) -> DataSource

        OGRDataSourceH
        OGR_Dr_CopyDataSource(OGRSFDriverH hDriver, OGRDataSourceH hSrcDS,
        const char *pszNewName, char **papszOptions)

        This function creates a new datasource by copying all the layers from
        the source datasource.

        It is important to call OGR_DS_Destroy() when the datasource is no
        longer used to ensure that all data has been properly flushed to disk.

        This function is the same as the C++ method
        OGRSFDriver::CopyDataSource().

        Parameters:
        -----------
        hDriver: handle to the driver on which data source creation is based.
        hSrcDS: source datasource
        pszNewName: the name for the new data source.
        papszOptions: a StringList of name=value options. Options are driver
        specific, and driver information can be found at the following
        url: http://www.gdal.org/ogr/ogr_formats.html

        NULL is returned on failure, or a new OGRDataSource handle on success.
        """
        return _ogr.Driver_CopyDataSource(self, *args, **kwargs)
    def Open(self, *args, **kwargs):
        """
        Open(Driver self, char const * utf8_path, int update=0) -> DataSource

        OGRDataSourceH OGR_Dr_Open(OGRSFDriverH
        hDriver, const char *pszName, int bUpdate)

        Attempt to open file with this driver.

        This function is the same as the C++ method OGRSFDriver::Open().

        Parameters:
        -----------
        hDriver: handle to the driver that is used to open file.
        pszName: the name of the file, or data source to try and open.
        bUpdate: TRUE if update access is required, otherwise FALSE (the
        default).

        NULL on error or if the pass name is not supported by this driver,
        otherwise an handle to an OGRDataSource. This OGRDataSource should be
        closed by deleting the object when it is no longer needed.
        """
        return _ogr.Driver_Open(self, *args, **kwargs)
    def DeleteDataSource(self, *args):
        """
        DeleteDataSource(Driver self, char const * utf8_path) -> int

        OGRErr
        OGR_Dr_DeleteDataSource(OGRSFDriverH hDriver, const char
        *pszDataSource)

        Delete a datasource.

        Delete (from the disk, in the database, ...) the named datasource.
        Normally it would be safest if the datasource was not open at the
        time.

        Whether this is a supported operation on this driver case be tested
        using TestCapability() on ODrCDeleteDataSource.

        This method is the same as the C++ method
        OGRSFDriver::DeleteDataSource().

        Parameters:
        -----------
        hDriver: handle to the driver on which data source deletion is based.
        pszDataSource: the name of the datasource to delete.

        OGRERR_NONE on success, and OGRERR_UNSUPPORTED_OPERATION if this is
        not supported by this driver.
        """
        return _ogr.Driver_DeleteDataSource(self, *args)
    def TestCapability(self, *args):
        """
        TestCapability(Driver self, char const * cap) -> bool

        int
        OGR_Dr_TestCapability(OGRSFDriverH hDriver, const char *pszCap)

        Test if capability is available.

        One of the following data source capability names can be passed into
        this function, and a TRUE or FALSE value will be returned indicating
        whether or not the capability is available for this object.

        ODrCCreateDataSource: True if this driver can support creating data
        sources.

        ODrCDeleteDataSource: True if this driver supports deleting data
        sources.

        The #define macro forms of the capability names should be used in
        preference to the strings themselves to avoid mispelling.

        This function is the same as the C++ method
        OGRSFDriver::TestCapability().

        Parameters:
        -----------
        hDriver: handle to the driver to test the capability against.
        pszCap: the capability to test.

        TRUE if capability available otherwise FALSE.
        """
        return _ogr.Driver_TestCapability(self, *args)
    def GetName(self, *args):
        """
        GetName(Driver self) -> char const *

        const char*
        OGR_Dr_GetName(OGRSFDriverH hDriver)

        Fetch name of driver (file format). This name should be relatively
        short (10-40 characters), and should reflect the underlying file
        format. For instance "ESRI Shapefile".

        This function is the same as the C++ method OGRSFDriver::GetName().

        Parameters:
        -----------
        hDriver: handle to the the driver to get the name from.

        driver name. This is an internal string and should not be modified or
        freed.
        """
        return _ogr.Driver_GetName(self, *args)
    def Register(self, *args):
        """Register(Driver self)"""
        return _ogr.Driver_Register(self, *args)
    def Deregister(self, *args):
        """Deregister(Driver self)"""
        return _ogr.Driver_Deregister(self, *args)
# Register the proxy class with the SWIG runtime.
Driver_swigregister = _ogr.Driver_swigregister
Driver_swigregister(Driver)
# SWIG proxy for OGRDataSource: an open vector dataset holding layers.
# Generated methods delegate to the compiled _ogr extension; the pythonic
# helpers at the bottom (__len__, __getitem__, GetLayer, ...) are hand-written
# additions appended by the SWIG interface file.
class DataSource(object):
    """Proxy of C++ OGRDataSourceShadow class"""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    # Datasources are obtained from Driver.Open()/CreateDataSource(), never
    # constructed directly.
    def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    name = _swig_property(_ogr.DataSource_name_get)
    __swig_destroy__ = _ogr.delete_DataSource
    __del__ = lambda self : None;
    def GetRefCount(self, *args):
        """
        GetRefCount(DataSource self) -> int

        int
        OGR_DS_GetRefCount(OGRDataSourceH hDataSource)
        """
        return _ogr.DataSource_GetRefCount(self, *args)
    def GetSummaryRefCount(self, *args):
        """
        GetSummaryRefCount(DataSource self) -> int

        int
        OGR_DS_GetSummaryRefCount(OGRDataSourceH hDataSource)
        """
        return _ogr.DataSource_GetSummaryRefCount(self, *args)
    def GetLayerCount(self, *args):
        """
        GetLayerCount(DataSource self) -> int

        int
        OGR_DS_GetLayerCount(OGRDataSourceH hDS)

        Get the number of layers in this data source.

        This function is the same as the C++ method
        OGRDataSource::GetLayerCount().

        Parameters:
        -----------
        hDS: handle to the data source from which to get the number of
        layers.

        layer count.
        """
        return _ogr.DataSource_GetLayerCount(self, *args)
    def GetDriver(self, *args):
        """
        GetDriver(DataSource self) -> Driver

        OGRSFDriverH
        OGR_DS_GetDriver(OGRDataSourceH hDS)

        Returns the driver that the dataset was opened with.

        This method is the same as the C++ method OGRDataSource::GetDriver()

        Parameters:
        -----------
        hDS: handle to the datasource

        NULL if driver info is not available, or pointer to a driver owned by
        the OGRSFDriverManager.
        """
        return _ogr.DataSource_GetDriver(self, *args)
    def GetName(self, *args):
        """
        GetName(DataSource self) -> char const *

        const char*
        OGR_DS_GetName(OGRDataSourceH hDS)

        Returns the name of the data source.

        This string should be sufficient to open the data source if passed to
        the same OGRSFDriver that this data source was opened with, but it
        need not be exactly the same string that was used to open the data
        source. Normally this is a filename.

        This function is the same as the C++ method OGRDataSource::GetName().

        Parameters:
        -----------
        hDS: handle to the data source to get the name from.

        pointer to an internal name string which should not be modified or
        freed by the caller.
        """
        return _ogr.DataSource_GetName(self, *args)
    def DeleteLayer(self, *args):
        """
        DeleteLayer(DataSource self, int index) -> OGRErr

        OGRErr
        OGR_DS_DeleteLayer(OGRDataSourceH hDS, int iLayer)

        Delete the indicated layer from the datasource.

        If this method is supported the ODsCDeleteLayer capability will test
        TRUE on the OGRDataSource.

        This method is the same as the C++ method
        OGRDataSource::DeleteLayer().

        Parameters:
        -----------
        hDS: handle to the datasource
        iLayer: the index of the layer to delete.

        OGRERR_NONE on success, or OGRERR_UNSUPPORTED_OPERATION if deleting
        layers is not supported for this datasource.
        """
        # NOTE: shadowed by the name-or-index DeleteLayer defined later in
        # this class; kept for the generated-code layout.
        return _ogr.DataSource_DeleteLayer(self, *args)
    def SyncToDisk(self, *args):
        """
        SyncToDisk(DataSource self) -> OGRErr

        OGRErr
        OGR_DS_SyncToDisk(OGRDataSourceH hDS)

        Flush pending changes to disk.

        This call is intended to force the datasource to flush any pending
        writes to disk, and leave the disk file in a consistent state. It
        would not normally have any effect on read-only datasources.

        Some data sources do not implement this method, and will still return
        OGRERR_NONE. An error is only returned if an error occurs while
        attempting to flush to disk.

        The default implementation of this method just calls the SyncToDisk()
        method on each of the layers. Conceptionally, calling SyncToDisk() on
        a datasource should include any work that might be accomplished by
        calling SyncToDisk() on layers in that data source.

        In any event, you should always close any opened datasource with
        OGR_DS_Destroy() that will ensure all data is correctly flushed.

        This method is the same as the C++ method OGRDataSource::SyncToDisk()

        Parameters:
        -----------
        hDS: handle to the data source

        OGRERR_NONE if no error occurs (even if nothing is done) or an error
        code.
        """
        return _ogr.DataSource_SyncToDisk(self, *args)
    def CreateLayer(self, *args, **kwargs):
        """
        CreateLayer(DataSource self, char const * name, SpatialReference srs=None, OGRwkbGeometryType geom_type=wkbUnknown,
            char ** options=None) -> Layer

        OGRLayerH
        OGR_DS_CreateLayer(OGRDataSourceH hDS, const char *pszName,
        OGRSpatialReferenceH hSpatialRef, OGRwkbGeometryType eType, char
        **papszOptions)

        This function attempts to create a new layer on the data source with
        the indicated name, coordinate system, geometry type.

        The papszOptions argument can be used to control driver specific
        creation options. These options are normally documented in the format
        specific documentation.

        This function is the same as the C++ method
        OGRDataSource::CreateLayer().

        Parameters:
        -----------
        hDS: The dataset handle.
        pszName: the name for the new layer. This should ideally not match
        any existing layer on the datasource.
        hSpatialRef: handle to the coordinate system to use for the new
        layer, or NULL if no coordinate system is available.
        eType: the geometry type for the layer. Use wkbUnknown if there are
        no constraints on the types geometry to be written.
        papszOptions: a StringList of name=value options. Options are driver
        specific, and driver information can be found at the following
        url: http://www.gdal.org/ogr/ogr_formats.html

        NULL is returned on failure, or a new OGRLayer handle on success.
        """
        return _ogr.DataSource_CreateLayer(self, *args, **kwargs)
    def CopyLayer(self, *args, **kwargs):
        """
        CopyLayer(DataSource self, Layer src_layer, char const * new_name, char ** options=None) -> Layer

        OGRLayerH
        OGR_DS_CopyLayer(OGRDataSourceH hDS, OGRLayerH hSrcLayer, const char
        *pszNewName, char **papszOptions)

        Duplicate an existing layer.

        This function creates a new layer, duplicate the field definitions of
        the source layer and then duplicate each features of the source layer.
        The papszOptions argument can be used to control driver specific
        creation options. These options are normally documented in the format
        specific documentation. The source layer may come from another
        dataset.

        This function is the same as the C++ method OGRDataSource::CopyLayer

        Parameters:
        -----------
        hDS: handle to the data source where to create the new layer
        hSrcLayer: handle to the source layer.
        pszNewName: the name of the layer to create.
        papszOptions: a StringList of name=value options. Options are driver
        specific.

        an handle to the layer, or NULL if an error occurs.
        """
        return _ogr.DataSource_CopyLayer(self, *args, **kwargs)
    def GetLayerByIndex(self, *args):
        """GetLayerByIndex(DataSource self, int index=0) -> Layer"""
        return _ogr.DataSource_GetLayerByIndex(self, *args)
    def GetLayerByName(self, *args):
        """
        GetLayerByName(DataSource self, char const * layer_name) -> Layer

        OGRLayerH
        OGR_DS_GetLayerByName(OGRDataSourceH hDS, const char *pszName)

        Fetch a layer by name.

        The returned layer remains owned by the OGRDataSource and should not
        be deleted by the application.

        This function is the same as the C++ method
        OGRDataSource::GetLayerByName().

        Parameters:
        -----------
        hDS: handle to the data source from which to get the layer.
        pszLayerName: Layer the layer name of the layer to fetch.

        an handle to the layer, or NULL if the layer is not found or an error
        occurs.
        """
        return _ogr.DataSource_GetLayerByName(self, *args)
    def TestCapability(self, *args):
        """
        TestCapability(DataSource self, char const * cap) -> bool

        int
        OGR_DS_TestCapability(OGRDataSourceH hDS, const char *pszCap)

        Test if capability is available.

        One of the following data source capability names can be passed into
        this function, and a TRUE or FALSE value will be returned indicating
        whether or not the capability is available for this object.

        ODsCCreateLayer: True if this datasource can create new layers.

        The #define macro forms of the capability names should be used in
        preference to the strings themselves to avoid mispelling.

        This function is the same as the C++ method
        OGRDataSource::TestCapability().

        Parameters:
        -----------
        hDS: handle to the data source against which to test the capability.
        pszCapability: the capability to test.

        TRUE if capability available otherwise FALSE.
        """
        return _ogr.DataSource_TestCapability(self, *args)
    def ExecuteSQL(self, *args, **kwargs):
        """
        ExecuteSQL(DataSource self, char const * statement, Geometry spatialFilter=None, char const * dialect="") -> Layer

        OGRLayerH
        OGR_DS_ExecuteSQL(OGRDataSourceH hDS, const char *pszStatement,
        OGRGeometryH hSpatialFilter, const char *pszDialect)

        Execute an SQL statement against the data store.

        The result of an SQL query is either NULL for statements that are in
        error, or that have no results set, or an OGRLayer handle representing
        a results set from the query. Note that this OGRLayer is in addition
        to the layers in the data store and must be destroyed with
        OGR_DS_ReleaseResultSet() before the data source is closed
        (destroyed).

        For more information on the SQL dialect supported internally by OGR
        review the OGR SQL document. Some drivers (ie. Oracle and PostGIS) pass
        the SQL directly through to the underlying RDBMS.

        This function is the same as the C++ method
        OGRDataSource::ExecuteSQL();

        Parameters:
        -----------
        hDS: handle to the data source on which the SQL query is executed.
        pszSQLCommand: the SQL statement to execute.
        hSpatialFilter: handle to a geometry which represents a spatial
        filter. Can be NULL.
        pszDialect: allows control of the statement dialect. If set to NULL,
        the OGR SQL engine will be used, except for RDBMS drivers that will
        use their dedicated SQL engine, unless OGRSQL is explicitely passed as
        the dialect.

        an handle to a OGRLayer containing the results of the query.
        Deallocate with OGR_DS_ReleaseResultSet().
        """
        return _ogr.DataSource_ExecuteSQL(self, *args, **kwargs)
    def ReleaseResultSet(self, *args):
        """
        ReleaseResultSet(DataSource self, Layer layer)

        void
        OGR_DS_ReleaseResultSet(OGRDataSourceH hDS, OGRLayerH hLayer)

        Release results of OGR_DS_ExecuteSQL().

        This function should only be used to deallocate OGRLayers resulting
        from an OGR_DS_ExecuteSQL() call on the same OGRDataSource. Failure to
        deallocate a results set before destroying the OGRDataSource may cause
        errors.

        This function is the same as the C++ method
        OGRDataSource::ReleaseResultSet().

        Parameters:
        -----------
        hDS: an handle to the data source on which was executed an SQL query.
        hLayer: handle to the result of a previous OGR_DS_ExecuteSQL() call.
        """
        return _ogr.DataSource_ReleaseResultSet(self, *args)
    def GetStyleTable(self, *args):
        """
        GetStyleTable(DataSource self) -> StyleTable

        OGRStyleTableH
        OGR_DS_GetStyleTable(OGRDataSourceH hDS)
        """
        return _ogr.DataSource_GetStyleTable(self, *args)
    def SetStyleTable(self, *args):
        """
        SetStyleTable(DataSource self, StyleTable table)

        void
        OGR_DS_SetStyleTable(OGRDataSourceH hDS, OGRStyleTableH hStyleTable)
        """
        return _ogr.DataSource_SetStyleTable(self, *args)
    def Destroy(self):
        "Once called, self has effectively been destroyed. Do not access. For backwards compatibility only"
        _ogr.delete_DataSource( self )
        self.thisown = 0
    def Release(self):
        "Once called, self has effectively been destroyed. Do not access. For backwards compatibility only"
        _ogr.delete_DataSource( self )
        self.thisown = 0
    def Reference(self):
        "For backwards compatibility only."
        # FIX: the generated code called self.Reference() here, recursing
        # until RecursionError.  Kept as a harmless no-op instead.
        pass
    def Dereference(self):
        "For backwards compatibility only."
        # FIX: the generated code called self.Dereference() here, recursing
        # until RecursionError.  Kept as a harmless no-op instead.
        pass
    def __len__(self):
        """Returns the number of layers on the datasource"""
        return self.GetLayerCount()
    def __getitem__(self, value):
        """Support dictionary, list, and slice -like access to the datasource.
        ds[0] would return the first layer on the datasource.
        ds['aname'] would return the layer named "aname".
        ds[0:4] would return a list of the first four layers."""
        if isinstance(value, slice):
            # FIX: slice.indices() fills in missing start/stop/step and clamps
            # to the layer count.  The generated code passed the raw slice
            # fields to xrange, which breaks for ds[:4] / open-ended slices,
            # and caught the undefined name OGRError (a latent NameError).
            output = []
            for i in range(*value.indices(self.GetLayerCount())):
                output.append(self.GetLayer(i))
            return output
        if isinstance(value, int):
            if value > len(self)-1:
                raise IndexError
            return self.GetLayer(value)
        elif isinstance(value, str):
            return self.GetLayer(value)
        else:
            raise TypeError('Input %s is not of String or Int type' % type(value))
    def GetLayer(self,iLayer=0):
        """Return the layer given an index or a name"""
        if isinstance(iLayer, str):
            return self.GetLayerByName(str(iLayer))
        elif isinstance(iLayer, int):
            return self.GetLayerByIndex(iLayer)
        else:
            raise TypeError("Input %s is not of String or Int type" % type(iLayer))
    def DeleteLayer(self, value):
        """Deletes the layer given an index or layer name"""
        if isinstance(value, str):
            for i in range(self.GetLayerCount()):
                name = self.GetLayer(i).GetName()
                if name == value:
                    return _ogr.DataSource_DeleteLayer(self, i)
            raise ValueError("Layer %s not found to delete" % value)
        elif isinstance(value, int):
            return _ogr.DataSource_DeleteLayer(self, value)
        else:
            raise TypeError("Input %s is not of String or Int type" % type(value))
# Register the proxy class with the SWIG runtime.
DataSource_swigregister = _ogr.DataSource_swigregister
DataSource_swigregister(DataSource)
class Layer(object):
"""Proxy of C++ OGRLayerShadow class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs): raise AttributeError("No constructor defined")
__repr__ = _swig_repr
def GetRefCount(self, *args):
"""
GetRefCount(Layer self) -> int
int OGR_L_GetRefCount(OGRLayerH
hLayer)
"""
return _ogr.Layer_GetRefCount(self, *args)
def SetSpatialFilter(self, *args):
"""
SetSpatialFilter(Layer self, Geometry filter)
SetSpatialFilter(Layer self, int iGeomField, Geometry filter)
void
OGR_L_SetSpatialFilter(OGRLayerH hLayer, OGRGeometryH hGeom)
Set a new spatial filter.
This function set the geometry to be used as a spatial filter when
fetching features via the OGR_L_GetNextFeature() function. Only
features that geometrically intersect the filter geometry will be
returned.
Currently this test is may be inaccurately implemented, but it is
guaranteed that all features who's envelope (as returned by
OGR_G_GetEnvelope()) overlaps the envelope of the spatial filter will
be returned. This can result in more shapes being returned that should
strictly be the case.
This function makes an internal copy of the passed geometry. The
passed geometry remains the responsibility of the caller, and may be
safely destroyed.
For the time being the passed filter geometry should be in the same
SRS as the layer (as returned by OGR_L_GetSpatialRef()). In the future
this may be generalized.
This function is the same as the C++ method
OGRLayer::SetSpatialFilter.
Parameters:
-----------
hLayer: handle to the layer on which to set the spatial filter.
hGeom: handle to the geometry to use as a filtering region. NULL may
be passed indicating that the current spatial filter should be
cleared, but no new one instituted.
"""
return _ogr.Layer_SetSpatialFilter(self, *args)
def SetSpatialFilterRect(self, *args):
"""
SetSpatialFilterRect(Layer self, double minx, double miny, double maxx, double maxy)
SetSpatialFilterRect(Layer self, int iGeomField, double minx, double miny, double maxx, double maxy)
void
OGR_L_SetSpatialFilterRect(OGRLayerH hLayer, double dfMinX, double
dfMinY, double dfMaxX, double dfMaxY)
Set a new rectangular spatial filter.
This method set rectangle to be used as a spatial filter when fetching
features via the OGR_L_GetNextFeature() method. Only features that
geometrically intersect the given rectangle will be returned.
The x/y values should be in the same coordinate system as the layer as
a whole (as returned by OGRLayer::GetSpatialRef()). Internally this
method is normally implemented as creating a 5 vertex closed
rectangular polygon and passing it to OGRLayer::SetSpatialFilter(). It
exists as a convenience.
The only way to clear a spatial filter set with this method is to call
OGRLayer::SetSpatialFilter(NULL).
This method is the same as the C++ method
OGRLayer::SetSpatialFilterRect().
Parameters:
-----------
hLayer: handle to the layer on which to set the spatial filter.
dfMinX: the minimum X coordinate for the rectangular region.
dfMinY: the minimum Y coordinate for the rectangular region.
dfMaxX: the maximum X coordinate for the rectangular region.
dfMaxY: the maximum Y coordinate for the rectangular region.
"""
return _ogr.Layer_SetSpatialFilterRect(self, *args)
def GetSpatialFilter(self, *args):
"""
GetSpatialFilter(Layer self) -> Geometry
OGRGeometryH
OGR_L_GetSpatialFilter(OGRLayerH hLayer)
This function returns the current spatial filter for this layer.
The returned pointer is to an internally owned object, and should not
be altered or deleted by the caller.
This function is the same as the C++ method
OGRLayer::GetSpatialFilter().
Parameters:
-----------
hLayer: handle to the layer to get the spatial filter from.
an handle to the spatial filter geometry.
"""
return _ogr.Layer_GetSpatialFilter(self, *args)
def SetAttributeFilter(self, *args):
    """
    SetAttributeFilter(Layer self, char * filter_string) -> OGRErr

    Set an attribute query (wraps OGR_L_SetAttributeFilter).  Only
    features for which the query evaluates true are returned by
    GetNextFeature().  The query is a restricted SQL WHERE clause, e.g.
    "population > 1000000 and population < 5000000"; pass None to clear
    the current query.  Installing a query generally resets the read
    position (like ResetReading()).  Returns OGRERR_NONE on success or
    an error code if the expression is invalid.
    """
    return _ogr.Layer_SetAttributeFilter(self, *args)
def ResetReading(self, *args):
    """
    ResetReading(Layer self)

    Rewind sequential reading so the next GetNextFeature() call starts
    again from the first feature (wraps OGR_L_ResetReading).
    """
    return _ogr.Layer_ResetReading(self, *args)
def GetName(self, *args):
    """
    GetName(Layer self) -> char const *

    Return the layer name (wraps OGR_L_GetName).  Equivalent to
    GetLayerDefn().GetName(), but for some drivers calling this directly
    avoids a lengthy layer-definition initialization.  Available since
    OGR 1.8.0.
    """
    return _ogr.Layer_GetName(self, *args)
def GetGeomType(self, *args):
    """
    GetGeomType(Layer self) -> OGRwkbGeometryType

    Return the layer geometry type (wraps OGR_L_GetGeomType).  Same
    result as GetLayerDefn().GetGeomType(), but for some drivers this
    avoids a lengthy layer-definition initialization.  Available since
    OGR 1.8.0.
    """
    return _ogr.Layer_GetGeomType(self, *args)
def GetGeometryColumn(self, *args):
    """
    GetGeometryColumn(Layer self) -> char const *

    Return the name of the underlying database column used as the
    geometry column, or "" if not supported (wraps
    OGR_L_GetGeometryColumn).
    """
    return _ogr.Layer_GetGeometryColumn(self, *args)
def GetFIDColumn(self, *args):
    """
    GetFIDColumn(Layer self) -> char const *

    Return the name of the underlying database column used as the FID
    column, or "" if not supported (wraps OGR_L_GetFIDColumn).
    """
    return _ogr.Layer_GetFIDColumn(self, *args)
def GetFeature(self, *args):
    """
    GetFeature(Layer self, long fid) -> Feature

    Fetch a feature by its identifier (wraps OGR_L_GetFeature).  The fid
    must not be OGRNullFID; spatial/attribute filters do not affect the
    result.  Use TestCapability(ogr.OLCRandomRead) to check whether the
    driver supports efficient random reads — otherwise a fallback scan
    over all features is used.  Sequential reads are generally
    considered interrupted by this call.  Returns the feature (owned by
    the caller) or None on failure.
    """
    return _ogr.Layer_GetFeature(self, *args)
def GetNextFeature(self, *args):
    """
    GetNextFeature(Layer self) -> Feature

    Fetch the next available feature (wraps OGR_L_GetNextFeature),
    implementing sequential access; ResetReading() restarts from the
    beginning.  Only features matching the current spatial filter are
    returned.  The returned feature is owned by the caller and must be
    released before the layer/datasource is destroyed.  Returns None
    when no more features are available.
    """
    return _ogr.Layer_GetNextFeature(self, *args)
def SetNextByIndex(self, *args):
    """
    SetNextByIndex(Layer self, long new_index) -> OGRErr

    Move the read cursor so that the next GetNextFeature() returns the
    new_index'th feature of the current result set (wraps
    OGR_L_SetNextByIndex), accounting for spatial and attribute
    filters.  Efficient seeking is rarely implemented; the default
    implementation does ResetReading() followed by new_index calls to
    GetNextFeature().  Check TestCapability(ogr.OLCFastSetNextByIndex)
    for fast seeking.  Returns OGRERR_NONE on success.
    """
    return _ogr.Layer_SetNextByIndex(self, *args)
def SetFeature(self, *args):
    """
    SetFeature(Layer self, Feature feature) -> OGRErr

    Rewrite an existing feature, matched by the feature id stored in
    the feature itself (wraps OGR_L_SetFeature).  Use
    TestCapability(ogr.OLCRandomWrite) to check whether random-access
    writing is supported.  Returns OGRERR_NONE on success, otherwise an
    error code.
    """
    return _ogr.Layer_SetFeature(self, *args)
def CreateFeature(self, *args):
    """
    CreateFeature(Layer self, Feature feature) -> OGRErr

    Write the passed feature to the layer as a NEW feature (wraps
    OGR_L_CreateFeature), never overwriting an existing one.  If the
    feature carries a non-OGRNullFID id, the driver may — but need not —
    use it; on success the feature is updated with the new feature id.
    Returns OGRERR_NONE on success.
    """
    return _ogr.Layer_CreateFeature(self, *args)
def DeleteFeature(self, *args):
    """
    DeleteFeature(Layer self, long fid) -> OGRErr

    Delete the feature with the given feature id from the layer (wraps
    OGR_L_DeleteFeature).  Most drivers do not support deletion and
    return OGRERR_UNSUPPORTED_OPERATION; check
    TestCapability(ogr.OLCDeleteFeature) first.  Returns OGRERR_NONE on
    success.
    """
    return _ogr.Layer_DeleteFeature(self, *args)
def SyncToDisk(self, *args):
    """
    SyncToDisk(Layer self) -> OGRErr

    Flush pending writes to disk, leaving the file in a consistent
    state (wraps OGR_L_SyncToDisk).  Drivers that do not implement this
    still return OGRERR_NONE (the default implementation is a no-op);
    an error is returned only if flushing actually fails.  Closing the
    datasource also guarantees a full flush.
    """
    return _ogr.Layer_SyncToDisk(self, *args)
def GetLayerDefn(self, *args):
    """
    GetLayerDefn(Layer self) -> FeatureDefn

    Return the schema (attribute definition) of the layer's features
    (wraps OGR_L_GetLayerDefn).  The returned FeatureDefn is owned by
    the layer and must not be modified or freed by the application.
    """
    return _ogr.Layer_GetLayerDefn(self, *args)
def GetFeatureCount(self, *args, **kwargs):
    """
    GetFeatureCount(Layer self, int force=1) -> int

    Return the number of features in the layer, honoring the current
    spatial filter (wraps OGR_L_GetFeatureCount).  If force is false
    and counting would be expensive, -1 may be returned ("count not
    known"); if force is true some drivers scan the whole layer.  Note
    that some implementations may move the layer's read cursor.
    """
    return _ogr.Layer_GetFeatureCount(self, *args, **kwargs)
def GetExtent(self, *args, **kwargs):
    """
    GetExtent(Layer self, int force=1, int can_return_null=0, int geom_field=0)

    Return the extent (MBR) of the layer's data (wraps OGR_L_GetExtent).
    If force is false and computing the extent would be expensive,
    OGRERR_FAILURE is reported ("extent not known"); if force is true
    some drivers scan every feature.  Whether the spatial filter is
    honored is driver-dependent, so prefer calling this without a
    filter installed.  Geometry-less layers may report OGRERR_FAILURE.
    Some implementations may move the layer's read cursor.
    """
    return _ogr.Layer_GetExtent(self, *args, **kwargs)
def TestCapability(self, *args):
    """
    TestCapability(Layer self, char const * cap) -> bool

    Test whether this layer supports the named capability (wraps
    OGR_L_TestCapability).  Capabilities are identified by strings, with
    matching OLC* constants defined to avoid spelling mistakes:

      OLCRandomRead        - optimized GetFeature() (not a fallback scan)
      OLCSequentialWrite   - CreateFeature() works on this layer instance
      OLCRandomWrite       - SetFeature() works on this layer instance
      OLCFastSpatialFilter - spatial filtering is efficient (indexed)
      OLCFastFeatureCount  - GetFeatureCount() without scanning (may
                             become false once a spatial filter is set)
      OLCFastGetExtent     - GetExtent() without scanning (ditto)
      OLCFastSetNextByIndex- efficient SetNextByIndex()
      OLCCreateField / OLCDeleteField / OLCReorderFields /
      OLCAlterFieldDefn    - the corresponding schema operation works
      OLCDeleteFeature     - DeleteFeature() is supported
      OLCStringsAsUTF8     - OFTString values are guaranteed UTF-8
      OLCTransactions      - Start/Commit/RollbackTransaction are
                             meaningful

    Returns True if the capability is available; False for anything
    unsupported or unrecognised.
    """
    return _ogr.Layer_TestCapability(self, *args)
def CreateField(self, *args, **kwargs):
    """
    CreateField(Layer self, FieldDefn field_def, int approx_ok=1) -> OGRErr

    Create a new field on the layer (wraps OGR_L_CreateField).  This is
    the only correct way to add fields — never modify the layer's
    OGRFeatureDefn directly — and it must not be called while feature
    objects from the previous schema are still alive.  Not all drivers
    support it (check TestCapability(ogr.OLCCreateField)); some only
    allow it while the layer is still empty.  With approx_ok true the
    field may be created in a slightly different form to fit driver
    limitations.  Returns OGRERR_NONE on success.
    """
    return _ogr.Layer_CreateField(self, *args, **kwargs)
def DeleteField(self, *args):
    """
    DeleteField(Layer self, int iField) -> OGRErr

    Delete an existing field from the layer (wraps OGR_L_DeleteField).
    The layer's OGRFeatureDefn is updated to reflect the removal —
    never modify it directly — and this must not be called while
    feature objects from the previous schema are still alive.  Not all
    drivers support it (check TestCapability(ogr.OLCDeleteField)); some
    only allow it while the layer is still empty.  Returns OGRERR_NONE
    on success.  Available since OGR 1.9.0.
    """
    return _ogr.Layer_DeleteField(self, *args)
def ReorderField(self, *args):
    """
    ReorderField(Layer self, int iOldFieldPos, int iNewFieldPos) -> OGRErr

    Move a single field from iOldFieldPos to iNewFieldPos, shuffling the
    fields in between (wraps OGR_L_ReorderField, a convenience form of
    ReorderFields()).  E.g. with fields "0","1","2","3","4",
    ReorderField(1, 3) yields "0","2","3","1","4".  Both positions must
    be in [0, GetFieldCount()-1].  The layer's OGRFeatureDefn is updated
    internally — never modify it directly — and this must not be called
    while feature objects from the previous schema are still alive.
    Check TestCapability(ogr.OLCReorderFields) for driver support.
    Returns OGRERR_NONE on success.  Available since OGR 1.9.0.
    """
    return _ogr.Layer_ReorderField(self, *args)
def ReorderFields(self, *args):
    """
    ReorderFields(Layer self, int nList) -> OGRErr

    Reorder all fields of the layer (wraps OGR_L_ReorderFields).  The
    permutation panMap is such that the field at position i AFTER
    reordering was at position panMap[i] BEFORE; it must be a
    permutation of [0, GetFieldCount()-1].  E.g. with fields
    "0","1","2","3","4", ReorderFields([0,2,3,1,4]) yields
    "0","2","3","1","4".  The layer's OGRFeatureDefn is updated
    internally — never modify it directly — and this must not be called
    while feature objects from the previous schema are still alive.
    Check TestCapability(ogr.OLCReorderFields) for driver support.
    Returns OGRERR_NONE on success.  Available since OGR 1.9.0.
    """
    return _ogr.Layer_ReorderFields(self, *args)
def AlterFieldDefn(self, *args):
    """
    AlterFieldDefn(Layer self, int iField, FieldDefn field_def, int nFlags) -> OGRErr

    Alter the definition of the existing field at index iField (wraps
    OGR_L_AlterFieldDefn).  nFlags is a combination of ALTER_NAME_FLAG,
    ALTER_TYPE_FLAG and ALTER_WIDTH_PRECISION_FLAG selecting which parts
    of the new definition to apply.  The layer's OGRFeatureDefn is
    updated internally — never modify it directly — and this must not
    be called while feature objects from the previous schema are still
    alive.  Check TestCapability(ogr.OLCAlterFieldDefn) for driver
    support; drivers may not honor every flag.  Returns OGRERR_NONE on
    success.  Available since OGR 1.9.0.
    """
    return _ogr.Layer_AlterFieldDefn(self, *args)
def CreateGeomField(self, *args, **kwargs):
    """
    CreateGeomField(Layer self, GeomFieldDefn field_def, int approx_ok=1) -> OGRErr

    Create a new geometry field on the layer from the given
    GeomFieldDefn.
    """
    return _ogr.Layer_CreateGeomField(self, *args, **kwargs)
def StartTransaction(self, *args):
    """
    StartTransaction(Layer self) -> OGRErr

    Begin a transaction on datasources that support them (wraps
    OGR_L_StartTransaction).  Returns OGRERR_FAILURE if starting the
    transaction fails; datasources without transaction support always
    return OGRERR_NONE.
    """
    return _ogr.Layer_StartTransaction(self, *args)
def CommitTransaction(self, *args):
    """
    CommitTransaction(Layer self) -> OGRErr

    Commit the active transaction on datasources that support them
    (wraps OGR_L_CommitTransaction).  Returns OGRERR_FAILURE if no
    transaction is active or the commit fails; datasources without
    transaction support always return OGRERR_NONE.
    """
    return _ogr.Layer_CommitTransaction(self, *args)
def RollbackTransaction(self, *args):
    """
    RollbackTransaction(Layer self) -> OGRErr

    Roll the datasource back to its state before the current
    transaction started (wraps OGR_L_RollbackTransaction).  Returns
    OGRERR_FAILURE if no transaction is active or the rollback fails;
    datasources without transaction support always return OGRERR_NONE.
    """
    return _ogr.Layer_RollbackTransaction(self, *args)
def FindFieldIndex(self, *args):
    """
    FindFieldIndex(Layer self, char const * pszFieldName, int bExactMatch) -> int

    Return the index of the field with the given name, optionally
    requiring an exact match.
    """
    return _ogr.Layer_FindFieldIndex(self, *args)
def GetSpatialRef(self, *args):
    """
    GetSpatialRef(Layer self) -> SpatialReference

    Return the spatial reference system for this layer, or None if
    there isn't one (wraps OGR_L_GetSpatialRef).  The returned object
    is owned by the layer and must not be modified or freed.
    """
    return _ogr.Layer_GetSpatialRef(self, *args)
def GetFeaturesRead(self, *args):
    """
    GetFeaturesRead(Layer self) -> GIntBig

    Return the counter exposed by OGR_L_GetFeaturesRead for this layer.
    """
    return _ogr.Layer_GetFeaturesRead(self, *args)
def SetIgnoredFields(self, *args):
    """
    SetIgnoredFields(Layer self, char const ** options) -> OGRErr

    Declare fields that may be omitted when retrieving features (wraps
    OGR_L_SetIgnoredFields).  Drivers that support this (testable with
    the OLCIgnoreFields capability) skip fetching the listed fields in
    later GetFeature()/GetNextFeature() calls, saving time and
    bandwidth.  Besides layer field names, the special names
    "OGR_GEOMETRY" and "OGR_STYLE" ignore the geometry and layer style
    respectively.  Pass None to clear the list (the default: nothing
    ignored).  Returns OGRERR_NONE if all names were resolved, even if
    the driver does not support the feature.
    """
    return _ogr.Layer_SetIgnoredFields(self, *args)
def Intersection(self, *args, **kwargs):
    """
    Intersection(Layer self, Layer method_layer, Layer result_layer, char ** options=None, GDALProgressFunc callback=0,
        void * callback_data=None) -> OGRErr

    Write the intersection of this layer with method_layer into
    result_layer, with optional driver options and progress callback.
    """
    return _ogr.Layer_Intersection(self, *args, **kwargs)
def Union(self, *args, **kwargs):
    """
    Union(Layer self, Layer method_layer, Layer result_layer, char ** options=None, GDALProgressFunc callback=0,
        void * callback_data=None) -> OGRErr

    Write the union of this layer with method_layer into result_layer,
    with optional driver options and progress callback.
    """
    return _ogr.Layer_Union(self, *args, **kwargs)
def SymDifference(self, *args, **kwargs):
    """
    SymDifference(Layer self, Layer method_layer, Layer result_layer, char ** options=None, GDALProgressFunc callback=0,
        void * callback_data=None) -> OGRErr

    Write the symmetric difference of this layer and method_layer into
    result_layer, with optional driver options and progress callback.
    """
    return _ogr.Layer_SymDifference(self, *args, **kwargs)
def Identity(self, *args, **kwargs):
    """
    Identity(Layer self, Layer method_layer, Layer result_layer, char ** options=None, GDALProgressFunc callback=0,
        void * callback_data=None) -> OGRErr

    Perform the identity overlay of this layer with method_layer,
    writing the result into result_layer.
    """
    return _ogr.Layer_Identity(self, *args, **kwargs)
def Update(self, *args, **kwargs):
    """
    Update(Layer self, Layer method_layer, Layer result_layer, char ** options=None, GDALProgressFunc callback=0,
        void * callback_data=None) -> OGRErr

    Perform the update overlay of this layer with method_layer, writing
    the result into result_layer.
    """
    return _ogr.Layer_Update(self, *args, **kwargs)
def Clip(self, *args, **kwargs):
    """
    Clip(Layer self, Layer method_layer, Layer result_layer, char ** options=None, GDALProgressFunc callback=0,
        void * callback_data=None) -> OGRErr

    Clip this layer by method_layer, writing the result into
    result_layer.
    """
    return _ogr.Layer_Clip(self, *args, **kwargs)
def Erase(self, *args, **kwargs):
    """
    Erase(Layer self, Layer method_layer, Layer result_layer, char ** options=None, GDALProgressFunc callback=0,
        void * callback_data=None) -> OGRErr

    Erase the areas of this layer covered by method_layer, writing the
    result into result_layer.
    """
    return _ogr.Layer_Erase(self, *args, **kwargs)
def GetStyleTable(self, *args):
    """
    GetStyleTable(Layer self) -> StyleTable

    Return the style table attached to this layer (wraps
    OGR_L_GetStyleTable).
    """
    return _ogr.Layer_GetStyleTable(self, *args)
def SetStyleTable(self, *args):
    """
    SetStyleTable(Layer self, StyleTable table)

    Attach the given style table to this layer (wraps
    OGR_L_SetStyleTable).
    """
    return _ogr.Layer_SetStyleTable(self, *args)
def Reference(self):
    # Deliberate no-op kept so old code calling Reference() keeps working.
    "For backwards compatibility only."
    pass
def Dereference(self):
    # Deliberate no-op kept so old code calling Dereference() keeps working.
    "For backwards compatibility only."
    pass
def __len__(self):
    """Returns the number of features in the layer"""
    # Delegates to GetFeatureCount(); with the default force=1 this may
    # be expensive on drivers without OLCFastFeatureCount.
    return self.GetFeatureCount()
# To avoid __len__ being called when testing boolean value
# which can have side effects (#4758)
def __nonzero__(self):
    # Always truthy: prevents bool(layer) from falling back to __len__,
    # whose GetFeatureCount() call can have side effects (#4758).
    return True
# For Python 3 compat
__bool__ = __nonzero__
def __getitem__(self, value):
"""Support list and slice -like access to the layer.
r[0] would return the first feature on the layer.
r[0:4] would return a list of the first four features."""
if isinstance(value, slice):
import sys
output = []
if value.stop == sys.maxint:
#for an unending slice, sys.maxint is used
#We need to stop before that or GDAL will write an
##error to stdout
stop = len(self) - 1
else:
stop = value.stop
for i in xrange(value.start,stop,value.step):
feature = self.GetFeature(i)
if feature:
output.append(feature)
else:
return output
return output
if isinstance(value, int):
if value > len(self)-1:
raise IndexError
return self.GetFeature(value)
else:
raise TypeError("Input %s is not of IntType or SliceType" % type(value))
def CreateFields(self, fields):
"""Create a list of fields on the Layer"""
for i in fields:
self.CreateField(i)
def __iter__(self):
    # The layer is its own iterator; iteration state lives in the
    # underlying OGR read cursor (see next()/GetNextFeature()).
    return self
def next(self):
feature = self.GetNextFeature()
if not feature:
raise StopIteration
else:
return feature
def schema(self):
output = []
defn = self.GetLayerDefn()
for n in range(defn.GetFieldCount()):
output.append(defn.GetFieldDefn(n))
return output
schema = property(schema)
# Register the Layer proxy class with the C extension so objects coming
# back from _ogr calls are wrapped as Layer instances (SWIG boilerplate).
Layer_swigregister = _ogr.Layer_swigregister
Layer_swigregister(Layer)
class Feature(object):
"""Proxy of C++ OGRFeatureShadow class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
__swig_destroy__ = _ogr.delete_Feature
__del__ = lambda self : None;
def __init__(self, *args, **kwargs):
    """__init__(OGRFeatureShadow self, FeatureDefn feature_def) -> Feature

    Construct a new Feature for the given FeatureDefn and attach the
    resulting SWIG pointer to self.this.
    """
    this = _ogr.new_Feature(*args, **kwargs)
    try:
        # self.this may already hold a SWIG pointer container.
        self.this.append(this)
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any failure to append means there is
        # no container yet, so assign directly.
        self.this = this
def GetDefnRef(self, *args):
"""
GetDefnRef(Feature self) -> FeatureDefn
OGRFeatureDefnH
OGR_F_GetDefnRef(OGRFeatureH hFeat)
Fetch feature definition.
This function is the same as the C++ method OGRFeature::GetDefnRef().
Parameters:
-----------
hFeat: handle to the feature to get the feature definition from.
an handle to the feature definition object on which feature depends.
"""
return _ogr.Feature_GetDefnRef(self, *args)
def SetGeometry(self, *args):
"""
SetGeometry(Feature self, Geometry geom) -> OGRErr
OGRErr
OGR_F_SetGeometry(OGRFeatureH hFeat, OGRGeometryH hGeom)
Set feature geometry.
This function updates the features geometry, and operate exactly as
SetGeometryDirectly(), except that this function does not assume
ownership of the passed geometry, but instead makes a copy of it.
This function is the same as the C++ OGRFeature::SetGeometry().
Parameters:
-----------
hFeat: handle to the feature on which new geometry is applied to.
hGeom: handle to the new geometry to apply to feature.
OGRERR_NONE if successful, or OGR_UNSUPPORTED_GEOMETRY_TYPE if the
geometry type is illegal for the OGRFeatureDefn (checking not yet
implemented).
"""
return _ogr.Feature_SetGeometry(self, *args)
def SetGeometryDirectly(self, *args):
"""
SetGeometryDirectly(Feature self, Geometry geom) -> OGRErr
OGRErr
OGR_F_SetGeometryDirectly(OGRFeatureH hFeat, OGRGeometryH hGeom)
Set feature geometry.
This function updates the features geometry, and operate exactly as
SetGeometry(), except that this function assumes ownership of the
passed geometry.
This function is the same as the C++ method
OGRFeature::SetGeometryDirectly.
Parameters:
-----------
hFeat: handle to the feature on which to apply the geometry.
hGeom: handle to the new geometry to apply to feature.
OGRERR_NONE if successful, or OGR_UNSUPPORTED_GEOMETRY_TYPE if the
geometry type is illegal for the OGRFeatureDefn (checking not yet
implemented).
"""
return _ogr.Feature_SetGeometryDirectly(self, *args)
def GetGeometryRef(self, *args):
"""
GetGeometryRef(Feature self) -> Geometry
OGRGeometryH
OGR_F_GetGeometryRef(OGRFeatureH hFeat)
Fetch an handle to feature geometry.
This function is the same as the C++ method
OGRFeature::GetGeometryRef().
Parameters:
-----------
hFeat: handle to the feature to get geometry from.
an handle to internal feature geometry. This object should not be
modified.
"""
return _ogr.Feature_GetGeometryRef(self, *args)
def SetGeomField(self, *args):
"""
SetGeomField(Feature self, int iField, Geometry geom) -> OGRErr
SetGeomField(Feature self, char const * name, Geometry geom) -> OGRErr
"""
return _ogr.Feature_SetGeomField(self, *args)
def SetGeomFieldDirectly(self, *args):
"""
SetGeomFieldDirectly(Feature self, int iField, Geometry geom) -> OGRErr
SetGeomFieldDirectly(Feature self, char const * name, Geometry geom) -> OGRErr
"""
return _ogr.Feature_SetGeomFieldDirectly(self, *args)
def GetGeomFieldRef(self, *args):
"""
GetGeomFieldRef(Feature self, int iField) -> Geometry
GetGeomFieldRef(Feature self, char const * name) -> Geometry
"""
return _ogr.Feature_GetGeomFieldRef(self, *args)
def Clone(self, *args):
"""
Clone(Feature self) -> Feature
OGRFeatureH OGR_F_Clone(OGRFeatureH
hFeat)
Duplicate feature.
The newly created feature is owned by the caller, and will have it's
own reference to the OGRFeatureDefn.
This function is the same as the C++ method OGRFeature::Clone().
Parameters:
-----------
hFeat: handle to the feature to clone.
an handle to the new feature, exactly matching this feature.
"""
return _ogr.Feature_Clone(self, *args)
    def Equal(self, *args):
        """
        Equal(Feature self, Feature feature) -> bool
        int OGR_F_Equal(OGRFeatureH hFeat,
        OGRFeatureH hOtherFeat)
        Test if two features are the same.
        Two features are considered equal if they share the same
        OGRFeatureDefn (handle equality), have the same field values, and the
        same geometry (as tested by OGR_G_Equal()) as well as the same
        feature id.
        This function is the same as the C++ method OGRFeature::Equal().
        Parameters:
        -----------
        hFeat: handle to one of the feature.
        hOtherFeat: handle to the other feature to test this one against.
        TRUE if they are equal, otherwise FALSE.
        """
        return _ogr.Feature_Equal(self, *args)
def GetFieldCount(self, *args):
"""
GetFieldCount(Feature self) -> int
int
OGR_F_GetFieldCount(OGRFeatureH hFeat)
Fetch number of fields on this feature This will always be the same as
the field count for the OGRFeatureDefn.
This function is the same as the C++ method
OGRFeature::GetFieldCount().
Parameters:
-----------
hFeat: handle to the feature to get the fields count from.
count of fields.
"""
return _ogr.Feature_GetFieldCount(self, *args)
def GetFieldDefnRef(self, *args):
"""
GetFieldDefnRef(Feature self, int id) -> FieldDefn
GetFieldDefnRef(Feature self, char const * name) -> FieldDefn
OGRFieldDefnH
OGR_F_GetFieldDefnRef(OGRFeatureH hFeat, int i)
Fetch definition for this field.
This function is the same as the C++ method
OGRFeature::GetFieldDefnRef().
Parameters:
-----------
hFeat: handle to the feature on which the field is found.
i: the field to fetch, from 0 to GetFieldCount()-1.
an handle to the field definition (from the OGRFeatureDefn). This is
an internal reference, and should not be deleted or modified.
"""
return _ogr.Feature_GetFieldDefnRef(self, *args)
def GetGeomFieldCount(self, *args):
"""GetGeomFieldCount(Feature self) -> int"""
return _ogr.Feature_GetGeomFieldCount(self, *args)
def GetGeomFieldDefnRef(self, *args):
"""
GetGeomFieldDefnRef(Feature self, int id) -> GeomFieldDefn
GetGeomFieldDefnRef(Feature self, char const * name) -> GeomFieldDefn
"""
return _ogr.Feature_GetGeomFieldDefnRef(self, *args)
def GetFieldAsString(self, *args):
"""
GetFieldAsString(Feature self, int id) -> char const
GetFieldAsString(Feature self, char const * name) -> char const *
const char*
OGR_F_GetFieldAsString(OGRFeatureH hFeat, int iField)
Fetch field value as a string.
OFTReal and OFTInteger fields will be translated to string using
sprintf(), but not necessarily using the established formatting rules.
Other field types, or errors will result in a return value of zero.
This function is the same as the C++ method
OGRFeature::GetFieldAsString().
Parameters:
-----------
hFeat: handle to the feature that owned the field.
iField: the field to fetch, from 0 to GetFieldCount()-1.
the field value. This string is internal, and should not be modified,
or freed. Its lifetime may be very brief.
"""
return _ogr.Feature_GetFieldAsString(self, *args)
def GetFieldAsInteger(self, *args):
"""
GetFieldAsInteger(Feature self, int id) -> int
GetFieldAsInteger(Feature self, char const * name) -> int
int
OGR_F_GetFieldAsInteger(OGRFeatureH hFeat, int iField)
Fetch field value as integer.
OFTString features will be translated using atoi(). OFTReal fields
will be cast to integer. Other field types, or errors will result in a
return value of zero.
This function is the same as the C++ method
OGRFeature::GetFieldAsInteger().
Parameters:
-----------
hFeat: handle to the feature that owned the field.
iField: the field to fetch, from 0 to GetFieldCount()-1.
the field value.
"""
return _ogr.Feature_GetFieldAsInteger(self, *args)
def GetFieldAsDouble(self, *args):
"""
GetFieldAsDouble(Feature self, int id) -> double
GetFieldAsDouble(Feature self, char const * name) -> double
double
OGR_F_GetFieldAsDouble(OGRFeatureH hFeat, int iField)
Fetch field value as a double.
OFTString features will be translated using atof(). OFTInteger fields
will be cast to double. Other field types, or errors will result in a
return value of zero.
This function is the same as the C++ method
OGRFeature::GetFieldAsDouble().
Parameters:
-----------
hFeat: handle to the feature that owned the field.
iField: the field to fetch, from 0 to GetFieldCount()-1.
the field value.
"""
return _ogr.Feature_GetFieldAsDouble(self, *args)
def GetFieldAsDateTime(self, *args):
"""
GetFieldAsDateTime(Feature self, int id)
int
OGR_F_GetFieldAsDateTime(OGRFeatureH hFeat, int iField, int *pnYear,
int *pnMonth, int *pnDay, int *pnHour, int *pnMinute, int *pnSecond,
int *pnTZFlag)
Fetch field value as date and time.
Currently this method only works for OFTDate, OFTTime and OFTDateTime
fields.
This function is the same as the C++ method
OGRFeature::GetFieldAsDateTime().
Parameters:
-----------
hFeat: handle to the feature that owned the field.
iField: the field to fetch, from 0 to GetFieldCount()-1.
pnYear: (including century)
pnMonth: (1-12)
pnDay: (1-31)
pnHour: (0-23)
pnMinute: (0-59)
pnSecond: (0-59)
pnTZFlag: (0=unknown, 1=localtime, 100=GMT, see data model for
details)
TRUE on success or FALSE on failure.
"""
return _ogr.Feature_GetFieldAsDateTime(self, *args)
def GetFieldAsIntegerList(self, *args):
"""
GetFieldAsIntegerList(Feature self, int id)
const int*
OGR_F_GetFieldAsIntegerList(OGRFeatureH hFeat, int iField, int
*pnCount)
Fetch field value as a list of integers.
Currently this function only works for OFTIntegerList fields.
This function is the same as the C++ method
OGRFeature::GetFieldAsIntegerList().
Parameters:
-----------
hFeat: handle to the feature that owned the field.
iField: the field to fetch, from 0 to GetFieldCount()-1.
pnCount: an integer to put the list count (number of integers) into.
the field value. This list is internal, and should not be modified, or
freed. Its lifetime may be very brief. If *pnCount is zero on return
the returned pointer may be NULL or non-NULL.
"""
return _ogr.Feature_GetFieldAsIntegerList(self, *args)
def GetFieldAsDoubleList(self, *args):
"""
GetFieldAsDoubleList(Feature self, int id)
const double*
OGR_F_GetFieldAsDoubleList(OGRFeatureH hFeat, int iField, int
*pnCount)
Fetch field value as a list of doubles.
Currently this function only works for OFTRealList fields.
This function is the same as the C++ method
OGRFeature::GetFieldAsDoubleList().
Parameters:
-----------
hFeat: handle to the feature that owned the field.
iField: the field to fetch, from 0 to GetFieldCount()-1.
pnCount: an integer to put the list count (number of doubles) into.
the field value. This list is internal, and should not be modified, or
freed. Its lifetime may be very brief. If *pnCount is zero on return
the returned pointer may be NULL or non-NULL.
"""
return _ogr.Feature_GetFieldAsDoubleList(self, *args)
def GetFieldAsStringList(self, *args):
"""
GetFieldAsStringList(Feature self, int id) -> char **
char**
OGR_F_GetFieldAsStringList(OGRFeatureH hFeat, int iField)
Fetch field value as a list of strings.
Currently this method only works for OFTStringList fields.
The returned list is terminated by a NULL pointer. The number of
elements can also be calculated using CSLCount().
This function is the same as the C++ method
OGRFeature::GetFieldAsStringList().
Parameters:
-----------
hFeat: handle to the feature that owned the field.
iField: the field to fetch, from 0 to GetFieldCount()-1.
the field value. This list is internal, and should not be modified, or
freed. Its lifetime may be very brief.
"""
return _ogr.Feature_GetFieldAsStringList(self, *args)
def IsFieldSet(self, *args):
"""
IsFieldSet(Feature self, int id) -> bool
IsFieldSet(Feature self, char const * name) -> bool
int OGR_F_IsFieldSet(OGRFeatureH
hFeat, int iField)
Test if a field has ever been assigned a value or not.
This function is the same as the C++ method OGRFeature::IsFieldSet().
Parameters:
-----------
hFeat: handle to the feature on which the field is.
iField: the field to test.
TRUE if the field has been set, otherwise false.
"""
return _ogr.Feature_IsFieldSet(self, *args)
def GetFieldIndex(self, *args):
"""
GetFieldIndex(Feature self, char const * name) -> int
int
OGR_F_GetFieldIndex(OGRFeatureH hFeat, const char *pszName)
Fetch the field index given field name.
This is a cover for the OGRFeatureDefn::GetFieldIndex() method.
This function is the same as the C++ method
OGRFeature::GetFieldIndex().
Parameters:
-----------
hFeat: handle to the feature on which the field is found.
pszName: the name of the field to search for.
the field index, or -1 if no matching field is found.
"""
return _ogr.Feature_GetFieldIndex(self, *args)
def GetGeomFieldIndex(self, *args):
"""GetGeomFieldIndex(Feature self, char const * name) -> int"""
return _ogr.Feature_GetGeomFieldIndex(self, *args)
def GetFID(self, *args):
"""
GetFID(Feature self) -> int
long OGR_F_GetFID(OGRFeatureH hFeat)
Get feature identifier.
This function is the same as the C++ method OGRFeature::GetFID().
Parameters:
-----------
hFeat: handle to the feature from which to get the feature
identifier.
feature id or OGRNullFID if none has been assigned.
"""
return _ogr.Feature_GetFID(self, *args)
def SetFID(self, *args):
"""
SetFID(Feature self, int fid) -> OGRErr
OGRErr OGR_F_SetFID(OGRFeatureH hFeat,
long nFID)
Set the feature identifier.
For specific types of features this operation may fail on illegal
features ids. Generally it always succeeds. Feature ids should be
greater than or equal to zero, with the exception of OGRNullFID (-1)
indicating that the feature id is unknown.
This function is the same as the C++ method OGRFeature::SetFID().
Parameters:
-----------
hFeat: handle to the feature to set the feature id to.
nFID: the new feature identifier value to assign.
On success OGRERR_NONE, or on failure some other value.
"""
return _ogr.Feature_SetFID(self, *args)
    def DumpReadable(self, *args):
        """
        DumpReadable(Feature self)
        void
        OGR_F_DumpReadable(OGRFeatureH hFeat, FILE *fpOut)
        Dump this feature in a human readable form.
        This dumps the attributes, and geometry; however, it doesn't report
        definition information (other than field types and names), nor does it
        report the geometry spatial reference system.
        This function is the same as the C++ method
        OGRFeature::DumpReadable().
        Parameters:
        -----------
        hFeat: handle to the feature to dump.
        fpOut: the stream to write to, such as stdout.
        """
        return _ogr.Feature_DumpReadable(self, *args)
def UnsetField(self, *args):
"""
UnsetField(Feature self, int id)
UnsetField(Feature self, char const * name)
void OGR_F_UnsetField(OGRFeatureH
hFeat, int iField)
Clear a field, marking it as unset.
This function is the same as the C++ method OGRFeature::UnsetField().
Parameters:
-----------
hFeat: handle to the feature on which the field is.
iField: the field to unset.
"""
return _ogr.Feature_UnsetField(self, *args)
def SetField(self, *args):
"""
SetField(Feature self, int id, char const * value)
SetField(Feature self, char const * name, char const * value)
SetField(Feature self, int id, int value)
SetField(Feature self, char const * name, int value)
SetField(Feature self, int id, double value)
SetField(Feature self, char const * name, double value)
SetField(Feature self, int id, int year, int month, int day, int hour, int minute, int second, int tzflag)
SetField(Feature self, char const * name, int year, int month, int day, int hour, int minute, int second,
int tzflag)
"""
return _ogr.Feature_SetField(self, *args)
    def SetFieldIntegerList(self, *args):
        """
        SetFieldIntegerList(Feature self, int id, int nList)
        void
        OGR_F_SetFieldIntegerList(OGRFeatureH hFeat, int iField, int nCount,
        int *panValues)
        Set field to list of integers value.
        This function currently only has an effect on OFTIntegerList fields.
        This function is the same as the C++ method OGRFeature::SetField().
        Parameters:
        -----------
        hFeat: handle to the feature that owned the field.
        iField: the field to set, from 0 to GetFieldCount()-1.
        nCount: the number of values in the list being assigned.
        panValues: the values to assign.
        """
        return _ogr.Feature_SetFieldIntegerList(self, *args)
    def SetFieldDoubleList(self, *args):
        """
        SetFieldDoubleList(Feature self, int id, int nList)
        void
        OGR_F_SetFieldDoubleList(OGRFeatureH hFeat, int iField, int nCount,
        double *padfValues)
        Set field to list of doubles value.
        This function currently only has an effect on OFTRealList fields.
        This function is the same as the C++ method OGRFeature::SetField().
        Parameters:
        -----------
        hFeat: handle to the feature that owned the field.
        iField: the field to set, from 0 to GetFieldCount()-1.
        nCount: the number of values in the list being assigned.
        padfValues: the values to assign.
        """
        return _ogr.Feature_SetFieldDoubleList(self, *args)
    def SetFieldStringList(self, *args):
        """
        SetFieldStringList(Feature self, int id, char ** pList)
        void
        OGR_F_SetFieldStringList(OGRFeatureH hFeat, int iField, char
        **papszValues)
        Set field to list of strings value.
        This function currently only has an effect on OFTStringList fields.
        This function is the same as the C++ method OGRFeature::SetField().
        Parameters:
        -----------
        hFeat: handle to the feature that owned the field.
        iField: the field to set, from 0 to GetFieldCount()-1.
        papszValues: the values to assign.
        """
        return _ogr.Feature_SetFieldStringList(self, *args)
def SetFieldBinaryFromHexString(self, *args):
"""
SetFieldBinaryFromHexString(Feature self, int id, char const * pszValue)
SetFieldBinaryFromHexString(Feature self, char const * name, char const * pszValue)
"""
return _ogr.Feature_SetFieldBinaryFromHexString(self, *args)
def SetFrom(self, *args, **kwargs):
"""
SetFrom(Feature self, Feature other, int forgiving=1) -> OGRErr
OGRErr OGR_F_SetFrom(OGRFeatureH
hFeat, OGRFeatureH hOtherFeat, int bForgiving)
Set one feature from another.
Overwrite the contents of this feature from the geometry and
attributes of another. The hOtherFeature does not need to have the
same OGRFeatureDefn. Field values are copied by corresponding field
names. Field types do not have to exactly match. OGR_F_SetField*()
function conversion rules will be applied as needed.
This function is the same as the C++ method OGRFeature::SetFrom().
Parameters:
-----------
hFeat: handle to the feature to set to.
hOtherFeat: handle to the feature from which geometry, and field
values will be copied.
bForgiving: TRUE if the operation should continue despite lacking
output fields matching some of the source fields.
OGRERR_NONE if the operation succeeds, even if some values are not
transferred, otherwise an error code.
"""
return _ogr.Feature_SetFrom(self, *args, **kwargs)
def SetFromWithMap(self, *args):
"""
SetFromWithMap(Feature self, Feature other, int forgiving, int nList) -> OGRErr
OGRErr
OGR_F_SetFromWithMap(OGRFeatureH hFeat, OGRFeatureH hOtherFeat, int
bForgiving, int *panMap)
Set one feature from another.
Overwrite the contents of this feature from the geometry and
attributes of another. The hOtherFeature does not need to have the
same OGRFeatureDefn. Field values are copied according to the provided
indices map. Field types do not have to exactly match.
OGR_F_SetField*() function conversion rules will be applied as needed.
This is more efficient than OGR_F_SetFrom() in that this doesn't
lookup the fields by their names. Particularly useful when the field
names don't match.
This function is the same as the C++ method OGRFeature::SetFrom().
Parameters:
-----------
hFeat: handle to the feature to set to.
hOtherFeat: handle to the feature from which geometry, and field
values will be copied.
panMap: Array of the indices of the destination feature's fields
stored at the corresponding index of the source feature's fields. A
value of -1 should be used to ignore the source's field. The array
should not be NULL and be as long as the number of fields in the
source feature.
bForgiving: TRUE if the operation should continue despite lacking
output fields matching some of the source fields.
OGRERR_NONE if the operation succeeds, even if some values are not
transferred, otherwise an error code.
"""
return _ogr.Feature_SetFromWithMap(self, *args)
def GetStyleString(self, *args):
"""
GetStyleString(Feature self) -> char const *
const char*
OGR_F_GetStyleString(OGRFeatureH hFeat)
Fetch style string for this feature.
Set the OGR Feature Style Specification for details on the format of
this string, and ogr_featurestyle.h for services available to parse
it.
This function is the same as the C++ method
OGRFeature::GetStyleString().
Parameters:
-----------
hFeat: handle to the feature to get the style from.
a reference to a representation in string format, or NULL if there
isn't one.
"""
return _ogr.Feature_GetStyleString(self, *args)
def SetStyleString(self, *args):
"""
SetStyleString(Feature self, char const * the_string)
void
OGR_F_SetStyleString(OGRFeatureH hFeat, const char *pszStyle)
Set feature style string. This method operate exactly as
OGR_F_SetStyleStringDirectly() except that it does not assume
ownership of the passed string, but instead makes a copy of it.
This function is the same as the C++ method
OGRFeature::SetStyleString().
Parameters:
-----------
hFeat: handle to the feature to set style to.
pszStyle: the style string to apply to this feature, cannot be NULL.
"""
return _ogr.Feature_SetStyleString(self, *args)
def GetFieldType(self, *args):
"""
GetFieldType(Feature self, int id) -> OGRFieldType
GetFieldType(Feature self, char const * name) -> OGRFieldType
"""
return _ogr.Feature_GetFieldType(self, *args)
    def Reference(self):
        """No-op; presumably retained for backwards compatibility only."""
        pass
    def Dereference(self):
        """No-op; presumably retained for backwards compatibility only."""
        pass
    def Destroy(self):
        "Once called, self has effectively been destroyed. Do not access. For backwards compatibility only"
        _ogr.delete_Feature( self )
        # Relinquish SWIG ownership so garbage collection does not free the
        # already-deleted C++ object a second time.
        self.thisown = 0
    def __cmp__(self, other):
        """Compares a feature to another for equality"""
        # NOTE(review): Equal() returns a truthy value when the features
        # match, but the __cmp__ protocol expects 0 for equality -- so
        # cmp(f1, f2) reports equal features as non-equal. Preserved as-is
        # because changing it would alter '==' semantics for existing
        # callers; confirm intent before relying on feature comparison.
        return self.Equal(other)
    def __copy__(self):
        """copy.copy() support: return a Clone() of this feature."""
        return self.Clone()
    # This makes it possible to fetch fields in the form "feature.area".
    # This has some risk of name collisions.
    def __getattr__(self, key):
        """Returns the values of fields by the given name"""
        if key == 'this':
            # 'this' holds the underlying SWIG pointer; fetch it directly
            # so the field lookups below cannot recurse on it.
            return self.__dict__[key]
        # Try attribute fields first, then geometry fields.
        idx = self.GetFieldIndex(key)
        if idx < 0:
            idx = self.GetGeomFieldIndex(key)
            if idx < 0:
                raise AttributeError(key)
            else:
                return self.GetGeomFieldRef(idx)
        else:
            return self.GetField(idx)
    # This makes it possible to set fields in the form "feature.area".
    # This has some risk of name collisions.
    def __setattr__(self, key, value):
        """Set the values of fields by the given name"""
        if key == 'this' or key == 'thisown':
            # SWIG bookkeeping attributes bypass the field machinery.
            self.__dict__[key] = value
        else:
            # Attribute field first, then geometry field, then fall back
            # to a plain Python instance attribute.
            idx = self.GetFieldIndex(key)
            if idx != -1:
                self.SetField2(idx,value)
            else:
                idx = self.GetGeomFieldIndex(key)
                if idx != -1:
                    self.SetGeomField(idx, value)
                else:
                    self.__dict__[key] = value
# This makes it possible to fetch fields in the form "feature['area']".
def __getitem__(self, key):
"""Returns the values of fields by the given name / field_index"""
if isinstance(key, str):
fld_index = self.GetFieldIndex(key)
if fld_index < 0:
if isinstance(key, str):
fld_index = self.GetGeomFieldIndex(key)
if fld_index < 0:
raise ValueError("Illegal field requested in GetField()")
else:
return self.GetGeomFieldRef(fld_index)
else:
return self.GetField(fld_index)
# This makes it possible to set fields in the form "feature['area'] = 123".
def __setitem__(self, key, value):
"""Returns the value of a field by field name / index"""
if isinstance(key, str):
fld_index = self.GetFieldIndex(key)
if fld_index < 0:
if isinstance(key, str):
fld_index = self.GetGeomFieldIndex(key)
if fld_index < 0:
raise ValueError("Illegal field requested in SetField()")
else:
return self.SetGeomField( fld_index, value )
else:
return self.SetField2( fld_index, value )
def GetField(self, fld_index):
if isinstance(fld_index, str):
fld_index = self.GetFieldIndex(fld_index)
if (fld_index < 0) or (fld_index > self.GetFieldCount()):
raise ValueError("Illegal field requested in GetField()")
if not (self.IsFieldSet(fld_index)):
return None
fld_type = self.GetFieldType(fld_index)
if fld_type == OFTInteger:
return self.GetFieldAsInteger(fld_index)
if fld_type == OFTReal:
return self.GetFieldAsDouble(fld_index)
if fld_type == OFTStringList:
return self.GetFieldAsStringList(fld_index)
if fld_type == OFTIntegerList:
return self.GetFieldAsIntegerList(fld_index)
if fld_type == OFTRealList:
return self.GetFieldAsDoubleList(fld_index)
## if fld_type == OFTDateTime or fld_type == OFTDate or fld_type == OFTTime:
# return self.GetFieldAsDate(fld_index)
# default to returning as a string. Should we add more types?
return self.GetFieldAsString(fld_index)
def SetField2(self, fld_index, value):
if isinstance(fld_index, str):
fld_index = self.GetFieldIndex(fld_index)
if (fld_index < 0) or (fld_index > self.GetFieldCount()):
raise ValueError("Illegal field requested in SetField2()")
if value is None:
self.UnsetField( fld_index )
return
if isinstance(value,list):
if len(value) == 0:
self.UnsetField( fld_index )
return
if isinstance(value[0],int):
self.SetFieldIntegerList(fld_index,value)
return
elif isinstance(value[0],float):
self.SetFieldDoubleList(fld_index,value)
return
elif isinstance(value[0],str):
self.SetFieldStringList(fld_index,value)
return
else:
raise TypeError( 'Unsupported type of list in SetField2()' )
try:
self.SetField( fld_index, value )
except:
self.SetField( fld_index, str(value) )
return
def keys(self):
names = []
for i in range(self.GetFieldCount()):
fieldname = self.GetFieldDefnRef(i).GetName()
names.append(fieldname)
return names
def items(self):
keys = self.keys()
output = {}
for key in keys:
output[key] = self.GetField(key)
return output
    def geometry(self):
        """Convenience alias for GetGeometryRef()."""
        return self.GetGeometryRef()
    def ExportToJson(self, as_object = False, options = None):
        """Exports a GeoJSON object which represents the Feature. The
        as_object parameter determines whether the returned value
        should be a Python object instead of a string. Defaults to False.
        The options parameter is passed to Geometry.ExportToJson()"""
        # Prefer simplejson when installed (presumably for speed on older
        # Pythons); fall back to the stdlib json module.
        try:
            import simplejson
        except ImportError:
            try:
                import json as simplejson
            except ImportError:
                raise ImportError("Unable to import simplejson or json, needed for ExportToJson.")
        geom = self.GetGeometryRef()
        if geom is not None:
            if options is None:
                options = []
            # Round-trip through the geometry's own JSON export so the
            # geometry member is a parsed object, not a nested string.
            geom_json_string = geom.ExportToJson(options = options)
            geom_json_object = simplejson.loads(geom_json_string)
        else:
            geom_json_object = None
        output = {'type':'Feature',
                  'geometry': geom_json_object,
                  'properties': {}
                 }
        # Only emit an 'id' member when the feature actually has a FID.
        fid = self.GetFID()
        if fid != NullFID:
            output['id'] = fid
        for key in self.keys():
            output['properties'][key] = self.GetField(key)
        if not as_object:
            output = simplejson.dumps(output)
        return output
# Register the Feature proxy class with the SWIG runtime.
Feature_swigregister = _ogr.Feature_swigregister
Feature_swigregister(Feature)
class FeatureDefn(object):
"""Proxy of C++ OGRFeatureDefnShadow class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
__swig_destroy__ = _ogr.delete_FeatureDefn
__del__ = lambda self : None;
def __init__(self, *args, **kwargs):
"""__init__(OGRFeatureDefnShadow self, char const * name_null_ok=None) -> FeatureDefn"""
this = _ogr.new_FeatureDefn(*args, **kwargs)
try: self.this.append(this)
except: self.this = this
def GetName(self, *args):
"""
GetName(FeatureDefn self) -> char const *
const char*
OGR_FD_GetName(OGRFeatureDefnH hDefn)
Get name of the OGRFeatureDefn passed as an argument.
This function is the same as the C++ method OGRFeatureDefn::GetName().
Parameters:
-----------
hDefn: handle to the feature definition to get the name from.
the name. This name is internal and should not be modified, or freed.
"""
return _ogr.FeatureDefn_GetName(self, *args)
def GetFieldCount(self, *args):
"""
GetFieldCount(FeatureDefn self) -> int
int
OGR_FD_GetFieldCount(OGRFeatureDefnH hDefn)
Fetch number of fields on the passed feature definition.
This function is the same as the C++ OGRFeatureDefn::GetFieldCount().
Parameters:
-----------
hDefn: handle to the feature definition to get the fields count from.
count of fields.
"""
return _ogr.FeatureDefn_GetFieldCount(self, *args)
def GetFieldDefn(self, *args):
"""
GetFieldDefn(FeatureDefn self, int i) -> FieldDefn
OGRFieldDefnH
OGR_FD_GetFieldDefn(OGRFeatureDefnH hDefn, int iField)
Fetch field definition of the passed feature definition.
This function is the same as the C++ method
OGRFeatureDefn::GetFieldDefn().
Starting with GDAL 1.7.0, this method will also issue an error if the
index is not valid.
Parameters:
-----------
hDefn: handle to the feature definition to get the field definition
from.
iField: the field to fetch, between 0 and GetFieldCount()-1.
an handle to an internal field definition object or NULL if invalid
index. This object should not be modified or freed by the application.
"""
return _ogr.FeatureDefn_GetFieldDefn(self, *args)
def GetFieldIndex(self, *args):
"""
GetFieldIndex(FeatureDefn self, char const * name) -> int
int
OGR_FD_GetFieldIndex(OGRFeatureDefnH hDefn, const char *pszFieldName)
Find field by name.
The field index of the first field matching the passed field name
(case insensitively) is returned.
This function is the same as the C++ method
OGRFeatureDefn::GetFieldIndex.
Parameters:
-----------
hDefn: handle to the feature definition to get field index from.
pszFieldName: the field name to search for.
the field index, or -1 if no match found.
"""
return _ogr.FeatureDefn_GetFieldIndex(self, *args)
def AddFieldDefn(self, *args):
"""
AddFieldDefn(FeatureDefn self, FieldDefn defn)
void
OGR_FD_AddFieldDefn(OGRFeatureDefnH hDefn, OGRFieldDefnH hNewField)
Add a new field definition to the passed feature definition.
To add a new field definition to a layer definition, do not use this
function directly, but use OGR_L_CreateField() instead.
This function should only be called while there are no OGRFeature
objects in existance based on this OGRFeatureDefn. The OGRFieldDefn
passed in is copied, and remains the responsibility of the caller.
This function is the same as the C++ method
OGRFeatureDefn::AddFieldDefn().
Parameters:
-----------
hDefn: handle to the feature definition to add the field definition
to.
hNewField: handle to the new field definition.
"""
return _ogr.FeatureDefn_AddFieldDefn(self, *args)
def GetGeomFieldCount(self, *args):
"""GetGeomFieldCount(FeatureDefn self) -> int"""
return _ogr.FeatureDefn_GetGeomFieldCount(self, *args)
def GetGeomFieldDefn(self, *args):
"""GetGeomFieldDefn(FeatureDefn self, int i) -> GeomFieldDefn"""
return _ogr.FeatureDefn_GetGeomFieldDefn(self, *args)
def GetGeomFieldIndex(self, *args):
"""GetGeomFieldIndex(FeatureDefn self, char const * name) -> int"""
return _ogr.FeatureDefn_GetGeomFieldIndex(self, *args)
def AddGeomFieldDefn(self, *args):
"""AddGeomFieldDefn(FeatureDefn self, GeomFieldDefn defn)"""
return _ogr.FeatureDefn_AddGeomFieldDefn(self, *args)
def DeleteGeomFieldDefn(self, *args):
"""DeleteGeomFieldDefn(FeatureDefn self, int idx) -> OGRErr"""
return _ogr.FeatureDefn_DeleteGeomFieldDefn(self, *args)
def GetGeomType(self, *args):
"""
GetGeomType(FeatureDefn self) -> OGRwkbGeometryType
OGRwkbGeometryType
OGR_FD_GetGeomType(OGRFeatureDefnH hDefn)
Fetch the geometry base type of the passed feature definition.
This function is the same as the C++ method
OGRFeatureDefn::GetGeomType().
Parameters:
-----------
hDefn: handle to the feature definition to get the geometry type
from.
the base type for all geometry related to this definition.
"""
return _ogr.FeatureDefn_GetGeomType(self, *args)
def SetGeomType(self, *args):
"""
SetGeomType(FeatureDefn self, OGRwkbGeometryType geom_type)
void
OGR_FD_SetGeomType(OGRFeatureDefnH hDefn, OGRwkbGeometryType eType)
Assign the base geometry type for the passed layer (the same as the
feature definition).
All geometry objects using this type must be of the defined type or a
derived type. The default upon creation is wkbUnknown which allows for
any geometry type. The geometry type should generally not be changed
after any OGRFeatures have been created against this definition.
This function is the same as the C++ method
OGRFeatureDefn::SetGeomType().
Parameters:
-----------
hDefn: handle to the layer or feature definition to set the geometry
type to.
eType: the new type to assign.
"""
return _ogr.FeatureDefn_SetGeomType(self, *args)
    def GetReferenceCount(self, *args):
        """
        GetReferenceCount(FeatureDefn self) -> int
        int
        OGR_FD_GetReferenceCount(OGRFeatureDefnH hDefn)
        Fetch current reference count.
        This function is the same as the C++ method
        OGRFeatureDefn::GetReferenceCount().
        Parameters:
        -----------
        hDefn: handle to the feature definition on which OGRFeature are
        based.
        the current reference count.
        """
        return _ogr.FeatureDefn_GetReferenceCount(self, *args)
def IsGeometryIgnored(self, *args):
    """IsGeometryIgnored(FeatureDefn self) -> int

    Determine whether the geometry can be omitted when fetching features.
    Wraps OGR_FD_IsGeometryIgnored / OGRFeatureDefn::IsGeometryIgnored().

    Parameters:
    -----------
    hDefn: handle to the feature definition on which OGRFeature are based.

    Returns the ignore state.
    """
    return _ogr.FeatureDefn_IsGeometryIgnored(self, *args)
def SetGeometryIgnored(self, *args):
    """SetGeometryIgnored(FeatureDefn self, int bIgnored)

    Set whether the geometry can be omitted when fetching features.
    Wraps OGR_FD_SetGeometryIgnored / OGRFeatureDefn::SetGeometryIgnored().

    Parameters:
    -----------
    hDefn: handle to the feature definition on which OGRFeature are based.

    bIgnore: ignore state
    """
    return _ogr.FeatureDefn_SetGeometryIgnored(self, *args)
def IsStyleIgnored(self, *args):
    """IsStyleIgnored(FeatureDefn self) -> int

    Determine whether the style can be omitted when fetching features.
    Wraps OGR_FD_IsStyleIgnored / OGRFeatureDefn::IsStyleIgnored().
    Returns the ignore state.
    """
    state = _ogr.FeatureDefn_IsStyleIgnored(self, *args)
    return state
def SetStyleIgnored(self, *args):
    """SetStyleIgnored(FeatureDefn self, int bIgnored)

    Set whether the style can be omitted when fetching features.
    Wraps OGR_FD_SetStyleIgnored / OGRFeatureDefn::SetStyleIgnored().

    Parameters:
    -----------
    hDefn: handle to the feature definition on which OGRFeature are based.

    bIgnore: ignore state
    """
    return _ogr.FeatureDefn_SetStyleIgnored(self, *args)
def IsSame(self, *args):
    """IsSame(FeatureDefn self, FeatureDefn other_defn) -> int

    Test whether this feature definition is identical to another.
    """
    same = _ogr.FeatureDefn_IsSame(self, *args)
    return same
def Destroy(self):
    """Once called, self has effectively been destroyed. Do not access. For backwards compatibility only."""
    # Release the underlying C object and drop ownership so the SWIG
    # destructor does not try to free it a second time.
    _ogr.delete_FeatureDefn(self)
    self.thisown = 0
# Register the FeatureDefn proxy class with the SWIG runtime so wrapped
# C objects of this type are exposed as FeatureDefn instances.
FeatureDefn_swigregister = _ogr.FeatureDefn_swigregister
FeatureDefn_swigregister(FeatureDefn)
class FieldDefn(object):
    """Proxy of C++ OGRFieldDefnShadow class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    __swig_destroy__ = _ogr.delete_FieldDefn
    __del__ = lambda self: None

    def __init__(self, *args, **kwargs):
        """__init__(OGRFieldDefnShadow self, char const * name_null_ok="unnamed", OGRFieldType field_type=OFTString) -> FieldDefn"""
        this = _ogr.new_FieldDefn(*args, **kwargs)
        try:
            self.this.append(this)
        except:
            self.this = this

    def GetName(self, *args):
        """GetName(FieldDefn self) -> char const *"""
        return _ogr.FieldDefn_GetName(self, *args)

    def GetNameRef(self, *args):
        """GetNameRef(FieldDefn self) -> char const *

        Fetch the name of this field (OGR_Fld_GetNameRef).
        """
        return _ogr.FieldDefn_GetNameRef(self, *args)

    def SetName(self, *args):
        """SetName(FieldDefn self, char const * name)

        Reset the name of this field (OGR_Fld_SetName).
        """
        return _ogr.FieldDefn_SetName(self, *args)

    def GetType(self, *args):
        """GetType(FieldDefn self) -> OGRFieldType

        Fetch the type of this field (OGR_Fld_GetType).
        """
        return _ogr.FieldDefn_GetType(self, *args)

    def SetType(self, *args):
        """SetType(FieldDefn self, OGRFieldType type)

        Set the type of this field.  This should never be done to a
        FieldDefn that is already part of a FeatureDefn (OGR_Fld_SetType).
        """
        return _ogr.FieldDefn_SetType(self, *args)

    def GetJustify(self, *args):
        """GetJustify(FieldDefn self) -> OGRJustification

        Get the justification for this field (OGR_Fld_GetJustify).
        """
        return _ogr.FieldDefn_GetJustify(self, *args)

    def SetJustify(self, *args):
        """SetJustify(FieldDefn self, OGRJustification justify)

        Set the justification for this field (OGR_Fld_SetJustify).
        """
        return _ogr.FieldDefn_SetJustify(self, *args)

    def GetWidth(self, *args):
        """GetWidth(FieldDefn self) -> int

        Get the formatting width for this field; zero means no specified
        width (OGR_Fld_GetWidth).
        """
        return _ogr.FieldDefn_GetWidth(self, *args)

    def SetWidth(self, *args):
        """SetWidth(FieldDefn self, int width)

        Set the formatting width for this field in characters
        (OGR_Fld_SetWidth).
        """
        return _ogr.FieldDefn_SetWidth(self, *args)

    def GetPrecision(self, *args):
        """GetPrecision(FieldDefn self) -> int

        Get the formatting precision for this field; normally zero for
        field types other than OFTReal (OGR_Fld_GetPrecision).
        """
        return _ogr.FieldDefn_GetPrecision(self, *args)

    def SetPrecision(self, *args):
        """SetPrecision(FieldDefn self, int precision)

        Set the formatting precision for this field in characters;
        normally zero for field types other than OFTReal
        (OGR_Fld_SetPrecision).
        """
        return _ogr.FieldDefn_SetPrecision(self, *args)

    def GetTypeName(self, *args):
        """GetTypeName(FieldDefn self) -> char const *"""
        return _ogr.FieldDefn_GetTypeName(self, *args)

    def GetFieldTypeName(self, *args):
        """GetFieldTypeName(FieldDefn self, OGRFieldType type) -> char const *"""
        return _ogr.FieldDefn_GetFieldTypeName(self, *args)

    def IsIgnored(self, *args):
        """IsIgnored(FieldDefn self) -> int

        Return whether this field should be omitted when fetching
        features (OGR_Fld_IsIgnored).
        """
        return _ogr.FieldDefn_IsIgnored(self, *args)

    def SetIgnored(self, *args):
        """SetIgnored(FieldDefn self, int bIgnored)

        Set whether this field should be omitted when fetching features
        (OGR_Fld_SetIgnored).
        """
        return _ogr.FieldDefn_SetIgnored(self, *args)

    # Attribute-style access to the common field properties.
    width = property(GetWidth, SetWidth)
    type = property(GetType, SetType)
    precision = property(GetPrecision, SetPrecision)
    name = property(GetName, SetName)
    justify = property(GetJustify, SetJustify)

    def Destroy(self):
        """Once called, self has effectively been destroyed. Do not access. For backwards compatibility only."""
        _ogr.delete_FieldDefn(self)
        self.thisown = 0

FieldDefn_swigregister = _ogr.FieldDefn_swigregister
FieldDefn_swigregister(FieldDefn)
class GeomFieldDefn(object):
    """Proxy of C++ OGRGeomFieldDefnShadow class."""
    thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    __swig_destroy__ = _ogr.delete_GeomFieldDefn
    __del__ = lambda self: None

    def __init__(self, *args, **kwargs):
        """__init__(OGRGeomFieldDefnShadow self, char const * name_null_ok="", OGRwkbGeometryType field_type=wkbUnknown) -> GeomFieldDefn"""
        this = _ogr.new_GeomFieldDefn(*args, **kwargs)
        try:
            self.this.append(this)
        except:
            self.this = this

    def GetName(self, *args):
        """GetName(GeomFieldDefn self) -> char const *"""
        return _ogr.GeomFieldDefn_GetName(self, *args)

    def GetNameRef(self, *args):
        """GetNameRef(GeomFieldDefn self) -> char const *"""
        return _ogr.GeomFieldDefn_GetNameRef(self, *args)

    def SetName(self, *args):
        """SetName(GeomFieldDefn self, char const * name)"""
        return _ogr.GeomFieldDefn_SetName(self, *args)

    def GetType(self, *args):
        """GetType(GeomFieldDefn self) -> OGRwkbGeometryType"""
        return _ogr.GeomFieldDefn_GetType(self, *args)

    def SetType(self, *args):
        """SetType(GeomFieldDefn self, OGRwkbGeometryType type)"""
        return _ogr.GeomFieldDefn_SetType(self, *args)

    def GetSpatialRef(self, *args):
        """GetSpatialRef(GeomFieldDefn self) -> SpatialReference"""
        return _ogr.GeomFieldDefn_GetSpatialRef(self, *args)

    def SetSpatialRef(self, *args):
        """SetSpatialRef(GeomFieldDefn self, SpatialReference srs)"""
        return _ogr.GeomFieldDefn_SetSpatialRef(self, *args)

    def IsIgnored(self, *args):
        """IsIgnored(GeomFieldDefn self) -> int"""
        return _ogr.GeomFieldDefn_IsIgnored(self, *args)

    def SetIgnored(self, *args):
        """SetIgnored(GeomFieldDefn self, int bIgnored)"""
        return _ogr.GeomFieldDefn_SetIgnored(self, *args)

    # Attribute-style access to the geometry-field properties.
    type = property(GetType, SetType)
    name = property(GetName, SetName)
    srs = property(GetSpatialRef, SetSpatialRef)

GeomFieldDefn_swigregister = _ogr.GeomFieldDefn_swigregister
GeomFieldDefn_swigregister(GeomFieldDefn)
def CreateGeometryFromWkb(*args, **kwargs):
    """CreateGeometryFromWkb(int len, SpatialReference reference=None) -> Geometry"""
    geom = _ogr.CreateGeometryFromWkb(*args, **kwargs)
    return geom
def CreateGeometryFromWkt(*args, **kwargs):
    """CreateGeometryFromWkt(char ** val, SpatialReference reference=None) -> Geometry"""
    geom = _ogr.CreateGeometryFromWkt(*args, **kwargs)
    return geom
def CreateGeometryFromGML(*args):
    """CreateGeometryFromGML(char const * input_string) -> Geometry"""
    geom = _ogr.CreateGeometryFromGML(*args)
    return geom
def CreateGeometryFromJson(*args):
    """CreateGeometryFromJson(char const * input_string) -> Geometry"""
    geom = _ogr.CreateGeometryFromJson(*args)
    return geom
def BuildPolygonFromEdges(*args, **kwargs):
    """BuildPolygonFromEdges(Geometry hLineCollection, int bBestEffort=0, int bAutoClose=0, double dfTolerance=0) -> Geometry"""
    polygon = _ogr.BuildPolygonFromEdges(*args, **kwargs)
    return polygon
def ApproximateArcAngles(*args, **kwargs):
    """
    ApproximateArcAngles(double dfCenterX, double dfCenterY, double dfZ, double dfPrimaryRadius, double dfSecondaryAxis,
        double dfRotation, double dfStartAngle, double dfEndAngle, double dfMaxAngleStepSizeDegrees) -> Geometry
    """
    arc = _ogr.ApproximateArcAngles(*args, **kwargs)
    return arc
def ForceToPolygon(*args):
    """ForceToPolygon(Geometry geom_in) -> Geometry"""
    converted = _ogr.ForceToPolygon(*args)
    return converted
def ForceToLineString(*args):
    """ForceToLineString(Geometry geom_in) -> Geometry"""
    converted = _ogr.ForceToLineString(*args)
    return converted
def ForceToMultiPolygon(*args):
    """ForceToMultiPolygon(Geometry geom_in) -> Geometry"""
    converted = _ogr.ForceToMultiPolygon(*args)
    return converted
def ForceToMultiPoint(*args):
    """ForceToMultiPoint(Geometry geom_in) -> Geometry"""
    converted = _ogr.ForceToMultiPoint(*args)
    return converted
def ForceToMultiLineString(*args):
    """ForceToMultiLineString(Geometry geom_in) -> Geometry"""
    converted = _ogr.ForceToMultiLineString(*args)
    return converted
class Geometry(object):
"""Proxy of C++ OGRGeometryShadow class"""
thisown = _swig_property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
__swig_destroy__ = _ogr.delete_Geometry
__del__ = lambda self : None;
def __init__(self, *args, **kwargs):
    """__init__(OGRGeometryShadow self, OGRwkbGeometryType type=wkbUnknown, char * wkt=None, int wkb=0, char * gml=None) -> Geometry"""
    this = _ogr.new_Geometry(*args, **kwargs)
    try:
        self.this.append(this)
    except:
        self.this = this
def ExportToWkt(self, *args):
    """ExportToWkt(Geometry self) -> OGRErr

    Convert this geometry to well known text format (OGR_G_ExportToWkt;
    relates to SFCOM IWks::ExportToWKT).  Currently OGRERR_NONE is
    always returned.
    """
    return _ogr.Geometry_ExportToWkt(self, *args)
def ExportToWkb(self, *args, **kwargs):
    """ExportToWkb(Geometry self, OGRwkbByteOrder byte_order=wkbXDR) -> OGRErr

    Convert this geometry to well known binary format
    (OGR_G_ExportToWkb; relates to SFCOM IWks::ExportToWKB).
    byte_order is wkbXDR (MSB) or wkbNDR (LSB).  Currently OGRERR_NONE
    is always returned.
    """
    return _ogr.Geometry_ExportToWkb(self, *args, **kwargs)
def ExportToGML(self, *args, **kwargs):
    """ExportToGML(Geometry self, char ** options=None) -> retStringAndCPLFree *"""
    gml = _ogr.Geometry_ExportToGML(self, *args, **kwargs)
    return gml
def ExportToKML(self, *args):
    """ExportToKML(Geometry self, char const * altitude_mode=None) -> retStringAndCPLFree *"""
    kml = _ogr.Geometry_ExportToKML(self, *args)
    return kml
def ExportToJson(self, *args, **kwargs):
    """ExportToJson(Geometry self, char ** options=None) -> retStringAndCPLFree *"""
    json_text = _ogr.Geometry_ExportToJson(self, *args, **kwargs)
    return json_text
def AddPoint(self, *args, **kwargs):
    """AddPoint(Geometry self, double x, double y, double z=0)"""
    return _ogr.Geometry_AddPoint(self, *args, **kwargs)
def AddPoint_2D(self, *args):
    """AddPoint_2D(Geometry self, double x, double y)"""
    return _ogr.Geometry_AddPoint_2D(self, *args)
def AddGeometryDirectly(self, *args):
    """AddGeometryDirectly(Geometry self, Geometry other_disown) -> OGRErr"""
    err = _ogr.Geometry_AddGeometryDirectly(self, *args)
    return err
def AddGeometry(self, *args):
    """AddGeometry(Geometry self, Geometry other) -> OGRErr"""
    err = _ogr.Geometry_AddGeometry(self, *args)
    return err
def Clone(self, *args):
    """Clone(Geometry self) -> Geometry

    Make a copy of this geometry with the same spatial reference system
    as the original (OGR_G_Clone; relates to SFCOM IGeometry::clone).
    """
    copy = _ogr.Geometry_Clone(self, *args)
    return copy
def GetGeometryType(self, *args):
    """GetGeometryType(Geometry self) -> OGRwkbGeometryType

    Fetch the geometry type code (OGR_G_GetGeometryType).  The value
    may include the 2.5D flag; apply the wkbFlatten() macro to the
    result to obtain the flattened 2D type.
    """
    return _ogr.Geometry_GetGeometryType(self, *args)
def GetGeometryName(self, *args):
    """GetGeometryName(Geometry self) -> char const *

    Fetch the name used for this geometry type in well known text
    format (OGR_G_GetGeometryName).  There is no SFCOM analog.
    """
    return _ogr.Geometry_GetGeometryName(self, *args)
def Length(self, *args):
    """Length(Geometry self) -> double"""
    return _ogr.Geometry_Length(self, *args)
def Area(self, *args):
    """Area(Geometry self) -> double"""
    return _ogr.Geometry_Area(self, *args)
def GetArea(self, *args):
    """GetArea(Geometry self) -> double"""
    return _ogr.Geometry_GetArea(self, *args)
def GetPointCount(self, *args):
    """GetPointCount(Geometry self) -> int"""
    return _ogr.Geometry_GetPointCount(self, *args)
def GetPoints(self, *args, **kwargs):
    """GetPoints(Geometry self, int nCoordDimension=0)"""
    return _ogr.Geometry_GetPoints(self, *args, **kwargs)
def GetX(self, *args, **kwargs):
    """GetX(Geometry self, int point=0) -> double"""
    return _ogr.Geometry_GetX(self, *args, **kwargs)
def GetY(self, *args, **kwargs):
    """GetY(Geometry self, int point=0) -> double"""
    return _ogr.Geometry_GetY(self, *args, **kwargs)
def GetZ(self, *args, **kwargs):
    """GetZ(Geometry self, int point=0) -> double"""
    return _ogr.Geometry_GetZ(self, *args, **kwargs)
def GetPoint(self, *args):
    """GetPoint(Geometry self, int iPoint=0)"""
    return _ogr.Geometry_GetPoint(self, *args)
def GetPoint_2D(self, *args):
    """GetPoint_2D(Geometry self, int iPoint=0)"""
    return _ogr.Geometry_GetPoint_2D(self, *args)
def GetGeometryCount(self, *args):
    """GetGeometryCount(Geometry self) -> int"""
    return _ogr.Geometry_GetGeometryCount(self, *args)
def SetPoint(self, *args, **kwargs):
    """SetPoint(Geometry self, int point, double x, double y, double z=0)"""
    return _ogr.Geometry_SetPoint(self, *args, **kwargs)
def SetPoint_2D(self, *args, **kwargs):
    """SetPoint_2D(Geometry self, int point, double x, double y)"""
    return _ogr.Geometry_SetPoint_2D(self, *args, **kwargs)
def GetGeometryRef(self, *args):
    """GetGeometryRef(Geometry self, int geom) -> Geometry"""
    ref = _ogr.Geometry_GetGeometryRef(self, *args)
    return ref
def Simplify(self, *args):
    """Simplify(Geometry self, double tolerance) -> Geometry

    Compute a simplified geometry within the given distance tolerance
    (OGR_G_Simplify).  Built on GEOS; without GEOS it always fails with
    a CPLE_NotSupported error.  Returns the simplified geometry or NULL
    on error.  OGR 1.8.0.
    """
    return _ogr.Geometry_Simplify(self, *args)
def SimplifyPreserveTopology(self, *args):
    """SimplifyPreserveTopology(Geometry self, double tolerance) -> Geometry

    Compute a simplified geometry within the given distance tolerance,
    preserving topology (OGR_G_SimplifyPreserveTopology).  Built on
    GEOS; without GEOS it always fails with a CPLE_NotSupported error.
    Returns the simplified geometry or NULL on error.  OGR 1.9.0.
    """
    return _ogr.Geometry_SimplifyPreserveTopology(self, *args)
def Boundary(self, *args):
    """Boundary(Geometry self) -> Geometry

    Compute the boundary of this geometry, returned as a newly created
    geometry owned by the caller, or NULL on failure (OGR_G_Boundary).
    Built on GEOS; without GEOS it always fails with a
    CPLE_NotSupported error.  OGR 1.8.0.
    """
    return _ogr.Geometry_Boundary(self, *args)
def GetBoundary(self, *args):
    """GetBoundary(Geometry self) -> Geometry

    Compute the boundary (deprecated).  Deprecated, see OGR_G_Boundary().
    """
    return _ogr.Geometry_GetBoundary(self, *args)
def ConvexHull(self, *args):
    """ConvexHull(Geometry self) -> Geometry

    Compute the convex hull of this geometry, returned as a newly
    created geometry owned by the caller, or NULL on failure
    (OGR_G_ConvexHull).  Built on GEOS; without GEOS it always fails
    with a CPLE_NotSupported error.
    """
    return _ogr.Geometry_ConvexHull(self, *args)
def Buffer(self, *args, **kwargs):
    """Buffer(Geometry self, double distance, int quadsecs=30) -> Geometry

    Compute a buffer of this geometry: a polygon containing the region
    within the buffer distance of the original geometry (OGR_G_Buffer).

    Some buffer sections are properly described as curves, but are
    converted to approximate polygons.  quadsecs controls how many
    segments are used to define a 90 degree curve (a quadrant of a
    circle); 30 is a reasonable default.  Larger values give more
    vertices, smaller values reduce accuracy.

    Built on GEOS; without GEOS it always fails with a
    CPLE_NotSupported error.  Returns the newly created geometry, or
    NULL if an error occurs.
    """
    return _ogr.Geometry_Buffer(self, *args, **kwargs)
def Intersection(self, *args):
    """Intersection(Geometry self, Geometry other) -> Geometry

    Generate a new geometry which is the region of intersection of the
    two geometries (OGR_G_Intersection); use Intersects() to test
    whether two geometries intersect at all.  Built on GEOS; without
    GEOS it always fails with a CPLE_NotSupported error.  Returns the
    intersection, or NULL if there is none or an error occurs.
    """
    return _ogr.Geometry_Intersection(self, *args)
def Union(self, *args):
    """Union(Geometry self, Geometry other) -> Geometry

    Generate a new geometry which is the region of union of the two
    geometries (OGR_G_Union).  Built on GEOS; without GEOS it always
    fails with a CPLE_NotSupported error.  Returns the union, or NULL
    if an error occurs.
    """
    return _ogr.Geometry_Union(self, *args)
def UnionCascaded(self, *args):
    """UnionCascaded(Geometry self) -> Geometry

    Compute the union of this geometry using cascading
    (OGR_G_UnionCascaded).  Built on GEOS; without GEOS it always fails
    with a CPLE_NotSupported error.  Returns the union, or NULL if an
    error occurs.
    """
    return _ogr.Geometry_UnionCascaded(self, *args)
def Difference(self, *args):
    """Difference(Geometry self, Geometry other) -> Geometry

    Generate a new geometry which is this geometry with the region of
    the other geometry removed (OGR_G_Difference).  Built on GEOS;
    without GEOS it always fails with a CPLE_NotSupported error.
    Returns the difference, or NULL if it is empty or an error occurs.
    """
    return _ogr.Geometry_Difference(self, *args)
def SymDifference(self, *args):
    """SymDifference(Geometry self, Geometry other) -> Geometry

    Generate a new geometry which is the symmetric difference of this
    geometry and the other geometry (OGR_G_SymDifference).  Built on
    GEOS; without GEOS it always fails with a CPLE_NotSupported error.
    Returns the symmetric difference, or NULL if it is empty or an
    error occurs.  OGR 1.8.0.
    """
    return _ogr.Geometry_SymDifference(self, *args)
def SymmetricDifference(self, *args):
    """SymmetricDifference(Geometry self, Geometry other) -> Geometry

    Compute the symmetric difference (deprecated).  Deprecated, see
    OGR_G_SymDifference().
    """
    return _ogr.Geometry_SymmetricDifference(self, *args)
def Distance(self, *args):
    """Distance(Geometry self, Geometry other) -> double

    Return the shortest distance between the two geometries, or -1 if
    an error occurs (OGR_G_Distance).  Built on GEOS; without GEOS it
    always fails with a CPLE_NotSupported error.
    """
    dist = _ogr.Geometry_Distance(self, *args)
    return dist
def Empty(self, *args):
    """Empty(Geometry self)

    Clear geometry information, restoring the geometry to its initial
    state after construction and before assignment of actual geometry
    (OGR_G_Empty; relates to SFCOM IGeometry::Empty).
    """
    return _ogr.Geometry_Empty(self, *args)
def IsEmpty(self, *args):
    """IsEmpty(Geometry self) -> bool

    Test if the geometry is empty (OGR_G_IsEmpty).  Returns TRUE if the
    geometry has no points, otherwise FALSE.
    """
    return _ogr.Geometry_IsEmpty(self, *args)
def IsValid(self, *args):
    """IsValid(Geometry self) -> bool

    Test if the geometry is valid (OGR_G_IsValid).  Built on GEOS;
    without GEOS it always returns FALSE.

    Returns TRUE if the geometry is valid, otherwise FALSE.
    """
    # NOTE: the upstream generated docstring said "TRUE if the geometry
    # has no points" -- a copy/paste from IsEmpty; corrected here.
    return _ogr.Geometry_IsValid(self, *args)
def IsSimple(self, *args):
    """IsSimple(Geometry self) -> bool

    Return TRUE if the geometry is simple (OGR_G_IsSimple), i.e. it has
    no anomalous geometric points such as self intersection or self
    tangency.  Without GEOS this always returns FALSE.
    """
    return _ogr.Geometry_IsSimple(self, *args)
def IsRing(self, *args):
    """IsRing(Geometry self) -> bool

    Test if the geometry is a ring (OGR_G_IsRing).  Built on GEOS;
    without GEOS it always returns FALSE.

    Returns TRUE if the geometry is a ring, otherwise FALSE.
    """
    # NOTE: the upstream generated docstring said "TRUE if the geometry
    # has no points" -- a copy/paste from IsEmpty; corrected here.
    return _ogr.Geometry_IsRing(self, *args)
def Intersects(self, *args):
    """Intersects(Geometry self, Geometry other) -> bool

    Do these features intersect?  Currently this is not implemented in
    a rigorous fashion: it generally just tests whether the envelopes
    of the two features intersect.  Eventually this will be made
    rigorous.  Wraps OGR_G_Intersects.

    Returns TRUE if the geometries intersect, otherwise FALSE.
    """
    return _ogr.Geometry_Intersects(self, *args)
def Intersect(self, *args):
    """Intersect(Geometry self, Geometry other) -> bool

    Wraps OGR_G_Intersect.
    """
    return _ogr.Geometry_Intersect(self, *args)
def Equals(self, *args):
    """Equals(Geometry self, Geometry other) -> bool

    Return TRUE if the two geometries are equivalent, FALSE otherwise
    (OGR_G_Equals).
    """
    return _ogr.Geometry_Equals(self, *args)
def Equal(self, *args):
    """Equal(Geometry self, Geometry other) -> bool

    Wraps OGR_G_Equal.
    """
    return _ogr.Geometry_Equal(self, *args)
def Disjoint(self, *args):
"""
Disjoint(Geometry self, Geometry other) -> bool
int OGR_G_Disjoint(OGRGeometryH
hThis, OGRGeometryH hOther)
Test for disjointness.
Tests if this geometry and the other geometry are disjoint.
This function is the same as the C++ method OGRGeometry::Disjoint().
This function is built on the GEOS library, check it for the
definition of the geometry operation. If OGR is built without the GEOS
library, this function will always fail, issuing a CPLE_NotSupported
error.
Parameters:
-----------
hThis: the geometry to compare.
hOther: the other geometry to compare.
TRUE if they are disjoint, otherwise FALSE.
"""
return _ogr.Geometry_Disjoint(self, *args)
def Touches(self, *args):
"""
Touches(Geometry self, Geometry other) -> bool
int OGR_G_Touches(OGRGeometryH hThis,
OGRGeometryH hOther)
Test for touching.
Tests if this geometry and the other geometry are touching.
This function is the same as the C++ method OGRGeometry::Touches().
This function is built on the GEOS library, check it for the
definition of the geometry operation. If OGR is built without the GEOS
library, this function will always fail, issuing a CPLE_NotSupported
error.
Parameters:
-----------
hThis: the geometry to compare.
hOther: the other geometry to compare.
TRUE if they are touching, otherwise FALSE.
"""
return _ogr.Geometry_Touches(self, *args)
def Crosses(self, *args):
"""
Crosses(Geometry self, Geometry other) -> bool
int OGR_G_Crosses(OGRGeometryH hThis,
OGRGeometryH hOther)
Test for crossing.
Tests if this geometry and the other geometry are crossing.
This function is the same as the C++ method OGRGeometry::Crosses().
This function is built on the GEOS library, check it for the
definition of the geometry operation. If OGR is built without the GEOS
library, this function will always fail, issuing a CPLE_NotSupported
error.
Parameters:
-----------
hThis: the geometry to compare.
hOther: the other geometry to compare.
TRUE if they are crossing, otherwise FALSE.
"""
return _ogr.Geometry_Crosses(self, *args)
def Within(self, *args):
"""
Within(Geometry self, Geometry other) -> bool
int OGR_G_Within(OGRGeometryH hThis,
OGRGeometryH hOther)
Test for containment.
Tests if this geometry is within the other geometry.
This function is the same as the C++ method OGRGeometry::Within().
This function is built on the GEOS library, check it for the
definition of the geometry operation. If OGR is built without the GEOS
library, this function will always fail, issuing a CPLE_NotSupported
error.
Parameters:
-----------
hThis: the geometry to compare.
hOther: the other geometry to compare.
TRUE if hThis is within hOther, otherwise FALSE.
"""
return _ogr.Geometry_Within(self, *args)
def Contains(self, *args):
"""
Contains(Geometry self, Geometry other) -> bool
int OGR_G_Contains(OGRGeometryH
hThis, OGRGeometryH hOther)
Test for containment.
Tests if this geometry contains the other geometry.
This function is the same as the C++ method OGRGeometry::Contains().
This function is built on the GEOS library, check it for the
definition of the geometry operation. If OGR is built without the GEOS
library, this function will always fail, issuing a CPLE_NotSupported
error.
Parameters:
-----------
hThis: the geometry to compare.
hOther: the other geometry to compare.
TRUE if hThis contains hOther geometry, otherwise FALSE.
"""
return _ogr.Geometry_Contains(self, *args)
def Overlaps(self, *args):
"""
Overlaps(Geometry self, Geometry other) -> bool
int OGR_G_Overlaps(OGRGeometryH
hThis, OGRGeometryH hOther)
Test for overlap.
Tests if this geometry and the other geometry overlap, that is their
intersection has a non-zero area.
This function is the same as the C++ method OGRGeometry::Overlaps().
This function is built on the GEOS library, check it for the
definition of the geometry operation. If OGR is built without the GEOS
library, this function will always fail, issuing a CPLE_NotSupported
error.
Parameters:
-----------
hThis: the geometry to compare.
hOther: the other geometry to compare.
TRUE if they are overlapping, otherwise FALSE.
"""
return _ogr.Geometry_Overlaps(self, *args)
def TransformTo(self, *args):
    """TransformTo(Geometry self, SpatialReference reference) -> OGRErr

    Reproject the geometry's coordinates into the given spatial
    reference system (OGR_G_TransformTo).  The geometry must already
    have an assigned SRS that is transformable to the target.  When
    transforming many geometries it is cheaper to build one
    OGRCoordinateTransformation up front and call Transform() with it.
    Returns OGRERR_NONE on success, or an error code.
    """
    return _ogr.Geometry_TransformTo(self, *args)

def Transform(self, *args):
    """Transform(Geometry self, CoordinateTransformation trans) -> OGRErr

    Apply an arbitrary coordinate transformation (OGR_G_Transform).
    The geometry's current SRS is ignored: it is assumed to be the
    transformation's source SRS, and on success the transformation's
    output SRS is assigned to the geometry.  Returns OGRERR_NONE on
    success or an error code.
    """
    return _ogr.Geometry_Transform(self, *args)

def GetSpatialReference(self, *args):
    """GetSpatialReference(Geometry self) -> SpatialReference

    Return the geometry's spatial reference system
    (OGR_G_GetSpatialReference; relates to SFCOM
    IGeometry::get_SpatialReference()).
    """
    return _ogr.Geometry_GetSpatialReference(self, *args)

def AssignSpatialReference(self, *args):
    """AssignSpatialReference(Geometry self, SpatialReference reference)

    Assign a spatial reference system without reprojecting
    (OGR_G_AssignSpatialReference): only the interpretation of the
    existing coordinates changes.  The SRS reference count is
    incremented; the SRS object is not copied.
    """
    return _ogr.Geometry_AssignSpatialReference(self, *args)
def CloseRings(self, *args):
    """CloseRings(Geometry self)

    Close any unclosed polygon rings in this geometry, or in contained
    geometries, by appending the starting point (OGR_G_CloseRings).
    """
    return _ogr.Geometry_CloseRings(self, *args)

def FlattenTo2D(self, *args):
    """FlattenTo2D(Geometry self)

    Convert the geometry to strictly 2D, in effect converting all Z
    coordinates to 0.0 (OGR_G_FlattenTo2D).
    """
    return _ogr.Geometry_FlattenTo2D(self, *args)

def Segmentize(self, *args):
    """Segmentize(Geometry self, double dfMaxLength)

    Densify the geometry so that no segment is longer than
    dfMaxLength (OGR_G_Segmentize).  Distance computation is done in
    2D only; interpolated points get Z and M values of 0.
    """
    return _ogr.Geometry_Segmentize(self, *args)
def GetEnvelope(self, *args):
    """GetEnvelope(Geometry self)

    Compute the 2D bounding envelope of this geometry
    (OGR_G_GetEnvelope).
    """
    return _ogr.Geometry_GetEnvelope(self, *args)

def GetEnvelope3D(self, *args):
    """GetEnvelope3D(Geometry self)

    Compute the 3D bounding envelope of this geometry
    (OGR_G_GetEnvelope3D).  Available since OGR 1.9.0.
    """
    return _ogr.Geometry_GetEnvelope3D(self, *args)

def Centroid(self, *args):
    """Centroid(Geometry self) -> Geometry

    Compute the geometry centroid (OGR_G_Centroid); the result is not
    necessarily within the geometry.  The GEOS implementation also
    handles non-surface types (multipoint, linestring,
    geometrycollection).  GEOS-based: fails with CPLE_NotSupported
    when GEOS is absent.
    """
    return _ogr.Geometry_Centroid(self, *args)

def PointOnSurface(self, *args):
    """PointOnSurface(Geometry self) -> Geometry"""
    return _ogr.Geometry_PointOnSurface(self, *args)

def WkbSize(self, *args):
    """WkbSize(Geometry self) -> int

    Exact number of bytes required for the well-known-binary
    representation of this geometry (OGR_G_WkbSize; relates to SFCOM
    IWks::WkbSize()).  May be slightly expensive for complex
    geometries.
    """
    return _ogr.Geometry_WkbSize(self, *args)
def GetCoordinateDimension(self, *args):
    """GetCoordinateDimension(Geometry self) -> int

    Dimension of the coordinates: in practice 2 or 3, or 0 for an
    empty point (OGR_G_GetCoordinateDimension).
    """
    return _ogr.Geometry_GetCoordinateDimension(self, *args)

def SetCoordinateDimension(self, *args):
    """SetCoordinateDimension(Geometry self, int dimension)

    Set the explicit coordinate dimension, either 2 or 3
    (OGR_G_SetCoordinateDimension).  Setting it to 2 should zero out
    any existing Z values; children of a geometry collection are not
    necessarily affected.
    """
    return _ogr.Geometry_SetCoordinateDimension(self, *args)

def GetDimension(self, *args):
    """GetDimension(Geometry self) -> int

    Topological dimension of the geometry itself — 0 for points, 1
    for lines, 2 for surfaces (OGR_G_GetDimension) — as opposed to
    the dimension of the underlying space reported by
    GetCoordinateDimension().
    """
    return _ogr.Geometry_GetDimension(self, *args)
def Destroy(self):
    """Explicitly delete the underlying C geometry and disown the wrapper."""
    self.__swig_destroy__(self)
    self.__del__()
    self.thisown = 0

def __str__(self):
    """Render the geometry as its WKT representation."""
    return self.ExportToWkt()

def __reduce__(self):
    """Pickle support: the geometry round-trips through its WKB form."""
    return (self.__class__, (), self.ExportToWkb())

def __setstate__(self, state):
    """Unpickle support: adopt the geometry parsed from WKB *state*."""
    result = CreateGeometryFromWkb(state)
    self.this = result.this

def __iter__(self):
    """Iterate over the direct sub-geometries."""
    self.iter_subgeom = 0
    return self

def next(self):
    """Return the next sub-geometry (Python 2 iterator protocol)."""
    if self.iter_subgeom >= self.GetGeometryCount():
        raise StopIteration
    subgeom = self.GetGeometryRef(self.iter_subgeom)
    self.iter_subgeom += 1
    return subgeom
# Register the Python proxy class with the SWIG runtime so that
# geometries created on the C side are wrapped as Geometry instances.
Geometry_swigregister = _ogr.Geometry_swigregister
Geometry_swigregister(Geometry)
# Module-level SWIG wrappers for the global OGR C API entry points.
# Each function simply forwards its arguments to the corresponding
# function in the _ogr extension module.

def GetDriverCount(*args):
    """GetDriverCount() -> int"""
    return _ogr.GetDriverCount(*args)

def GetOpenDSCount(*args):
    """GetOpenDSCount() -> int"""
    return _ogr.GetOpenDSCount(*args)

def SetGenerate_DB2_V72_BYTE_ORDER(*args):
    """SetGenerate_DB2_V72_BYTE_ORDER(int bGenerate_DB2_V72_BYTE_ORDER) -> OGRErr"""
    return _ogr.SetGenerate_DB2_V72_BYTE_ORDER(*args)

def RegisterAll(*args):
    """RegisterAll()"""
    return _ogr.RegisterAll(*args)

def GeometryTypeToName(*args):
    """GeometryTypeToName(OGRwkbGeometryType eType) -> char const *"""
    return _ogr.GeometryTypeToName(*args)

def GetFieldTypeName(*args):
    """GetFieldTypeName(OGRFieldType type) -> char const *"""
    return _ogr.GetFieldTypeName(*args)

def GetOpenDS(*args):
    """GetOpenDS(int ds_number) -> DataSource"""
    return _ogr.GetOpenDS(*args)

def Open(*args, **kwargs):
    """Open(char const * utf8_path, int update=0) -> DataSource"""
    return _ogr.Open(*args, **kwargs)

def OpenShared(*args, **kwargs):
    """OpenShared(char const * utf8_path, int update=0) -> DataSource"""
    return _ogr.OpenShared(*args, **kwargs)

def GetDriverByName(*args):
    """GetDriverByName(char const * name) -> Driver"""
    return _ogr.GetDriverByName(*args)

def GetDriver(*args):
    """GetDriver(int driver_number) -> Driver"""
    return _ogr.GetDriver(*args)

def GeneralCmdLineProcessor(*args):
    """GeneralCmdLineProcessor(char ** papszArgv, int nOptions=0) -> char **"""
    return _ogr.GeneralCmdLineProcessor(*args)

def TermProgress_nocb(*args, **kwargs):
    """TermProgress_nocb(double dfProgress, char const * pszMessage=None, void * pData=None) -> int"""
    return _ogr.TermProgress_nocb(*args, **kwargs)

# Direct alias to the C-implemented terminal progress callback.
TermProgress = _ogr.TermProgress
| gpl-2.0 |
scott48074/Restorative-Justice-App | app/facesheet.py | 1 | 4506 | '''
Takes in a list of values from the database and creates a facesheet.
'''
import os
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH
def assemble_address(street, apartment, city, state, zip_code):
    """Build a single display address line from its components.

    Street, apartment and city are title-cased, the state is
    upper-cased, and the apartment segment is included only when a
    non-empty apartment value is given.
    """
    parts = [street.title()]
    if apartment:
        parts.append(f'APT: {apartment.title()}')
    parts.append(f'{city.title()},')
    parts.append(state.upper())
    parts.append(zip_code)
    return ' '.join(parts)
def parse_row(row_list):
    """Convert one raw database result row into a facesheet field dict.

    The positional layout of *row_list* is fixed by the query that
    produced it; only the columns used on the facesheet are kept.
    """
    info = {}
    info['case_number'] = row_list[1]
    info['occurred_date'] = row_list[2]
    info['incident_type'] = row_list[3].title()
    info['age'] = row_list[5]
    info['name'] = row_list[7].title()
    info['address'] = assemble_address(row_list[8], row_list[9],
                                       row_list[10], row_list[11],
                                       row_list[12])
    info['DOB'] = row_list[13]
    info['phone'] = row_list[14]
    info['race'] = row_list[15].title()
    info['sex'] = row_list[16].title()
    info['district'] = row_list[18].title()
    return info
def district_line(document, district):
    """Add a right-aligned, bold 'District:' header paragraph."""
    paragraph = document.add_paragraph()
    paragraph.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    paragraph.add_run(f'District: {district}').bold = True
def approval_line(document):
    """Add the right-aligned Selection/Background 'Pass | Fail' checklist."""
    paragraph = document.add_paragraph()
    paragraph.alignment = WD_ALIGN_PARAGRAPH.RIGHT
    # Both checklist rows have the identical Pass | Fail layout.
    for heading in ('Selection: ', 'Background: '):
        paragraph.add_run(heading).bold = True
        paragraph.add_run('Pass').bold = True
        paragraph.add_run(' | ').bold = True
        paragraph.add_run('Fail').bold = True
        paragraph.add_run().add_break()
def case_number_line(document, case_number):
    """Add a paragraph showing the report's case number."""
    document.add_paragraph().add_run(f'Case Number: {case_number}')
def name_line(document, name):
    """Add a paragraph showing the subject's full name."""
    document.add_paragraph().add_run(f'Name: {name}')
def bio_line(document, sex, race, dob, age):
    """Add a single paragraph with the tab-separated demographic fields."""
    paragraph = document.add_paragraph()
    for label, value in (('Sex:\t', sex), ('Race:\t', race),
                         ('DOB:\t', dob), ('Age:\t', age)):
        paragraph.add_run(f'{label}{value}')
    paragraph.add_run().add_break()
def charge_line(document):
    """Add the charge placeholder fields to be filled in by hand."""
    paragraph = document.add_paragraph()
    for text in ('Charge Type: State | Municipal', 'Description:',
                 'Court Date:', 'Citation#:'):
        paragraph.add_run(text)
        paragraph.add_run().add_break()
def address_line(document, address):
    """Add a paragraph showing the subject's mailing address."""
    document.add_paragraph().add_run(f'Address: {address}')
def phone_line(document, phone):
    """Add contact details: the phone number plus a blank Email field."""
    paragraph = document.add_paragraph()
    paragraph.add_run(f'Phone: {phone}')
    paragraph.add_run().add_break()
    paragraph.add_run('Email:')
def background_line(document):
    """Add bold section headings for the background-check notes."""
    for heading in ('Court Records:', 'Out of State Records:',
                    'Local Records:', 'Notes:'):
        document.add_paragraph().add_run(heading).bold = True
def last_name_first(name):
    """Reorder "First [Middle] Last [Suffix]" into an underscore-joined
    string that starts with the surname (used as a folder/file name).
    """
    suffixes = ['II', 'IV', 'JR', 'SR']
    parts = name.split()
    # Move the final word (surname, or a generational suffix) to the front.
    parts.insert(0, parts.pop())
    if parts[0][:2].upper() in suffixes:
        # A suffix was moved; pull the real surname in front of it too.
        parts.insert(0, parts.pop())
    return "_".join(parts)
def save_facesheet(document, directory, name, district, district_folders):
    """Save the facesheet document under the results tree.

    The file is written to
    ``<directory>/results/[<district>/]<Last_First>/<Last_First>.docx``,
    creating any missing folders.  When *district_folders* is true,
    facesheets are additionally grouped into per-district folders.
    """
    name = last_name_first(name)
    if district_folders:
        folder = f'{directory}/results/{district}/{name}'
    else:
        folder = f'{directory}/results/{name}'
    # exist_ok avoids both the duplicated isdir()/makedirs() logic and
    # the race between the check and the creation.
    os.makedirs(folder, exist_ok=True)
    document.save(f'{folder}/{name}.docx')
def assemble_sheet(row_list, directory, district_folders):
    """Build one facesheet .docx from a database row and save it.

    row_list: raw row from the case database (see parse_row for the
        expected column layout).
    directory: root output directory; files land under <directory>/results.
    district_folders: if true, group facesheets into per-district folders.
    """
    info_dict = parse_row(row_list)
    document = Document()
    # Sections are added top-to-bottom in facesheet order.
    district_line(document, info_dict['district'])
    approval_line(document)
    case_number_line(document, info_dict['case_number'])
    name_line(document, info_dict['name'])
    bio_line(document, info_dict['sex'], info_dict['race'], info_dict['DOB'], info_dict['age'])
    charge_line(document)
    address_line(document, info_dict['address'])
    phone_line(document, info_dict['phone'])
    background_line(document)
    save_facesheet(document, directory, info_dict['name'], info_dict['district'], district_folders)
def main():
    pass  # placeholder entry point; callers use assemble_sheet() directly

if __name__ == '__main__':
    main()
| mit |
HiSPARC/station-software | user/python/Lib/idlelib/MultiStatusBar.py | 10 | 1348 | from Tkinter import *
class MultiStatusBar(Frame):
    """A Tkinter frame holding a row of named status labels."""

    def __init__(self, master=None, **kw):
        if master is None:
            master = Tk()
        Frame.__init__(self, master, **kw)
        self.labels = {}

    def set_label(self, name, text='', side=LEFT, width=0):
        """Create (on first use) or update the label registered as *name*."""
        try:
            label = self.labels[name]
        except KeyError:
            label = Label(self, borderwidth=0, anchor=W)
            label.pack(side=side, pady=0, padx=4)
            self.labels[name] = label
        if width != 0:
            label.config(width=width)
        label.config(text=text)
def _multistatus_bar(parent):
    """Manual htest: show a MultiStatusBar and a button that updates it."""
    import re  # fix: re.split is used below but re was never imported in this file

    root = Tk()
    width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
    root.geometry("+%d+%d" % (x, y + 150))
    root.title("Test multistatus bar")
    frame = Frame(root)
    text = Text(frame)
    text.pack()
    msb = MultiStatusBar(frame)
    msb.set_label("one", "hello")
    msb.set_label("two", "world")
    msb.pack(side=BOTTOM, fill=X)

    def change():
        msb.set_label("one", "foo")
        msb.set_label("two", "bar")

    button = Button(root, text="Update status", command=change)
    button.pack(side=BOTTOM)
    frame.pack()
    frame.mainloop()
    # NOTE(review): frame.mainloop() above already blocks until the window
    # closes; this second loop is likely redundant — confirm before removing.
    root.mainloop()
if __name__ == '__main__':
    # Run the manual human-verified test for this widget.
    from idlelib.idle_test.htest import run
    run(_multistatus_bar)
| gpl-3.0 |
Adai0808/scrapy-1 | scrapy/downloadermiddlewares/httpcompression.py | 138 | 2278 | import zlib
from scrapy.utils.gz import gunzip, is_gzipped
from scrapy.http import Response, TextResponse
from scrapy.responsetypes import responsetypes
from scrapy.exceptions import NotConfigured
class HttpCompressionMiddleware(object):
    """Downloader middleware that lets compressed (gzip, deflate)
    traffic be exchanged with web sites by advertising and decoding
    the supported encodings."""

    @classmethod
    def from_crawler(cls, crawler):
        # Honour the COMPRESSION_ENABLED setting; when disabled the
        # middleware is removed from the chain entirely.
        if not crawler.settings.getbool('COMPRESSION_ENABLED'):
            raise NotConfigured
        return cls()

    def process_request(self, request, spider):
        # Advertise the encodings we know how to decode.
        request.headers.setdefault('Accept-Encoding', 'gzip,deflate')

    def process_response(self, request, response, spider):
        if not isinstance(response, Response):
            return response
        content_encoding = response.headers.getlist('Content-Encoding')
        if content_encoding and not is_gzipped(response):
            # Decode the outermost encoding and pop it off the header list.
            encoding = content_encoding.pop()
            decoded_body = self._decode(response.body, encoding.lower())
            respcls = responsetypes.from_args(headers=response.headers,
                                              url=response.url)
            kwargs = dict(cls=respcls, body=decoded_body)
            if issubclass(respcls, TextResponse):
                # force recalculating the encoding until we make sure the
                # responsetypes guessing is reliable
                kwargs['encoding'] = None
            response = response.replace(**kwargs)
            if not content_encoding:
                del response.headers['Content-Encoding']
        return response

    def _decode(self, body, encoding):
        """Return *body* decoded per *encoding*; unknown encodings pass through."""
        if encoding in ('gzip', 'x-gzip'):
            body = gunzip(body)
        elif encoding == 'deflate':
            try:
                body = zlib.decompress(body)
            except zlib.error:
                # ugly hack to work with raw deflate content that may
                # be sent by microsoft servers. For more information, see:
                # http://carsten.codimi.de/gzip.yaws/
                # http://www.port80software.com/200ok/archive/2005/10/31/868.aspx
                # http://www.gzip.org/zlib/zlib_faq.html#faq38
                body = zlib.decompress(body, -15)
        return body
| bsd-3-clause |
cwilson1031/omaha | third_party/gmock/scripts/generator/cpp/gmock_class.py | 85 | 5957 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generate Google Mock classes from base classes.
This program will read in a C++ source file and output the Google Mock
classes for the specified classes. If no class is specified, all
classes in the source file are emitted.
Usage:
gmock_class.py header-file.h [ClassName]...
Output is sent to stdout.
"""
__author__ = 'nnorwitz@google.com (Neal Norwitz)'
import os
import re
import sets
import sys
from cpp import ast
from cpp import utils
_VERSION = (1, 0, 1) # The version of this script.
# How many spaces to indent. Can set me with the INDENT environment variable.
_INDENT = 2
def _GenerateMethods(output_lines, source, class_node):
  """Append MOCK_METHODn lines for every virtual method of class_node.

  Args:
    output_lines: list of str; generated lines are appended in place.
    source: full source text the AST was parsed from; used to copy the
      parameter text verbatim.
    class_node: an ast.Class whose (non-ctor/dtor) virtual methods are mocked.
  """
  function_type = ast.FUNCTION_VIRTUAL | ast.FUNCTION_PURE_VIRTUAL
  ctor_or_dtor = ast.FUNCTION_CTOR | ast.FUNCTION_DTOR
  for node in class_node.body:
    # We only care about virtual functions.
    if (isinstance(node, ast.Function) and
        node.modifiers & function_type and
        not node.modifiers & ctor_or_dtor):
      # Pick out all the elements we need from the original function.
      const = ''
      if node.modifiers & ast.FUNCTION_CONST:
        const = 'CONST_'
      return_type = 'void'
      if node.return_type:
        # Add modifiers like 'const'.
        modifiers = ''
        if node.return_type.modifiers:
          modifiers = ' '.join(node.return_type.modifiers) + ' '
        return_type = modifiers + node.return_type.name
        if node.return_type.pointer:
          return_type += '*'
        if node.return_type.reference:
          return_type += '&'
      prefix = 'MOCK_%sMETHOD%d' % (const, len(node.parameters))
      args = ''
      if node.parameters:
        # Get the full text of the parameters from the start
        # of the first parameter to the end of the last parameter.
        start = node.parameters[0].start
        end = node.parameters[-1].end
        # Remove // comments.
        args_strings = re.sub(r'//.*', '', source[start:end])
        # Condense multiple spaces and eliminate newlines putting the
        # parameters together on a single line.  Ensure there is a
        # space in an argument which is split by a newline without
        # intervening whitespace, e.g.: int\nBar
        args = re.sub(' +', ' ', args_strings.replace('\n', ' '))
      # Create the prototype.
      indent = ' ' * _INDENT
      line = ('%s%s(%s,\n%s%s(%s));' %
              (indent, prefix, node.name, indent*3, return_type, args))
      output_lines.append(line)
def _GenerateMocks(filename, source, ast_list, desired_class_names):
  """Return the generated mock-class source lines for the selected classes.

  Classes requested in desired_class_names but absent from the AST, or the
  absence of any class at all, are reported on stderr.
  """
  processed_class_names = sets.Set()
  lines = []
  for node in ast_list:
    if (isinstance(node, ast.Class) and node.body and
        # desired_class_names being None means that all classes are selected.
        (not desired_class_names or node.name in desired_class_names)):
      class_name = node.name
      processed_class_names.add(class_name)
      class_node = node
      # Add namespace before the class.
      if class_node.namespace:
        lines.extend(['namespace %s {' % n for n in class_node.namespace])  # }
        lines.append('')
      # Add the class prolog.
      lines.append('class Mock%s : public %s {' % (class_name, class_name))  # }
      lines.append('%spublic:' % (' ' * (_INDENT // 2)))
      # Add all the methods.
      _GenerateMethods(lines, source, class_node)
      # Close the class.
      if lines:
        # If there are no virtual methods, no need for a public label.
        if len(lines) == 2:
          del lines[-1]
        # Only close the class if there really is a class.
        lines.append('};')
        lines.append('')  # Add an extra newline.
      # Close the namespace.
      if class_node.namespace:
        for i in range(len(class_node.namespace)-1, -1, -1):
          lines.append('} // namespace %s' % class_node.namespace[i])
        lines.append('')  # Add an extra newline.
  if desired_class_names:
    missing_class_name_list = list(desired_class_names - processed_class_names)
    if missing_class_name_list:
      missing_class_name_list.sort()
      sys.stderr.write('Class(es) not found in %s: %s\n' %
                       (filename, ', '.join(missing_class_name_list)))
  elif not processed_class_names:
    sys.stderr.write('No class found in %s\n' % filename)
  return lines
def main(argv=sys.argv):
  """Command-line entry point: read a C++ header, emit mocks to stdout.

  argv[1] is the header file; any further arguments select which
  classes to mock (default: all).  Returns 1 on usage/read errors.
  """
  if len(argv) < 2:
    sys.stderr.write('Google Mock Class Generator v%s\n\n' %
                     '.'.join(map(str, _VERSION)))
    sys.stderr.write(__doc__)
    return 1

  global _INDENT
  try:
    _INDENT = int(os.environ['INDENT'])
  except KeyError:
    pass  # INDENT not set; keep the default.
  except ValueError:
    # fix: was a bare `except:`; only a non-integer INDENT value can land here
    sys.stderr.write('Unable to use indent of %s\n' % os.environ.get('INDENT'))

  filename = argv[1]
  desired_class_names = None  # None means all classes in the source file.
  if len(argv) >= 3:
    desired_class_names = sets.Set(argv[2:])

  source = utils.ReadFile(filename)
  if source is None:
    return 1

  builder = ast.BuilderFromSource(source, filename)
  try:
    entire_ast = filter(None, builder.Generate())
  except KeyboardInterrupt:
    return
  except Exception:
    # fix: narrowed from a bare `except:` so SystemExit still propagates.
    # An error message was already printed since we couldn't parse.
    pass
  else:
    lines = _GenerateMocks(filename, source, entire_ast, desired_class_names)
    sys.stdout.write('\n'.join(lines))
if __name__ == '__main__':
  # fix: propagate main()'s status code (1 on usage/read error) instead of
  # discarding it and always exiting 0.
  sys.exit(main(sys.argv))
| apache-2.0 |
roselleebarle04/django | tests/m2m_recursive/models.py | 410 | 1120 | """
Many-to-many relationships between the same two tables
In this example, a ``Person`` can have many friends, who are also ``Person``
objects. Friendship is a symmetrical relationship - if I am your friend, you
are my friend. Here, ``friends`` is an example of a symmetrical
``ManyToManyField``.
A ``Person`` can also have many idols - but while I may idolize you, you may
not think the same of me. Here, ``idols`` is an example of a non-symmetrical
``ManyToManyField``. Only recursive ``ManyToManyField`` fields may be
non-symmetrical, and they are symmetrical by default.
This test validates that the many-to-many table is created using a mangled name
if there is a name clash, and tests that symmetry is preserved where
appropriate.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
    name = models.CharField(max_length=20)
    # Symmetrical recursive M2M (the default): if I am your friend, you
    # are my friend.
    friends = models.ManyToManyField('self')
    # Non-symmetrical recursive M2M: idolizing is one-way; the reverse
    # accessor is 'stalkers'.
    idols = models.ManyToManyField('self', symmetrical=False, related_name='stalkers')

    def __str__(self):
        return self.name
| bsd-3-clause |
kri5/pghoard | test/test_restore.py | 1 | 4618 | """
pghoard
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
from .base import PGHoardTestCase
from dateutil import tz
from pghoard.restore import create_recovery_conf, Restore, RestoreError
from unittest.mock import Mock
import datetime
import os
import pytest
class TestRecoveryConf(PGHoardTestCase):
    """Tests for pghoard's restore argument validation, basebackup
    selection and recovery.conf generation."""

    def test_recovery_targets(self, tmpdir):
        # Stub config loading and object storage so only the CLI
        # argument validation in run() executes.
        r = Restore()
        r._load_config = Mock()  # pylint: disable=protected-access
        r._get_object_storage = Mock()  # pylint: disable=protected-access
        with pytest.raises(RestoreError) as excinfo:
            r.run(args=[
                "get-basebackup",
                "--config=" + str(tmpdir),
                "--target-dir=" + str(tmpdir),
                "--site=test",
                "--recovery-target-action=promote",
                "--recovery-target-name=foobar",
                "--recovery-target-xid=42",
            ])
        assert "at most one" in str(excinfo.value)
        with pytest.raises(RestoreError) as excinfo:
            r.run(args=[
                "get-basebackup",
                "--config=" + str(tmpdir),
                "--target-dir=" + str(tmpdir),
                "--site=test",
                "--recovery-target-action=promote",
                "--recovery-target-time=foobar",
            ])
        assert "recovery_target_time 'foobar'" in str(excinfo.value)

    def test_find_nearest_backup(self):
        r = Restore()
        r.storage = Mock()
        basebackups = [{"name": "2015-02-12_0", "metadata": {"start-time": "2015-02-12T14:07:19+00:00"}},
                       {"name": "2015-02-13_0", "metadata": {"start-time": "2015-02-13T14:07:19+00:00"}}]
        r.storage.list_basebackups = Mock(return_value=basebackups)
        # Without a target time, the newest basebackup wins.
        assert r._find_nearest_basebackup() == "2015-02-13_0"  # pylint: disable=protected-access
        utc = tz.tzutc()
        # A target time before any backup is an error.
        recovery_time = datetime.datetime(2015, 2, 1)
        recovery_time = recovery_time.replace(tzinfo=utc)
        with pytest.raises(RestoreError):
            r._find_nearest_basebackup(recovery_time)  # pylint: disable=protected-access
        # Otherwise the newest backup started before the target is chosen.
        recovery_time = datetime.datetime(2015, 2, 12, 14, 20)
        recovery_time = recovery_time.replace(tzinfo=utc)
        assert r._find_nearest_basebackup(recovery_time) == "2015-02-12_0"  # pylint: disable=protected-access

    def test_create_recovery_conf(self):
        td = self.temp_dir
        fn = os.path.join(td, "recovery.conf")

        def getdata():
            with open(fn, "r") as fp:
                return fp.read()

        assert not os.path.exists(fn)
        create_recovery_conf(td, "dummysite", None)
        assert "primary_conninfo" not in getdata()
        create_recovery_conf(td, "dummysite", "")
        assert "primary_conninfo" not in getdata()
        create_recovery_conf(td, "dummysite", "dbname='test'")
        assert "primary_conninfo" in getdata()  # make sure it's there
        assert "''test''" in getdata()  # make sure it's quoted
        assert "standby_mode = 'on'" in getdata()
        content = create_recovery_conf(td, "dummysite", "dbname='test'", restore_to_master=True)
        assert "primary_conninfo" in content
        assert "standby_mode = 'on'" not in content
        content = create_recovery_conf(td, "dummysite",
                                       recovery_end_command="echo 'done' > /tmp/done",
                                       recovery_target_xid="42")
        assert content == getdata()
        assert "primary_conninfo" not in content
        assert "recovery_end_command = 'echo ''done'' > /tmp/done'" in content
        # NOTE: multiple recovery targets don't really make sense in
        # recovery.conf: PostgreSQL just uses the last entry.
        # create_recovery_conf doesn't check them as it's called late enough
        # for that check to be useless. Let's just make sure we can write
        # lines for all of them.
        now = datetime.datetime.now()
        content = create_recovery_conf(td, "dummysite",
                                       recovery_end_command="/bin/false",
                                       recovery_target_action="shutdown",
                                       recovery_target_name="testpoint",
                                       recovery_target_time=now,
                                       recovery_target_xid="42")
        assert "recovery_target_action" in content
        assert "recovery_target_name" in content
        assert "recovery_target_time" in content
        assert "recovery_target_xid" in content
        assert str(now) in content
| apache-2.0 |
shikhar413/openmc | tests/regression_tests/diff_tally/test.py | 10 | 4122 | import glob
import os
import pandas as pd
import openmc
import pytest
from tests.testing_harness import PyAPITestHarness
class DiffTallyTestHarness(PyAPITestHarness):
    """Regression harness exercising tally derivatives (material density,
    nuclide density and temperature) across several scores, estimators and
    filter combinations."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        # Set settings explicitly
        self._model.settings.batches = 3
        self._model.settings.inactive = 0
        self._model.settings.particles = 100
        self._model.settings.source = openmc.Source(space=openmc.stats.Box(
            [-160, -160, -183], [160, 160, 183]))
        self._model.settings.temperature['multipole'] = True

        filt_mats = openmc.MaterialFilter((1, 3))
        filt_eout = openmc.EnergyoutFilter((0.0, 0.625, 20.0e6))

        # We want density derivatives for both water and fuel to get coverage
        # for both fissile and non-fissile materials.
        d1 = openmc.TallyDerivative(derivative_id=1)
        d1.variable = 'density'
        d1.material = 3
        d2 = openmc.TallyDerivative(derivative_id=2)
        d2.variable = 'density'
        d2.material = 1

        # O-16 is a good nuclide to test against because it is present in both
        # water and fuel. Some routines need to recognize that they have the
        # perturbed nuclide but not the perturbed material.
        d3 = openmc.TallyDerivative(derivative_id=3)
        d3.variable = 'nuclide_density'
        d3.material = 1
        d3.nuclide = 'O16'

        # A fissile nuclide, just for good measure.
        d4 = openmc.TallyDerivative(derivative_id=4)
        d4.variable = 'nuclide_density'
        d4.material = 1
        d4.nuclide = 'U235'

        # Temperature derivatives.
        d5 = openmc.TallyDerivative(derivative_id=5)
        d5.variable = 'temperature'
        d5.material = 1

        derivs = [d1, d2, d3, d4, d5]

        # Cover the flux score.
        for deriv in derivs:
            t = openmc.Tally()
            t.scores = ['flux']
            t.filters = [filt_mats]
            t.derivative = deriv
            self._model.tallies.append(t)

        # Cover supported scores with a collision estimator.
        for deriv in derivs:
            t = openmc.Tally()
            t.scores = ['total', 'absorption', 'scatter', 'fission', 'nu-fission']
            t.filters = [filt_mats]
            t.nuclides = ['total', 'U235']
            t.derivative = deriv
            self._model.tallies.append(t)

        # Cover an analog estimator.
        for deriv in derivs:
            t = openmc.Tally()
            t.scores = ['absorption']
            t.filters = [filt_mats]
            t.estimator = 'analog'
            t.derivative = deriv
            self._model.tallies.append(t)

        # Energyout filter and total nuclide for the density derivatives.
        for deriv in derivs[:2]:
            t = openmc.Tally()
            t.scores = ['nu-fission', 'scatter']
            t.filters = [filt_mats, filt_eout]
            t.nuclides = ['total', 'U235']
            t.derivative = deriv
            self._model.tallies.append(t)

        # Energyout filter without total nuclide for other derivatives.
        for deriv in derivs[2:]:
            t = openmc.Tally()
            t.scores = ['nu-fission', 'scatter']
            t.filters = [filt_mats, filt_eout]
            t.nuclides = ['U235']
            t.derivative = deriv
            self._model.tallies.append(t)

    def _get_results(self):
        """Return the derivative-tally results from the statepoint as a CSV string."""
        # Read the statepoint and summary files.
        statepoint = glob.glob(os.path.join(os.getcwd(), self._sp_name))[0]
        sp = openmc.StatePoint(statepoint)

        # Extract the tally data as a Pandas DataFrame.  DataFrame.append was
        # deprecated in pandas 1.4 and removed in 2.0, so collect the frames
        # and concatenate them in one pass instead.
        frames = [t.get_pandas_dataframe() for t in sp.tallies.values()]
        df = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

        # Extract the relevant data as a CSV string.
        cols = ('d_material', 'd_nuclide', 'd_variable', 'score', 'mean',
                'std. dev.')
        return df.to_csv(None, columns=cols, index=False, float_format='%.7e')
def test_diff_tally():
    """Run the differential-tally regression harness against statepoint.3.h5."""
    DiffTallyTestHarness('statepoint.3.h5').main()
| mit |
wtgme/labeldoc2vec | gensim/models/labeldoc2vec.py | 1 | 45979 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Deep learning via the distributed memory and distributed bag of words models from
[1]_, using either hierarchical softmax or negative sampling [2]_ [3]_.
**Make sure you have a C compiler before installing gensim, to use optimized (compiled)
doc2vec training** (70x speedup [blog]_).
Initialize a model with e.g.::
>>> model = Doc2Vec(documents, size=100, window=8, min_count=5, workers=4)
Persist a model to disk with::
>>> model.save(fname)
>>> model = Doc2Vec.load(fname) # you can continue training with the loaded model!
The model can also be instantiated from an existing file on disk in the word2vec C format::
>>> model = Doc2Vec.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> model = Doc2Vec.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
.. [1] Quoc Le and Tomas Mikolov. Distributed Representations of Sentences and Documents. http://arxiv.org/pdf/1405.4053v2.pdf
.. [2] Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. Efficient Estimation of Word Representations in Vector Space. In Proceedings of Workshop at ICLR, 2013.
.. [3] Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg Corrado, and Jeffrey Dean. Distributed Representations of Words and Phrases and their Compositionality.
In Proceedings of NIPS, 2013.
.. [blog] Optimizing word2vec in gensim, http://radimrehurek.com/2013/09/word2vec-in-python-part-two-optimizing/
"""
import logging
import os
import warnings
try:
from queue import Queue
except ImportError:
from Queue import Queue
from collections import namedtuple, defaultdict
from timeit import default_timer
from numpy import zeros, exp, random, sum as np_sum, outer, add as np_add, concatenate, \
repeat as np_repeat, array, float32 as REAL, empty, ones, memmap as np_memmap, \
sqrt, newaxis, ndarray, dot, vstack, dtype, divide as np_divide
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from word2vec import Word2Vec, Vocab, train_cbow_pair, train_sg_pair, train_batch_sg
from six.moves import xrange, zip
from six import string_types, integer_types, itervalues
# NOTE(review): this rebinds the name `random`, which was imported from numpy
# above (`from numpy import ... random ...`) -- after this line, module-level
# `random` refers to the stdlib module, not numpy's. Confirm which one the
# callers below actually expect.
import random
logger = logging.getLogger(__name__)
# Prefer the optimized cython training routines; fall back to the pure Python
# implementations when the compiled extension is not available.
try:
    from gensim.models.labeldoc2vec_inner import train_label_document_dbow, train_label_document_dm, train_label_document_dm_concat
    from gensim.models.word2vec_inner import FAST_VERSION  # blas-adaptation shared from word2vec
    logger.info('Fast version of {0} is being used'.format(__name__))
except ImportError:
    logger.info('Slow version of {0} is being used'.format(__name__))
    # failed... fall back to plain numpy (20-80x slower training than the above)
    FAST_VERSION = -1
# def train_dl_pair(negative, label_index, context_index, alpha, learn_vectors=True, learn_hidden=True,
# context_vectors=None, context_locks=None, label_vectors=None, label_locks=None):
# print '-----------------------------------'
# print '------------Lower version------------'
# print '-----------------------------------'
# l1 = context_vectors[context_index] # input word (NN input/projection layer)
# lock_factor = context_locks[context_index]
#
# neu1e = zeros(l1.shape)
#
# # use this word (label = 1) + `negative` other random words not from this sentence (label = 0)
# neg_size = min(negative+1, len(label_vectors))
# word_indices = random.sample(range(len(label_vectors)), neg_size)
# final_labels = zeros(neg_size)
# if label_index not in word_indices:
# word_indices[0] = label_index
# final_labels[0] = 1
# else:
# index_pos = word_indices.index(label_index)
# final_labels[index_pos] = 1
# l2b = label_vectors[word_indices] # 2d matrix, k+1 x layer1_size
# fb = 1. / (1. + exp(-dot(l1, l2b.T))) # propagate hidden -> output
# gb = (final_labels - fb) * alpha # vector of error gradients multiplied by the learning rate
# if learn_hidden:
# label_vectors[word_indices] += outer(gb, l1) # learn hidden -> output
# neu1e += dot(gb, l2b) # save error
#
# if learn_vectors:
# # l1 += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
# context_vectors[context_index] += neu1e * lock_factor # learn input -> hidden (mutates model.syn0[word2.index], if that is l1)
# return neu1e
#
#
# def train_label_document_dbow(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None,
# train_words=False, learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed bag of words model ("PV-DBOW") by training on a single document.
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# If `train_words` is True, simultaneously train word-to-word (not just doc-to-word)
# examples, exactly as per Word2Vec skip-gram training. (Without this option,
# word vectors are neither consulted nor updated during DBOW doc vector training.)
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have cython installed, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# if train_words and learn_words:
# train_batch_sg(model, [doc_words], alpha, work)
# for doctag_index in doctag_indexes:
# for word in doc_words:
# train_sg_pair(model, word, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks)
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
#
# return len(doc_words)
#
# def train_label_document_dm(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None, neu1=None,
# learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed memory model ("PV-DM") by training on a single document.
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`. This
# method implements the DM model with a projection (input) layer that is
# either the sum or mean of the context vectors, depending on the model's
# `dm_mean` configuration field. See `train_label_document_dm_concat()` for the DM
# model with a concatenated input layer.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have a C compiler, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if word_vectors is None:
# word_vectors = model.syn0
# if word_locks is None:
# word_locks = model.syn0_lockf
#
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
# model.vocab[w].sample_int > model.random.rand() * 2**32]
#
# for pos, word in enumerate(word_vocabs):
# reduced_window = model.random.randint(model.window) # `b` in the original doc2vec code
# start = max(0, pos - model.window + reduced_window)
# window_pos = enumerate(word_vocabs[start:(pos + model.window + 1 - reduced_window)], start)
# word2_indexes = [word2.index for pos2, word2 in window_pos if pos2 != pos]
# l1 = np_sum(word_vectors[word2_indexes], axis=0) + np_sum(doctag_vectors[doctag_indexes], axis=0)
# count = len(word2_indexes) + len(doctag_indexes)
# if model.cbow_mean and count > 1 :
# l1 /= count
# neu1e = train_cbow_pair(model, word, word2_indexes, l1, alpha,
# learn_vectors=False, learn_hidden=learn_hidden)
# if not model.cbow_mean and count > 1:
# neu1e /= count
# if learn_doctags:
# for i in doctag_indexes:
# doctag_vectors[i] += neu1e * doctag_locks[i]
# if learn_words:
# for i in word2_indexes:
# word_vectors[i] += neu1e * word_locks[i]
# for doctag_index in doctag_indexes:
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
# return len(word_vocabs)
#
# def train_label_document_dm_concat(model, doc_words, doctag_indexes, doclabel_indexes, alpha, work=None, neu1=None,
# learn_doctags=True, learn_words=True, learn_hidden=True,
# word_vectors=None, word_locks=None, doctag_vectors=None, doctag_locks=None,
# doclabel_vectors=None, doclabel_locks=None):
# """
# Update distributed memory model ("PV-DM") by training on a single document, using a
# concatenation of the context window word vectors (rather than a sum or average).
#
# Called internally from `Doc2Vec.train()` and `Doc2Vec.infer_vector()`.
#
# The document is provided as `doc_words`, a list of word tokens which are looked up
# in the model's vocab dictionary, and `doctag_indexes`, which provide indexes
# into the doctag_vectors array.
#
# Any of `learn_doctags', `learn_words`, and `learn_hidden` may be set False to
# prevent learning-updates to those respective model weights, as if using the
# (partially-)frozen model to infer other compatible vectors.
#
# This is the non-optimized, Python version. If you have a C compiler, gensim
# will use the optimized version from doc2vec_inner instead.
#
# """
# if word_vectors is None:
# word_vectors = model.syn0
# if word_locks is None:
# word_locks = model.syn0_lockf
#
# if doctag_vectors is None:
# doctag_vectors = model.docvecs.doctag_syn0
# if doctag_locks is None:
# doctag_locks = model.docvecs.doctag_syn0_lockf
#
# if doclabel_vectors is None:
# doclabel_vectors = model.labelvecs.doctag_syn0
# if doclabel_locks is None:
# doclabel_locks = model.labelvecs.doctag_syn0_lockf
#
# word_vocabs = [model.vocab[w] for w in doc_words if w in model.vocab and
# model.vocab[w].sample_int > model.random.rand() * 2**32]
# doctag_len = len(doctag_indexes)
# if doctag_len != model.dm_tag_count:
# return 0 # skip doc without expected number of doctag(s) (TODO: warn/pad?)
#
# null_word = model.vocab['\0']
# pre_pad_count = model.window
# post_pad_count = model.window
# padded_document_indexes = (
# (pre_pad_count * [null_word.index]) # pre-padding
# + [word.index for word in word_vocabs if word is not None] # elide out-of-Vocabulary words
# + (post_pad_count * [null_word.index]) # post-padding
# )
#
# for pos in range(pre_pad_count, len(padded_document_indexes) - post_pad_count):
# word_context_indexes = (
# padded_document_indexes[(pos - pre_pad_count): pos] # preceding words
# + padded_document_indexes[(pos + 1):(pos + 1 + post_pad_count)] # following words
# )
# word_context_len = len(word_context_indexes)
# predict_word = model.vocab[model.index2word[padded_document_indexes[pos]]]
# # numpy advanced-indexing copies; concatenate, flatten to 1d
# l1 = concatenate((doctag_vectors[doctag_indexes], word_vectors[word_context_indexes])).ravel()
# neu1e = train_cbow_pair(model, predict_word, None, l1, alpha,
# learn_hidden=learn_hidden, learn_vectors=False)
#
# # filter by locks and shape for addition to source vectors
# e_locks = concatenate((doctag_locks[doctag_indexes], word_locks[word_context_indexes]))
# neu1e_r = (neu1e.reshape(-1, model.vector_size)
# * np_repeat(e_locks, model.vector_size).reshape(-1, model.vector_size))
#
# if learn_doctags:
# np_add.at(doctag_vectors, doctag_indexes, neu1e_r[:doctag_len])
# if learn_words:
# np_add.at(word_vectors, word_context_indexes, neu1e_r[doctag_len:])
# for doctag_index in doctag_indexes:
# for doclabel_index in doclabel_indexes:
# train_dl_pair(model.negative, doclabel_index, doctag_index, alpha, learn_vectors=learn_doctags,
# learn_hidden=learn_hidden, context_vectors=doctag_vectors,
# context_locks=doctag_locks, label_vectors=doclabel_vectors, label_locks=doclabel_locks)
#
# return len(padded_document_indexes) - pre_pad_count - post_pad_count
class LabeledTaggedDocument(namedtuple('LabeledTaggedDocument', 'words tags labels')):
    """
    A single document, made up of `words` (a list of unicode string tokens),
    `tags` (a list of tokens) and `labels` (a list of class-label tokens).
    Tags may be one or more unicode string tokens, but typical practice
    (which will also be most memory-efficient) is for the tags list to
    include a unique integer id as the only tag.

    Replaces "sentence as a list of words" from Word2Vec.
    """
    def __str__(self):
        # BUG FIX: the format string previously had three placeholders for
        # four arguments, raising TypeError whenever __str__ was called.
        return '%s(%s, %s, %s)' % (self.__class__.__name__, self.words, self.tags, self.labels)
class DocvecsArray(utils.SaveLoad):
    """
    Default storage of doc vectors during/after training, in a numpy array.

    As the 'docvecs' property of a Doc2Vec model, allows access and
    comparison of document vectors.

    >>> docvec = d2v_model.docvecs[99]
    >>> docvec = d2v_model.docvecs['SENT_99']  # if string tag used in training
    >>> sims = d2v_model.docvecs.most_similar(99)
    >>> sims = d2v_model.docvecs.most_similar('SENT_99')
    >>> sims = d2v_model.docvecs.most_similar(docvec)

    If only plain int tags are presented during training, the dict (of
    string tag -> index) and list (of index -> string tag) stay empty,
    saving memory.

    Supplying a mapfile_path (as by initializing a Doc2Vec model with a
    'docvecs_mapfile' value) will use a pair of memory-mapped
    files as the array backing for doctag_syn0/doctag_syn0_lockf values.

    The Doc2Vec model automatically uses this class, but a future alternative
    implementation, based on another persistence mechanism like LMDB, LevelDB,
    or SQLite, should also be possible.
    """
    def __init__(self, mapfile_path=None):
        self.doctags = {}  # string -> Doctag (only filled if necessary)
        self.max_rawint = -1  # highest rawint-indexed doctag
        self.offset2doctag = []  # int offset-past-(max_rawint+1) -> String (only filled if necessary)
        self.count = 0
        self.mapfile_path = mapfile_path

    def note_doctag(self, key, document_no, document_length):
        """Note a document tag during initial corpus scan, for structure sizing."""
        if isinstance(key, int):
            self.max_rawint = max(self.max_rawint, key)
        else:
            if key in self.doctags:
                self.doctags[key] = self.doctags[key].repeat(document_length)
            else:
                self.doctags[key] = Doctag(len(self.offset2doctag), document_length, 1)
                self.offset2doctag.append(key)
        self.count = self.max_rawint + 1 + len(self.offset2doctag)

    def indexed_doctags(self, doctag_tokens):
        """Return indexes and backing-arrays used in training examples."""
        return ([self._int_index(index) for index in doctag_tokens if index in self],
                self.doctag_syn0, self.doctag_syn0_lockf, doctag_tokens)

    def trained_item(self, indexed_tuple):
        """Persist any changes made to the given indexes (matching tuple previously
        returned by indexed_doctags()); a no-op for this implementation"""
        pass

    def _int_index(self, index):
        """Return int index for either string or int index"""
        if isinstance(index, int):
            return index
        else:
            return self.max_rawint + 1 + self.doctags[index].offset

    def _key_index(self, i_index, missing=None):
        """Return string index for given int index, if available"""
        warnings.warn("use DocvecsArray.index_to_doctag", DeprecationWarning)
        return self.index_to_doctag(i_index)

    def index_to_doctag(self, i_index):
        """Return string key for given i_index, if available. Otherwise return raw int doctag (same int)."""
        candidate_offset = i_index - self.max_rawint - 1
        if 0 <= candidate_offset < len(self.offset2doctag):
            return self.offset2doctag[candidate_offset]
        else:
            return i_index

    def __getitem__(self, index):
        """
        Accept a single key (int or string tag) or list of keys as input.

        If a single string or int, return designated tag's vector
        representation, as a 1D numpy array.

        If a list, return designated tags' vector representations as a
        2D numpy array: #tags x #vector_size.
        """
        if isinstance(index, string_types + (int,)):
            return self.doctag_syn0[self._int_index(index)]
        return vstack([self[i] for i in index])

    def __len__(self):
        return self.count

    def __contains__(self, index):
        if isinstance(index, int):
            return index < self.count
        else:
            return index in self.doctags

    def borrow_from(self, other_docvecs):
        """Share tag-lookup structures with another DocvecsArray."""
        self.count = other_docvecs.count
        self.doctags = other_docvecs.doctags
        self.offset2doctag = other_docvecs.offset2doctag

    def clear_sims(self):
        """Drop the cached L2-normalized vectors (recomputed by init_sims())."""
        self.doctag_syn0norm = None

    def estimated_lookup_memory(self):
        """Estimated memory for tag lookup; 0 if using pure int tags."""
        return 60 * len(self.offset2doctag) + 140 * len(self.doctags)

    def reset_weights(self, model):
        """(Re-)initialize doctag vectors with deterministic per-tag seeds."""
        length = max(len(self.doctags), self.count)
        if self.mapfile_path:
            self.doctag_syn0 = np_memmap(self.mapfile_path+'.doctag_syn0', dtype=REAL,
                                         mode='w+', shape=(length, model.vector_size))
            self.doctag_syn0_lockf = np_memmap(self.mapfile_path+'.doctag_syn0_lockf', dtype=REAL,
                                               mode='w+', shape=(length,))
            self.doctag_syn0_lockf.fill(1.0)
        else:
            self.doctag_syn0 = empty((length, model.vector_size), dtype=REAL)
            self.doctag_syn0_lockf = ones((length,), dtype=REAL)  # zeros suppress learning

        for i in xrange(length):
            # construct deterministic seed from index AND model seed
            seed = "%d %s" % (model.seed, self.index_to_doctag(i))
            self.doctag_syn0[i] = model.seeded_vector(seed)

    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.

        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!

        Note that you **cannot continue training or inference** after doing a replace.
        The model becomes effectively read-only = you can call `most_similar`, `similarity`
        etc., but not `train` or `infer_vector`.
        """
        if getattr(self, 'doctag_syn0norm', None) is None or replace:
            logger.info("precomputing L2-norms of doc weight vectors")
            if replace:
                for i in xrange(self.doctag_syn0.shape[0]):
                    self.doctag_syn0[i, :] /= sqrt((self.doctag_syn0[i, :] ** 2).sum(-1))
                self.doctag_syn0norm = self.doctag_syn0
            else:
                if self.mapfile_path:
                    self.doctag_syn0norm = np_memmap(
                        self.mapfile_path+'.doctag_syn0norm', dtype=REAL,
                        mode='w+', shape=self.doctag_syn0.shape)
                else:
                    self.doctag_syn0norm = empty(self.doctag_syn0.shape, dtype=REAL)
                np_divide(self.doctag_syn0, sqrt((self.doctag_syn0 ** 2).sum(-1))[..., newaxis], self.doctag_syn0norm)

    def most_similar(self, positive=None, negative=None, topn=10, clip_start=0, clip_end=None, indexer=None):
        """
        Find the top-N most similar docvecs known from training. Positive docs contribute
        positively towards the similarity, negative docs negatively.

        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given docs. Docs may be specified as vectors, integer indexes
        of trained docvecs, or if the documents were originally presented with string tags,
        by the corresponding tags.

        The 'clip_start' and 'clip_end' allow limiting results to a particular contiguous
        range of the underlying doctag_syn0norm vectors. (This may be useful if the ordering
        there was chosen to be significant, such as more popular tag IDs in lower indexes.)
        """
        # Mutable default arguments ([]) are shared across calls; use None
        # sentinels instead. Passing an explicit empty list still works.
        if positive is None:
            positive = []
        if negative is None:
            negative = []
        self.init_sims()
        clip_end = clip_end or len(self.doctag_syn0norm)

        if isinstance(positive, string_types + integer_types) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]

        # add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
        positive = [
            (doc, 1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
            else doc for doc in positive
        ]
        negative = [
            (doc, -1.0) if isinstance(doc, string_types + (ndarray,) + integer_types)
            else doc for doc in negative
        ]

        # compute the weighted average of all docs
        all_docs, mean = set(), []
        for doc, weight in positive + negative:
            if isinstance(doc, ndarray):
                mean.append(weight * doc)
            elif doc in self.doctags or doc < self.count:
                mean.append(weight * self.doctag_syn0norm[self._int_index(doc)])
                all_docs.add(self._int_index(doc))
            else:
                raise KeyError("doc '%s' not in trained set" % doc)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)

        if indexer is not None:
            return indexer.most_similar(mean, topn)

        dists = dot(self.doctag_syn0norm[clip_start:clip_end], mean)
        if not topn:
            return dists
        best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
        # ignore (don't return) docs from the input
        result = [(self.index_to_doctag(sim), float(dists[sim])) for sim in best if sim not in all_docs]
        return result[:topn]

    def doesnt_match(self, docs):
        """
        Which doc from the given list doesn't go with the others?

        (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        """
        self.init_sims()

        docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count]  # filter out unknowns
        logger.debug("using docs %s", docs)  # lazy formatting; only built if DEBUG enabled
        if not docs:
            raise ValueError("cannot select a doc from an empty list")
        # vstack requires a sequence: modern numpy rejects a bare generator.
        vectors = vstack([self.doctag_syn0norm[self._int_index(doc)] for doc in docs]).astype(REAL)
        mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
        dists = dot(vectors, mean)
        return sorted(zip(dists, docs))[0][1]

    def similarity(self, d1, d2):
        """
        Compute cosine similarity between two docvecs in the trained set, specified by int index or
        string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        """
        return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))

    def n_similarity(self, ds1, ds2):
        """
        Compute cosine similarity between two sets of docvecs from the trained set, specified by int
        index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        """
        v1 = [self[doc] for doc in ds1]
        v2 = [self[doc] for doc in ds2]
        return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))

    def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
        """
        Compute cosine similarity between two post-bulk out of training documents.

        Document should be a list of (word) tokens.
        """
        d1 = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
        d2 = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
        return dot(matutils.unitvec(d1), matutils.unitvec(d2))
class Doctag(namedtuple('Doctag', 'offset, word_count, doc_count')):
    """A string document tag discovered during the initial vocabulary
    scan. (The document-vector equivalent of a Vocab object.)

    Will not be used if all presented document tags are ints.

    The offset is only the true index into the doctags_syn0/doctags_syn0_lockf
    if-and-only-if no raw-int tags were used. If any raw-int tags were used,
    string Doctag vectors begin at index (max_rawint + 1), so the true index is
    (rawint_index + 1 + offset). See also DocvecsArray.index_to_doctag().
    """
    __slots__ = ()

    def repeat(self, word_count):
        """Return a copy counting `word_count` additional words and one more document."""
        updated_words = self.word_count + word_count
        updated_docs = self.doc_count + 1
        return self._replace(word_count=updated_words, doc_count=updated_docs)
class LabelDoc2Vec(Word2Vec):
"""Class for training, using and evaluating neural networks described in http://arxiv.org/pdf/1405.4053v2.pdf"""
    def __init__(self, documents=None, size=300, alpha=0.025, window=8, min_count=5,
                 max_vocab_size=None, sample=0, seed=1, workers=1, min_alpha=0.0001,
                 dm=1, hs=1, negative=0, dbow_words=0, dm_mean=0, dm_concat=0, dm_tag_count=1,
                 docvecs=None, docvecs_mapfile=None, labelvecs=None, labelvecs_mapfile=None,
                 comment=None, trim_rule=None, **kwargs):
        """
        Initialize the model from an iterable of `documents`. Each document is a
        LabeledTaggedDocument object that will be used for training.

        The `documents` iterable can be simply a list of LabeledTaggedDocument elements, but for larger corpora,
        consider an iterable that streams the documents directly from disk/network.

        If you don't supply `documents`, the model is left uninitialized -- use if
        you plan to initialize it in some other way.

        `dm` defines the training algorithm. By default (`dm=1`), 'distributed memory' (PV-DM) is used.
        Otherwise, `distributed bag of words` (PV-DBOW) is employed.

        `size` is the dimensionality of the feature vectors.

        `window` is the maximum distance between the predicted word and context words used for prediction
        within a document.

        `alpha` is the initial learning rate (will linearly drop to zero as training progresses).

        `seed` = for the random number generator.
        Note that for a fully deterministically-reproducible run, you must also limit the model to
        a single worker thread, to eliminate ordering jitter from OS thread scheduling. (In Python
        3, reproducibility between interpreter launches also requires use of the PYTHONHASHSEED
        environment variable to control hash randomization.)

        `min_count` = ignore all words with total frequency lower than this.

        `max_vocab_size` = limit RAM during vocabulary building; if there are more unique
        words than this, then prune the infrequent ones. Every 10 million word types
        need about 1GB of RAM. Set to `None` for no limit (default).

        `sample` = threshold for configuring which higher-frequency words are randomly downsampled;
        default is 0 (off), useful value is 1e-5.

        `workers` = use this many worker threads to train the model (=faster training with multicore machines).

        `iter` = number of iterations (epochs) over the corpus. The default inherited from Word2Vec is 5,
        but values of 10 or 20 are common in published 'Paragraph Vector' experiments.

        `hs` = if 1 (default), hierarchical sampling will be used for model training (else set to 0).

        `negative` = if > 0, negative sampling will be used, the int for negative
        specifies how many "noise words" should be drawn (usually between 5-20).

        `dm_mean` = if 0 (default), use the sum of the context word vectors. If 1, use the mean.
        Only applies when dm is used in non-concatenative mode.

        `dm_concat` = if 1, use concatenation of context vectors rather than sum/average;
        default is 0 (off). Note concatenation results in a much-larger model, as the input
        is no longer the size of one (sampled or arithmatically combined) word vector, but the
        size of the tag(s) and all words in the context strung together.

        `dm_tag_count` = expected constant number of document tags per document, when using
        dm_concat mode; default is 1.

        `dbow_words` if set to 1 trains word-vectors (in skip-gram fashion) simultaneous with DBOW
        doc-vector training; default is 0 (faster training of doc-vectors only).

        `trim_rule` = vocabulary trimming rule, specifies whether certain words should remain
        in the vocabulary, be trimmed away, or handled using the default (discard if word count < min_count).
        Can be None (min_count will be used), or a callable that accepts parameters (word, count, min_count) and
        returns either util.RULE_DISCARD, util.RULE_KEEP or util.RULE_DEFAULT.
        Note: The rule, if given, is only used prune vocabulary during build_vocab() and is not stored as part
        of the model.
        """
        # Word2Vec's `sg` flag is the inverse of `dm`:
        # dm=1 (PV-DM) -> sg=0; dm=0 (PV-DBOW) -> sg=1.
        super(LabelDoc2Vec, self).__init__(
            size=size, alpha=alpha, window=window, min_count=min_count, max_vocab_size=max_vocab_size,
            sample=sample, seed=seed, workers=workers, min_alpha=min_alpha,
            sg=(1+dm) % 2, hs=hs, negative=negative, cbow_mean=dm_mean,
            null_word=dm_concat, **kwargs)
        self.dbow_words = dbow_words
        self.dm_concat = dm_concat
        self.dm_tag_count = dm_tag_count
        if self.dm and self.dm_concat:
            # Concatenative input layer: the doc tag vector(s) plus the full
            # context window on both sides, strung together end to end.
            self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
        else:
            self.layer1_size = size
        # Two independent vector tables: one keyed by document tag, one by
        # class label; both use the DocvecsArray storage.
        self.docvecs = docvecs or DocvecsArray(docvecs_mapfile)
        self.labelvecs = labelvecs or DocvecsArray(labelvecs_mapfile)
        self.comment = comment
        if documents is not None:
            self.build_vocab(documents, trim_rule=trim_rule)
            self.train(documents)
@property
def dm(self):
return not self.sg # opposite of SG
@property
def dbow(self):
return self.sg # same as SG
def clear_sims(self):
super(LabelDoc2Vec, self).clear_sims()
self.docvecs.clear_sims()
self.labelvecs.clear_sims()
def reset_weights(self):
if self.dm and self.dm_concat:
# expand l1 size to match concatenated tags+words length
self.layer1_size = (self.dm_tag_count + (2 * self.window)) * self.vector_size
logger.info("using concatenative %d-dimensional layer1" % (self.layer1_size))
super(LabelDoc2Vec, self).reset_weights()
self.docvecs.reset_weights(self)
self.labelvecs.reset_weights(self)
def reset_from(self, other_model):
"""Reuse shareable structures from other_model."""
self.docvecs.borrow_from(other_model.docvecs)
self.labelvecs.borrow_from(other_model.labelvecs)
super(LabelDoc2Vec, self).reset_from(other_model)
    def scan_vocab(self, documents, progress_per=10000, trim_rule=None, update=False):
        """Collect raw word counts plus tag/label registries from `documents`.

        Side effects: fills `self.raw_vocab` and `self.corpus_count`, and
        registers every tag/label in `self.docvecs` / `self.labelvecs`.
        `update` is accepted for API compatibility but not used here.
        """
        logger.info("collecting all words and their counts")
        document_no = -1  # stays -1 if the corpus is empty
        total_words = 0
        min_reduce = 1  # pruning threshold; raised after each prune pass
        interval_start = default_timer() - 0.00001  # guard against next sample being identical
        interval_count = 0
        checked_string_types = 0  # so we warn at most once about str-typed 'words'
        vocab = defaultdict(int)
        for document_no, document in enumerate(documents):
            if not checked_string_types:
                if isinstance(document.words, string_types):
                    logger.warn("Each 'words' should be a list of words (usually unicode strings)."
                                "First 'words' here is instead plain %s." % type(document.words))
                checked_string_types += 1
            if document_no % progress_per == 0:
                # Report throughput since the previous progress report.
                interval_rate = (total_words - interval_count) / (default_timer() - interval_start)
                logger.info("PROGRESS: at example #%i, processed %i words (%i/s), %i word types, %i tags, %i labels",
                            document_no, total_words, interval_rate, len(vocab), len(self.docvecs), len(self.labelvecs))
                interval_start = default_timer()
                interval_count = total_words
            document_length = len(document.words)
            # Register each tag and label with its document context.
            for tag in document.tags:
                self.docvecs.note_doctag(tag, document_no, document_length)
            for label in document.labels:
                self.labelvecs.note_doctag(label, document_no, document_length)
            for word in document.words:
                vocab[word] += 1
            total_words += len(document.words)
            # Keep memory bounded: prune rare words whenever vocab overflows.
            if self.max_vocab_size and len(vocab) > self.max_vocab_size:
                utils.prune_vocab(vocab, min_reduce, trim_rule=trim_rule)
                min_reduce += 1
        logger.info("collected %i word types and %i unique tags and %i unique labels from a corpus of %i examples and %i words",
                    len(vocab), len(self.docvecs), len(self.labelvecs), document_no + 1, total_words)
        self.corpus_count = document_no + 1
        self.raw_vocab = vocab
    def _do_train_job(self, job, alpha, inits):
        """Train on one batch (`job`) of documents at learning rate `alpha`.

        `inits` is the (work, neu1) pair of pre-allocated work buffers.
        Returns (effective trained word count, raw word count) for the
        caller's progress/alpha bookkeeping.
        """
        work, neu1 = inits
        tally = 0
        for doc in job:
            # Look up (or lazily allocate) vectors/locks for this document's
            # tags and labels.
            indexed_doctags = self.docvecs.indexed_doctags(doc.tags)
            indexed_doclabels = self.labelvecs.indexed_doctags(doc.labels)
            doctag_indexes, doctag_vectors, doctag_locks, ignored = indexed_doctags
            doclabel_indexes, doclabel_vectors, doclabel_locks, ignored = indexed_doclabels
            # Dispatch to the training routine matching the configured mode.
            if self.sg:  # PV-DBOW
                tally += train_label_document_dbow(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work,
                                                   train_words=self.dbow_words, doctag_vectors=doctag_vectors,
                                                   doctag_locks=doctag_locks, doclabel_vectors=doclabel_vectors,
                                                   doclabel_locks=doclabel_locks)
            elif self.dm_concat:  # PV-DM, concatenated context
                tally += train_label_document_dm_concat(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
                                                        doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
                                                        doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
            else:  # PV-DM, sum/mean context
                tally += train_label_document_dm(self, doc.words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
                                                 doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
                                                 doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
            # Write any deferred training results back into the stores.
            self.docvecs.trained_item(indexed_doctags)
            self.labelvecs.trained_item(indexed_doclabels)
        return tally, self._raw_word_count(job)
def _raw_word_count(self, job):
"""Return the number of words in a given job."""
return sum(len(sentence.words) for sentence in job)
    def infer_vector_label(self, doc_words, alpha=0.1, min_alpha=0.0001, steps=5):
        """
        Infer a vector for given post-bulk training document.
        Document should be a list of (word) tokens.

        `alpha`/`min_alpha` bound the learning rate that is shrunk over
        `steps` passes.  Returns the inferred doctag vector; the label
        vector trained alongside it is discarded.
        """
        # Seed one fresh doctag vector (and a parallel label vector)
        # deterministically from the document text.
        doctag_vectors = empty((1, self.vector_size), dtype=REAL)
        doctag_vectors[0] = self.seeded_vector(' '.join(doc_words))
        doctag_locks = ones(1, dtype=REAL)
        doctag_indexes = [0]
        doclabel_vectors = empty((1, self.vector_size), dtype=REAL)
        doclabel_vectors[0] = self.seeded_vector(' '.join(doc_words))
        doclabel_locks = ones(1, dtype=REAL)
        doclabel_indexes = [0]
        work = zeros(self.layer1_size, dtype=REAL)
        if not self.sg:
            # neu1 is only needed by the PV-DM training routines.
            neu1 = matutils.zeros_aligned(self.layer1_size, dtype=REAL)
        for i in range(steps):
            # Train only the new vectors: word and hidden weights stay frozen.
            if self.sg:
                train_label_document_dbow(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work,
                                          learn_words=False, learn_hidden=False,
                                          doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
                                          doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
            elif self.dm_concat:
                train_label_document_dm_concat(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
                                               learn_words=False, learn_hidden=False,
                                               doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
                                               doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
            else:
                train_label_document_dm(self, doc_words, doctag_indexes, doclabel_indexes, alpha, work, neu1,
                                        learn_words=False, learn_hidden=False,
                                        doctag_vectors=doctag_vectors, doctag_locks=doctag_locks,
                                        doclabel_vectors=doclabel_vectors, doclabel_locks=doclabel_locks)
            # Shrink alpha toward min_alpha over the remaining steps.
            alpha = ((alpha - min_alpha) / (steps - i)) + min_alpha
        return doctag_vectors[0]
def estimate_memory(self, vocab_size=None, report=None):
"""Estimate required memory for a model using current settings."""
report = report or {}
report['doctag_lookup'] = self.docvecs.estimated_lookup_memory()
report['doctag_syn0'] = self.docvecs.count * self.vector_size * dtype(REAL).itemsize
report['doclabel_lookup'] = self.labelvecs.estimated_lookup_memory()
report['doclabel_syn0'] = self.labelvecs.count * self.vector_size * dtype(REAL).itemsize
return super(LabelDoc2Vec, self).estimate_memory(vocab_size, report=report)
    def __str__(self):
        """Abbreviated name reflecting major configuration parameters."""
        segments = []
        if self.comment:
            segments.append('"%s"' % self.comment)
        if self.sg:
            if self.dbow_words:
                segments.append('dbow+w')  # also training words
            else:
                segments.append('dbow')  # PV-DBOW (skip-gram-style)
        else:  # PV-DM...
            if self.dm_concat:
                segments.append('dm/c')  # ...with concatenative context layer
            else:
                if self.cbow_mean:
                    segments.append('dm/m')  # mean of context vectors
                else:
                    segments.append('dm/s')  # sum of context vectors
        segments.append('d%d' % self.vector_size)  # dimensions
        if self.negative:
            segments.append('n%d' % self.negative)  # negative samples
        if self.hs:
            segments.append('hs')  # hierarchical softmax
        if not self.sg or (self.sg and self.dbow_words):
            segments.append('w%d' % self.window)  # window size, when relevant
        if self.min_count > 1:
            segments.append('mc%d' % self.min_count)
        if self.sample > 0:
            segments.append('s%g' % self.sample)
        if self.workers > 1:
            segments.append('t%d' % self.workers)
        return '%s(%s)' % (self.__class__.__name__, ','.join(segments))
class TaggedBrownCorpus(object):
    """Iterate over documents from the Brown corpus (part of NLTK data),
    yielding each document out as a LabeledTaggedDocument object."""

    def __init__(self, dirname):
        self.dirname = dirname

    def __iter__(self):
        for entry in os.listdir(self.dirname):
            path = os.path.join(self.dirname, entry)
            if not os.path.isfile(path):
                continue
            for line_no, raw_line in enumerate(utils.smart_open(path)):
                text = utils.to_unicode(raw_line)
                # each file line is a single document in the Brown corpus;
                # each token is encoded as WORD/POS_TAG
                pairs = [tok.split('/') for tok in text.split()
                         if len(tok.split('/')) == 2]
                # skip words whose tag is non-alphabetic (punctuation etc.)
                words = ["%s/%s" % (word.lower(), tag[:2])
                         for word, tag in pairs if tag[:2].isalpha()]
                if not words:  # don't bother emitting empty documents
                    continue
                yield LabeledTaggedDocument(
                    words, ['%s_SENT_%s' % (path, line_no)], [])
class TaggedLineDocument(object):
    """Simple format: one document = one line = one LabeledTaggedDocument
    object.  Words are expected to be already preprocessed and separated by
    whitespace; tags are constructed automatically from the line number."""

    def __init__(self, source):
        """
        `source` can be either a string (filename) or a file object.
        Example::
            documents = TaggedLineDocument('myfile.txt')
        Or for compressed files::
            documents = TaggedLineDocument('compressed_text.txt.bz2')
            documents = TaggedLineDocument('compressed_text.txt.gz')
        """
        self.source = source

    def _documents(self, handle):
        # One document per line; its line number is the sole tag, no labels.
        for line_no, raw_line in enumerate(handle):
            yield LabeledTaggedDocument(
                utils.to_unicode(raw_line).split(), [line_no], [])

    def __iter__(self):
        """Iterate through the lines in the source."""
        try:
            # Assume a file-like object; anything without seek() raises here.
            self.source.seek(0)
        except AttributeError:
            # Not file-like: treat the source as a filename to open.
            with utils.smart_open(self.source) as handle:
                for document in self._documents(handle):
                    yield document
        else:
            for document in self._documents(self.source):
                yield document
| lgpl-2.1 |
grahamking/goodenergy | campaign/management/commands/ge_copy_campaign.py | 1 | 4417 | """Copies the contents (indicators and actions) of one campaign into another
"""
# Copyright 2010,2011 Good Energy Research Inc. <graham@goodenergy.ca>, <jeremy@goodenergy.ca>
#
# This file is part of Good Energy.
#
# Good Energy is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Good Energy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Good Energy. If not, see <http://www.gnu.org/licenses/>.
#
# Disable the pylint check for dynamically added attributes. This happens a lot
# with Django DB model usage.
# pylint: disable-msg=E1101
# pylint: disable-msg=E1103
from django.core.management.base import BaseCommand, CommandError
from profile.models import Profile
from campaign.models import Campaign
from indicator.models import IndicatorLikert, Option
from action.models import Action
def copy_indicators(from_campaign, to_campaign):
    """Copies indicators and options from from_campaign to to_campaign"""
    for src in IndicatorLikert.objects.filter(campaign=from_campaign):
        dest, created = IndicatorLikert.objects.get_or_create(
            campaign=to_campaign,
            position=src.position,
            name=src.name,
            question=src.question,
            is_synthetic=src.is_synthetic,
            description=src.description)
        # Mirror every answer option onto the (possibly pre-existing) copy.
        for src_option in src.option_set.all():
            Option.objects.get_or_create(
                indicator=dest,
                value=src_option.value,
                position=src_option.position)
        if created:
            print('Created indicator %s' % dest)
def copy_actions(from_campaign, to_campaign, action_owner):
    """Copies Actions from from_campaign to to_campaign"""
    for src_action in from_campaign.action_set.all():
        dest_action, created = Action.objects.get_or_create(
            campaign=to_campaign,
            title=src_action.title,
            description=src_action.description,
            learn_more=src_action.learn_more,
            created_by=action_owner)
        if created:
            print('Created action %s' % dest_action)
class Command(BaseCommand):
    """Copies the contents (indicators and actions) of one campaign into another"""

    option_list = BaseCommand.option_list
    help = 'Copies the contents (indicators and actions) from one campaign into another'
    args = '<from_campaign_id> <to_campaign_id> <action_owner_username>'

    def handle(
            self,
            from_campaign_id=None,
            to_campaign_id=None,
            action_username=None,
            *args,
            **options):
        """Main entry point for command.

        Validates the three positional arguments, asks for interactive
        confirmation, then copies indicators and actions.  Raises
        CommandError for missing args, unknown ids, or user abort.
        """
        if not from_campaign_id or not to_campaign_id or not action_username:
            raise CommandError('Usage is ge_copy_campaign %s' % self.args)
        try:
            from_campaign = Campaign.objects.get(id=from_campaign_id)
        except Campaign.DoesNotExist:
            raise CommandError('FROM Campaign with id %s not found' % from_campaign_id)
        try:
            to_campaign = Campaign.objects.get(id=to_campaign_id)
        except Campaign.DoesNotExist:
            raise CommandError('TO Campaign with id %s not found' % to_campaign_id)
        try:
            action_user = Profile.objects.get(user__username=action_username)
        except Profile.DoesNotExist:
            raise CommandError("Profile for username %s not found" % action_username)
        print('Copying contents of {from_c} into {to_c}.'.\
            format(from_c=from_campaign, to_c = to_campaign))
        # NOTE(review): raw_input exists only on Python 2; on Python 3 this
        # must be input() -- confirm the interpreter this project targets.
        confirm = raw_input('Continue? [y|n]')
        if confirm != 'y':
            raise CommandError('Abort')
        copy_indicators(from_campaign, to_campaign)
        copy_actions(from_campaign, to_campaign, action_user)
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/sqlparse/filters.py | 9 | 24242 | # -*- coding: utf-8 -*-
import re
from os.path import abspath, join
from sqlparse import sql, tokens as T
from sqlparse.engine import FilterStack
from sqlparse.lexer import tokenize
from sqlparse.pipeline import Pipeline
from sqlparse.tokens import (Comment, Comparison, Keyword, Name, Punctuation,
String, Whitespace)
from sqlparse.utils import memoize_generator
from sqlparse.utils import split_unquoted_newlines
# --------------------------
# token process
class _CaseFilter:
ttype = None
def __init__(self, case=None):
if case is None:
case = 'upper'
assert case in ['lower', 'upper', 'capitalize']
self.convert = getattr(str, case)
def process(self, stack, stream):
for ttype, value in stream:
if ttype in self.ttype:
value = self.convert(value)
yield ttype, value
class KeywordCaseFilter(_CaseFilter):
    """Case-convert SQL keyword tokens (e.g. select -> SELECT)."""
    ttype = T.Keyword
class IdentifierCaseFilter(_CaseFilter):
    """Case-convert identifier tokens, leaving quoted identifiers untouched."""

    ttype = (T.Name, T.String.Symbol)

    def process(self, stack, stream):
        for ttype, value in stream:
            # Double-quoted identifiers are case-sensitive in SQL, so they
            # pass through unchanged.
            if ttype in self.ttype and value.strip()[0] != '"':
                value = self.convert(value)
            yield ttype, value
class TruncateStringFilter:
    """Shorten over-long single-quoted string literals to `width` characters,
    appending `char` as a truncation marker."""

    def __init__(self, width, char):
        self.width = max(width, 1)
        self.char = str(char)

    def process(self, stack, stream):
        for ttype, value in stream:
            if ttype is T.Literal.String.Single:
                # Handle both ''...'' and '...' quoting forms.
                if value[:2] == "''":
                    quote, inner = "''", value[2:-2]
                else:
                    quote, inner = "'", value[1:-1]
                if len(inner) > self.width:
                    value = quote + inner[:self.width] + self.char + quote
            yield ttype, value
class GetComments:
    """Get the comments from a stack"""

    def process(self, stack, stream):
        # Keep only tokens whose type lies within the Comment hierarchy.
        return ((ttype, value) for ttype, value in stream if ttype in Comment)
class StripComments:
    """Strip the comments from a stack"""

    def process(self, stack, stream):
        # Pass through every token whose type is outside the Comment family.
        return ((ttype, value) for ttype, value in stream
                if ttype not in Comment)
def StripWhitespace(stream):
    "Strip the useless whitespaces from a stream leaving only the minimal ones"
    last_type = None   # type of the last non-whitespace token yielded
    has_space = False  # pending whitespace seen since that token
    # Token groups around which a separating space is never needed.
    ignore_group = frozenset((Comparison, Punctuation))
    for token_type, value in stream:
        # We got a previous token (not empty first ones)
        if last_type:
            if token_type in Whitespace:
                has_space = True
                continue
        # Ignore first empty spaces and dot-commas
        # NOTE(review): this membership test compares token_type against a
        # tuple whose last element is the ignore_group frozenset itself; a
        # token type never equals that frozenset, so that element looks
        # inert -- confirm intent.
        elif token_type in (Whitespace, Whitespace.Newline, ignore_group):
            continue
        # Yield a whitespace if it can't be ignored
        if has_space:
            if not ignore_group.intersection((last_type, token_type)):
                yield Whitespace, ' '
            has_space = False
        # Yield the token and set its type for checking with the next one
        yield token_type, value
        last_type = token_type
class IncludeStatement:
    """Filter that enable a INCLUDE statement"""

    def __init__(self, dirpath=".", maxrecursive=10, raiseexceptions=False):
        """
        :param dirpath: base directory INCLUDE paths are resolved against
        :param maxrecursive: remaining nesting depth for nested INCLUDEs
        :param raiseexceptions: re-raise I/O and recursion errors instead of
            embedding them as SQL comments
        """
        if maxrecursive <= 0:
            raise ValueError('Max recursion limit reached')
        self.dirpath = abspath(dirpath)
        self.maxRecursive = maxrecursive
        self.raiseexceptions = raiseexceptions
        # True while scanning between an INCLUDE keyword and its file path.
        self.detected = False

    @memoize_generator
    def process(self, stack, stream):
        # Run over all tokens in the stream
        for token_type, value in stream:
            # INCLUDE statement found, set detected mode
            if token_type in Name and value.upper() == 'INCLUDE':
                self.detected = True
                continue
            # INCLUDE statement was found, parse it
            elif self.detected:
                # Omit whitespaces
                if token_type in Whitespace:
                    continue
                # Found file path to include
                if token_type in String.Symbol:
                    # if token_type in tokens.String.Symbol:
                    # Get path of file to include (strip surrounding quotes)
                    path = join(self.dirpath, value[1:-1])
                    try:
                        f = open(path)
                        raw_sql = f.read()
                        f.close()
                    # There was a problem loading the include file
                    except IOError as err:
                        # Raise the exception to the interpreter
                        if self.raiseexceptions:
                            raise
                        # Put the exception as a comment on the SQL code
                        yield Comment, '-- IOError: %s\n' % err
                    else:
                        # Create new FilterStack to parse readed file
                        # and add all its tokens to the main stack recursively
                        try:
                            filtr = IncludeStatement(self.dirpath,
                                                     self.maxRecursive - 1,
                                                     self.raiseexceptions)
                        # Max recursion limit reached
                        except ValueError as err:
                            # Raise the exception to the interpreter
                            if self.raiseexceptions:
                                raise
                            # Put the exception as a comment on the SQL code
                            yield Comment, '-- ValueError: %s\n' % err
                        # Tokenize the included file through a nested stack.
                        stack = FilterStack()
                        stack.preprocess.append(filtr)
                        for tv in stack.run(raw_sql):
                            yield tv
                    # Set normal mode
                    self.detected = False
                # Don't include any token while in detected mode
                continue
            # Normal token
            yield token_type, value
# ----------------------
# statement process
class StripCommentsFilter:
    """Remove comment tokens/groups from parsed statements in place."""

    def _get_next_comment(self, tlist):
        """Return the next comment token in `tlist`, or None."""
        # TODO(andi) Comment types should be unified, see related issue38
        token = tlist.token_next_by_instance(0, sql.Comment)
        if token is None:
            token = tlist.token_next_by_type(0, T.Comment)
        return token

    def _process(self, tlist):
        token = self._get_next_comment(tlist)
        while token:
            tidx = tlist.token_index(token)
            prev = tlist.token_prev(tidx, False)
            next_ = tlist.token_next(tidx, False)
            # Replace by whitespace if prev and next exist and if they're not
            # whitespaces. This doesn't apply if prev or next is a paranthesis.
            if (prev is not None and next_ is not None
                    and not prev.is_whitespace() and not next_.is_whitespace()
                    and not (prev.match(T.Punctuation, '(')
                             or next_.match(T.Punctuation, ')'))):
                tlist.tokens[tidx] = sql.Token(T.Whitespace, ' ')
            else:
                tlist.tokens.pop(tidx)
            token = self._get_next_comment(tlist)

    def process(self, stack, stmt):
        # Recurse into subgroups first so their indexes stay valid.
        [self.process(stack, sgroup) for sgroup in stmt.get_sublists()]
        self._process(stmt)
class StripWhitespaceFilter:
    """Collapse whitespace runs to single spaces and trim group edges."""

    def _stripws(self, tlist):
        # Dispatch to a group-specific handler when one exists.
        func_name = '_stripws_%s' % tlist.__class__.__name__.lower()
        func = getattr(self, func_name, self._stripws_default)
        func(tlist)

    def _stripws_default(self, tlist):
        # Squeeze consecutive whitespace tokens down to a single ' '.
        last_was_ws = False
        for token in tlist.tokens:
            if token.is_whitespace():
                if last_was_ws:
                    token.value = ''
                else:
                    token.value = ' '
            last_was_ws = token.is_whitespace()

    def _stripws_identifierlist(self, tlist):
        # Removes newlines before commas, see issue140
        last_nl = None
        for token in tlist.tokens[:]:
            if (token.ttype is T.Punctuation
                    and token.value == ','
                    and last_nl is not None):
                tlist.tokens.remove(last_nl)
            if token.is_whitespace():
                last_nl = token
            else:
                last_nl = None
        return self._stripws_default(tlist)

    def _stripws_parenthesis(self, tlist):
        # Drop whitespace just inside the opening/closing parenthesis.
        # NOTE(review): assumes the group holds at least '(' x ')' tokens;
        # confirm the parser guarantees that shape.
        if tlist.tokens[1].is_whitespace():
            tlist.tokens.pop(1)
        if tlist.tokens[-2].is_whitespace():
            tlist.tokens.pop(-2)
        self._stripws_default(tlist)

    def process(self, stack, stmt, depth=0):
        # Recurse into subgroups first, then strip this level; finally drop
        # a trailing whitespace of the whole statement (depth 0 only).
        [self.process(stack, sgroup, depth + 1)
         for sgroup in stmt.get_sublists()]
        self._stripws(stmt)
        if (
            depth == 0
            and stmt.tokens
            and stmt.tokens[-1].is_whitespace()
        ):
            stmt.tokens.pop(-1)
class ReindentFilter:
    """Reformat a parsed statement in place: insert newlines/indentation
    before clause keywords, DML/DDL keywords, WHERE/HAVING, CASE branches
    and identifier-list entries."""

    def __init__(self, width=2, char=' ', line_width=None):
        self.width = width            # characters per indent level
        self.char = char              # indent character (' ' or '\t')
        self.indent = 0               # current indent level
        self.offset = 0               # extra alignment offset, in chars
        self.line_width = line_width
        self._curr_stmt = None        # statement currently being processed
        self._last_stmt = None        # previous statement (for separators)

    def _flatten_up_to_token(self, token):
        """Yields all tokens up to token plus the next one."""
        # helper for _get_offset
        iterator = self._curr_stmt.flatten()
        for t in iterator:
            yield t
            if t == token:
                # BUGFIX: a bare `raise StopIteration` inside a generator is
                # converted to RuntimeError under PEP 479 (Python 3.7+);
                # a plain return ends the generator correctly everywhere.
                return

    def _get_offset(self, token):
        """Return `token`'s column relative to the current indent/offset."""
        raw = ''.join(map(str, self._flatten_up_to_token(token)))
        line = raw.splitlines()[-1]
        # Now take current offset into account and return relative offset.
        full_offset = len(line) - len(self.char * (self.width * self.indent))
        return full_offset - self.offset

    def nl(self):
        """Build a whitespace token holding a newline plus current indent."""
        # TODO: newline character should be configurable
        space = (self.char * ((self.indent * self.width) + self.offset))
        # Detect runaway indenting due to parsing errors
        if len(space) > 200:
            # something seems to be wrong, flip back
            self.indent = self.offset = 0
            space = (self.char * ((self.indent * self.width) + self.offset))
        ws = '\n' + space
        return sql.Token(T.Whitespace, ws)

    def _split_kwds(self, tlist):
        """Insert a newline before each clause-splitting keyword."""
        split_words = ('FROM', 'STRAIGHT_JOIN$', 'JOIN$', 'AND', 'OR',
                       'GROUP', 'ORDER', 'UNION', 'VALUES',
                       'SET', 'BETWEEN', 'EXCEPT', 'HAVING')

        def _next_token(i):
            t = tlist.token_next_match(i, T.Keyword, split_words,
                                       regex=True)
            # Skip the AND that belongs to a BETWEEN ... AND ... expression.
            if t and t.value.upper() == 'BETWEEN':
                t = _next_token(tlist.token_index(t) + 1)
                if t and t.value.upper() == 'AND':
                    t = _next_token(tlist.token_index(t) + 1)
            return t

        idx = 0
        token = _next_token(idx)
        added = set()
        while token:
            prev = tlist.token_prev(tlist.token_index(token), False)
            offset = 1
            if prev and prev.is_whitespace() and prev not in added:
                tlist.tokens.pop(tlist.token_index(prev))
                offset += 1
            uprev = str(prev)
            if (prev and (uprev.endswith('\n') or uprev.endswith('\r'))):
                # A line break already precedes the keyword; reuse it.
                nl = tlist.token_next(token)
            else:
                nl = self.nl()
                added.add(nl)
                tlist.insert_before(token, nl)
                offset += 1
            token = _next_token(tlist.token_index(nl) + offset)

    def _split_statements(self, tlist):
        """Newline before each DDL/DML keyword (except at the very start)."""
        idx = 0
        token = tlist.token_next_by_type(idx, (T.Keyword.DDL, T.Keyword.DML))
        while token:
            prev = tlist.token_prev(tlist.token_index(token), False)
            if prev and prev.is_whitespace():
                tlist.tokens.pop(tlist.token_index(prev))
            # only break if it's not the first token
            if prev:
                nl = self.nl()
                tlist.insert_before(token, nl)
            token = tlist.token_next_by_type(tlist.token_index(token) + 1,
                                             (T.Keyword.DDL, T.Keyword.DML))

    def _process(self, tlist):
        # Dispatch to _process_<groupclass> when defined, else the default.
        func_name = '_process_%s' % tlist.__class__.__name__.lower()
        func = getattr(self, func_name, self._process_default)
        func(tlist)

    def _process_where(self, tlist):
        """Break before WHERE and indent its condition list."""
        token = tlist.token_next_match(0, T.Keyword, 'WHERE')
        try:
            tlist.insert_before(token, self.nl())
        except ValueError:  # issue121, errors in statement
            pass
        self.indent += 1
        self._process_default(tlist)
        self.indent -= 1

    def _process_having(self, tlist):
        """Break before HAVING and indent its condition list."""
        token = tlist.token_next_match(0, T.Keyword, 'HAVING')
        try:
            tlist.insert_before(token, self.nl())
        except ValueError:  # issue121, errors in statement
            pass
        self.indent += 1
        self._process_default(tlist)
        self.indent -= 1

    def _process_parenthesis(self, tlist):
        """Indent a parenthesized sub-select; align other parentheses."""
        first = tlist.token_next(0)
        indented = False
        if first and first.ttype in (T.Keyword.DML, T.Keyword.DDL):
            self.indent += 1
            tlist.tokens.insert(0, self.nl())
            indented = True
        num_offset = self._get_offset(
            tlist.token_next_match(0, T.Punctuation, '('))
        self.offset += num_offset
        self._process_default(tlist, stmts=not indented)
        if indented:
            self.indent -= 1
        self.offset -= num_offset

    def _process_identifierlist(self, tlist):
        """Put one identifier per line (except inside function calls)."""
        identifiers = list(tlist.get_identifiers())
        if len(identifiers) > 1 and not tlist.within(sql.Function):
            first = list(identifiers[0].flatten())[0]
            if self.char == '\t':
                # when using tabs we don't count the actual word length
                # in spaces.
                num_offset = 1
            else:
                num_offset = self._get_offset(first) - len(first.value)
            self.offset += num_offset
            for token in identifiers[1:]:
                tlist.insert_before(token, self.nl())
            self.offset -= num_offset
        self._process_default(tlist)

    def _process_case(self, tlist):
        """Align WHEN/THEN/ELSE branches of a CASE expression."""
        is_first = True
        num_offset = None
        case = tlist.tokens[0]
        outer_offset = self._get_offset(case) - len(case.value)
        self.offset += outer_offset
        for cond, value in tlist.get_cases():
            if is_first:
                tcond = list(cond[0].flatten())[0]
                is_first = False
                num_offset = self._get_offset(tcond) - len(tcond.value)
                self.offset += num_offset
                continue
            if cond is None:
                token = value[0]
            else:
                token = cond[0]
            tlist.insert_before(token, self.nl())
        # Line breaks on group level are done. Now let's add an offset of
        # 5 (=length of "when", "then", "else") and process subgroups.
        self.offset += 5
        self._process_default(tlist)
        self.offset -= 5
        if num_offset is not None:
            self.offset -= num_offset
        end = tlist.token_next_match(0, T.Keyword, 'END')
        tlist.insert_before(end, self.nl())
        self.offset -= outer_offset

    def _process_default(self, tlist, stmts=True, kwds=True):
        if stmts:
            self._split_statements(tlist)
        if kwds:
            self._split_kwds(tlist)
        [self._process(sgroup) for sgroup in tlist.get_sublists()]

    def process(self, stack, stmt):
        if isinstance(stmt, sql.Statement):
            self._curr_stmt = stmt
        self._process(stmt)
        if isinstance(stmt, sql.Statement):
            # Separate consecutive statements with one (or two) blank lines.
            if self._last_stmt is not None:
                if str(self._last_stmt).endswith('\n'):
                    nl = '\n'
                else:
                    nl = '\n\n'
                stmt.tokens.insert(
                    0, sql.Token(T.Whitespace, nl))
            if self._last_stmt != stmt:
                self._last_stmt = stmt
class RightMarginFilter:
    """Wrap output lines that exceed `width` characters.

    NOTE: effectively disabled -- process() returns before doing any work
    (see the FIXME there).
    """

    keep_together = (
        # sql.TypeCast, sql.Identifier, sql.Alias,
    )

    def __init__(self, width=79):
        self.width = width
        self.line = ''  # text accumulated on the current output line

    def _process(self, stack, group, stream):
        for token in stream:
            if token.is_whitespace() and '\n' in token.value:
                # Track what remains on the current line after the break.
                if token.value.endswith('\n'):
                    self.line = ''
                else:
                    self.line = token.value.splitlines()[-1]
            elif (token.is_group()
                  and not token.__class__ in self.keep_together):
                token.tokens = self._process(stack, token, token.tokens)
            else:
                val = str(token)
                if len(self.line) + len(val) > self.width:
                    # Break the line, preserving its leading indentation.
                    match = re.search('^ +', self.line)
                    if match is not None:
                        indent = match.group()
                    else:
                        indent = ''
                    yield sql.Token(T.Whitespace, '\n%s' % indent)
                    self.line = indent
                self.line += val
            yield token

    def process(self, stack, group):
        # FIXME: Doesn't work ;)  (early return disables this filter)
        return
        group.tokens = self._process(stack, group, group.tokens)
class ColumnsSelect:
    """Get the columns names of a SELECT query"""

    def process(self, stack, stream):
        # Small state machine:
        #   mode 0 -- before the SELECT keyword
        #   mode 1 -- inside the column list
        #   mode 2 -- just behind an AS keyword (next name is the alias)
        #   mode 3 -- after FROM, done
        mode = 0
        oldValue = ""       # column expression accumulated so far
        parenthesis = 0     # nesting depth, so commas inside calls are kept
        for token_type, value in stream:
            # Ignore comments
            if token_type in Comment:
                continue
            # We have not detected a SELECT statement
            if mode == 0:
                if token_type in Keyword and value == 'SELECT':
                    mode = 1
            # We have detected a SELECT statement
            elif mode == 1:
                if value == 'FROM':
                    if oldValue:
                        yield oldValue
                    mode = 3    # Columns have been checked
                elif value == 'AS':
                    # The alias, not the expression, names the column.
                    oldValue = ""
                    mode = 2
                elif (token_type == Punctuation
                      and value == ',' and not parenthesis):
                    if oldValue:
                        yield oldValue
                    oldValue = ""
                elif token_type not in Whitespace:
                    if value == '(':
                        parenthesis += 1
                    elif value == ')':
                        parenthesis -= 1
                    oldValue += value
            # We are processing an AS keyword
            elif mode == 2:
                # We check also for Keywords because a bug in SQLParse
                if token_type == Name or token_type == Keyword:
                    yield value
                    mode = 1
# ---------------------------
# postprocess
class SerializerUnicode:
    """Render a parsed statement back to text with trailing whitespace
    stripped from every line."""

    def process(self, stack, stmt):
        lines = split_unquoted_newlines(str(stmt))
        return '\n'.join(line.rstrip() for line in lines)
def Tokens2Unicode(stream):
    """Concatenate the values of a (ttype, value) token stream into one string.

    Uses str.join instead of repeated ``+=`` so the build is linear rather
    than potentially quadratic in the number of tokens.
    """
    return ''.join(str(value) for _, value in stream)
class OutputFilter:
    """Base class for filters that wrap a statement in a host-language
    variable assignment (see OutputPythonFilter / OutputPHPFilter)."""

    varname_prefix = ''

    def __init__(self, varname='sql'):
        self.varname = self.varname_prefix + varname
        self.count = 0  # number of statements processed so far

    def _process(self, stream, varname, has_nl):
        raise NotImplementedError

    def process(self, stack, stmt):
        self.count += 1
        # From the second statement on, number the variables (sql2, sql3...).
        suffix = '%d' % self.count if self.count > 1 else ''
        varname = self.varname + suffix
        has_nl = len(str(stmt).strip().splitlines()) > 1
        stmt.tokens = self._process(stmt.tokens, varname, has_nl)
        return stmt
class OutputPythonFilter(OutputFilter):
    """Render the statement as a Python string assignment (sql = '...')."""

    def _process(self, stream, varname, has_nl):
        # SQL query asignation to varname
        if self.count > 1:
            yield sql.Token(T.Whitespace, '\n')
        yield sql.Token(T.Name, varname)
        yield sql.Token(T.Whitespace, ' ')
        yield sql.Token(T.Operator, '=')
        yield sql.Token(T.Whitespace, ' ')
        if has_nl:
            # Multi-line values are wrapped in parens of adjacent literals.
            yield sql.Token(T.Operator, '(')
        yield sql.Token(T.Text, "'")
        # Print the tokens on the quote
        for token in stream:
            # Token is a new line separator
            if token.is_whitespace() and '\n' in token.value:
                # Close quote and add a new line
                yield sql.Token(T.Text, " '")
                yield sql.Token(T.Whitespace, '\n')
                # Quote header on secondary lines
                yield sql.Token(T.Whitespace, ' ' * (len(varname) + 4))
                yield sql.Token(T.Text, "'")
                # Indentation
                after_lb = token.value.split('\n', 1)[1]
                if after_lb:
                    yield sql.Token(T.Whitespace, after_lb)
                continue
            # Token has escape chars
            elif "'" in token.value:
                token.value = token.value.replace("'", "\\'")
            # Put the token
            yield sql.Token(T.Text, token.value)
        # Close quote
        yield sql.Token(T.Text, "'")
        if has_nl:
            yield sql.Token(T.Operator, ')')
class OutputPHPFilter(OutputFilter):
    """Render the statement as a PHP string assignment ($sql = "...")."""

    varname_prefix = '$'

    def _process(self, stream, varname, has_nl):
        # SQL query asignation to varname (quote header)
        if self.count > 1:
            yield sql.Token(T.Whitespace, '\n')
        yield sql.Token(T.Name, varname)
        yield sql.Token(T.Whitespace, ' ')
        if has_nl:
            yield sql.Token(T.Whitespace, ' ')
        yield sql.Token(T.Operator, '=')
        yield sql.Token(T.Whitespace, ' ')
        yield sql.Token(T.Text, '"')
        # Print the tokens on the quote
        for token in stream:
            # Token is a new line separator
            if token.is_whitespace() and '\n' in token.value:
                # Close quote and add a new line
                yield sql.Token(T.Text, ' ";')
                yield sql.Token(T.Whitespace, '\n')
                # Quote header on secondary lines (append with .=)
                yield sql.Token(T.Name, varname)
                yield sql.Token(T.Whitespace, ' ')
                yield sql.Token(T.Operator, '.=')
                yield sql.Token(T.Whitespace, ' ')
                yield sql.Token(T.Text, '"')
                # Indentation
                after_lb = token.value.split('\n', 1)[1]
                if after_lb:
                    yield sql.Token(T.Whitespace, after_lb)
                continue
            # Token has escape chars
            elif '"' in token.value:
                token.value = token.value.replace('"', '\\"')
            # Put the token
            yield sql.Token(T.Text, token.value)
        # Close quote
        yield sql.Token(T.Text, '"')
        yield sql.Token(T.Punctuation, ';')
class Limit:
    """Get the LIMIT of a query.
    If not defined, return -1 (SQL specification for no LIMIT query)
    """

    def process(self, stack, stream):
        # Scan the last few tokens of the statement (in reverse) for a
        # LIMIT keyword and return the token value that followed it.
        # NOTE(review): the constants 7 and 4 bound/locate the window of
        # trailing tokens inspected; they look tuned to the tokenizer's
        # LIMIT-clause shape -- confirm against sqlparse token output.
        index = 7
        stream = list(stream)
        stream.reverse()
        # Run over all tokens in the stream from the end
        for token_type, value in stream:
            index -= 1
            # if index and token_type in Keyword:
            if index and token_type in Keyword and value == 'LIMIT':
                return stream[4 - index][1]
        return -1
def compact(stream):
    """Return a compacted version of the token stream: comments removed,
    whitespace squeezed to the minimum."""
    pipeline = Pipeline()
    pipeline.append(StripComments())
    pipeline.append(StripWhitespace)
    return pipeline(stream)
| mit |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.