repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
benreynwar/pyvivado | pyvivado/vivado_task.py | 1 | 3244 | import logging
import os
import subprocess
import warnings
from pyvivado import task, config
logger = logging.getLogger(__name__)
class VivadoTask(task.Task):
    '''
    A python wrapper to keep track of a spawned Vivado process.
    '''
    # How to log different kinds of Vivado messages.
    # NOTE(review): this map uses 'CRITICAL WARNING' (space) while
    # DEFAULT_FAILURE_MESSAGE_TYPES uses 'CRITICAL_WARNING' (underscore);
    # confirm which spelling Vivado actually emits before unifying them.
    MESSAGE_MAPPING = {
        'DEBUG': logger.debug,
        'INFO': logger.debug,
        'WARNING': logger.warning,
        'CRITICAL WARNING': logger.error,
        'ERROR': logger.error,
        'FATAL_ERROR': logger.error,
        # This is a hack to get 'assert's in the HDL with severity 'Failure'
        # to log an error message.
        'Failure': logger.error,
    }
    # Message types that mark the task as failed.
    DEFAULT_FAILURE_MESSAGE_TYPES = (
        'CRITICAL_WARNING', 'ERROR', 'FATAL_ERROR', 'Failure')

    @classmethod
    def create(cls, collection, command_text, description=None):
        '''
        Create the files necessary for the Vivado process.

        Args:
            collection: A TasksCollection in which the task is to be added.
            command_text: The TCL command we will execute.
            description: A description of this task.

        Returns:
            The new task; its directory contains the generated 'command.tcl'.
        '''
        # Generate the TCL script that this Vivado process will run by
        # filling the project-wide task template with the command.
        command_template_fn = os.path.join(config.tcldir, 'vivado_task.tcl.t')
        with open(command_template_fn, 'r') as f:
            command_template = f.read()
        command = command_template.format(
            tcl_directory=config.tcldir,
            command=command_text
        )
        logger.debug('Creating a new VivadoTask in directory {}'.format(
            collection.directory))
        logger.debug('Command is {}'.format(command_text))
        t = super().create(collection=collection, description=description)
        # Create the command file that the spawned Vivado will source.
        command_fn = os.path.join(t.directory, 'command.tcl')
        with open(command_fn, 'w') as f:
            f.write(command)
        return t

    def __init__(self, directory):
        super().__init__(directory=directory)

    def run(self):
        '''
        Spawn the process that will run the vivado process.
        '''
        cwd = os.getcwd()
        os.chdir(self.directory)
        # BUG FIX: restore the original working directory even when spawning
        # fails, so an exception does not leave the whole program chdir'd
        # into the task directory.
        try:
            stdout_fn = 'stdout.txt'
            stderr_fn = 'stderr.txt'
            command_fn = 'command.tcl'
            if os.name == 'nt':
                commands = [config.vivado, '-log', stdout_fn, '-mode', 'batch',
                            '-source', command_fn]
                logger.debug('running vivado task in directory {}'.format(
                    self.directory))
                logger.debug('command is {}'.format(' '.join(commands)))
                self.process = subprocess.Popen(
                    commands,
                    # So that process stays alive when terminal is closed
                    # in Windows.
                    # Commented out because doesn't seem to be working now.
                    creationflags=subprocess.CREATE_NEW_CONSOLE,
                )
                logger.debug('started process')
            else:
                commands = [config.vivado, '-mode', 'batch', '-source',
                            command_fn]
                self.launch_unix_subprocess(
                    commands, stdout_fn=stdout_fn, stderr_fn=stderr_fn)
        finally:
            os.chdir(cwd)
| mit |
abhattad4/Digi-Menu | django/utils/synch.py | 586 | 2558 | """
Synchronization primitives:
- reader-writer lock (preference to writers)
(Contributed to Django by eugene@lazutkin.com)
"""
import contextlib
import threading
class RWLock(object):
    """
    Classic implementation of reader-writer lock with preference to writers.
    Readers can access a resource simultaneously.
    Writers get an exclusive access.
    API is self-descriptive:
        reader_enters()
        reader_leaves()
        writer_enters()
        writer_leaves()
    """
    def __init__(self):
        # mutex guards every counter below; the two semaphores act as wait
        # queues for blocked readers/writers.  They start at 0, so acquire()
        # always blocks until a matching release() grants entry.
        self.mutex = threading.RLock()
        self.can_read = threading.Semaphore(0)
        self.can_write = threading.Semaphore(0)
        self.active_readers = 0
        self.active_writers = 0
        self.waiting_readers = 0
        self.waiting_writers = 0

    def reader_enters(self):
        # A reader may enter only when no writer is active *or waiting*;
        # otherwise it queues up.  Blocking on waiting writers is what gives
        # writers preference.
        with self.mutex:
            if self.active_writers == 0 and self.waiting_writers == 0:
                self.active_readers += 1
                self.can_read.release()
            else:
                self.waiting_readers += 1
        # Blocks until released above (fast path) or by writer_leaves().
        self.can_read.acquire()

    def reader_leaves(self):
        with self.mutex:
            self.active_readers -= 1
            # Last reader out hands the lock to one waiting writer, if any.
            if self.active_readers == 0 and self.waiting_writers != 0:
                self.active_writers += 1
                self.waiting_writers -= 1
                self.can_write.release()

    @contextlib.contextmanager
    def reader(self):
        # Context-manager form: `with lock.reader(): ...`
        self.reader_enters()
        try:
            yield
        finally:
            self.reader_leaves()

    def writer_enters(self):
        # A writer needs complete exclusivity: no active writer, no waiting
        # writer ahead of it, and no active readers.
        with self.mutex:
            if self.active_writers == 0 and self.waiting_writers == 0 and self.active_readers == 0:
                self.active_writers += 1
                self.can_write.release()
            else:
                self.waiting_writers += 1
        self.can_write.acquire()

    def writer_leaves(self):
        with self.mutex:
            self.active_writers -= 1
            # Writers are preferred: wake the next waiting writer first ...
            if self.waiting_writers != 0:
                self.active_writers += 1
                self.waiting_writers -= 1
                self.can_write.release()
            # ... otherwise release *all* waiting readers at once.
            elif self.waiting_readers != 0:
                t = self.waiting_readers
                self.waiting_readers = 0
                self.active_readers += t
                while t > 0:
                    self.can_read.release()
                    t -= 1

    @contextlib.contextmanager
    def writer(self):
        # Context-manager form: `with lock.writer(): ...`
        self.writer_enters()
        try:
            yield
        finally:
            self.writer_leaves()
| bsd-3-clause |
efktr/data | sider.py | 1 | 4731 | # Import data from SIDER
# Current url for latest: ftp://xi.embl.de/SIDER/latest/
from __future__ import print_function
from ftplib import FTP
import gzip
import os
import csv
import json
# Locations of the SIDER FTP source and the local working/output folders.
source_domain = 'xi.embl.de'
source_folder = '/SIDER/latest/'
temp_folder = './temp'
data_folder = './data'
scope_name = 'sider'
# Ensure the scratch directory exists before any download.
if not os.path.isdir(temp_folder):
    os.makedirs(temp_folder)
# Utils
def stitch_flat_to_pubchem(cid):
    """Convert a flat STITCH compound id ('CID1xxxxxxxx') to a PubChem CID.

    Flat ids encode the PubChem CID plus an offset of 100,000,000.
    Uses an integer constant (the original subtracted the float ``1e8``,
    which silently turned an integer identifier into a float).
    """
    assert cid.startswith('CID')
    return int(cid[3:]) - 100000000
def stitch_stereo_to_pubchem(cid):
    """Convert a stereo STITCH compound id ('CIDxxxxxxxxx') to a PubChem CID.

    Stereo ids are simply 'CID' followed by the zero-padded PubChem CID.
    """
    assert cid.startswith('CID')
    numeric_part = cid[3:]
    return int(numeric_part)
# END Utils
# Download the SIDER dump only if it is not already cached locally.
if not os.path.isfile(os.path.join(temp_folder, scope_name, "meddra_freq.tsv.gz")):
    print("Connecting to FTP server and collecting data...")
    ftp = FTP(source_domain)
    ftp.login('anonymous', 'anonymous')
    ftp.cwd(source_folder)
    filenames = ftp.nlst()
    if not os.path.isdir(os.path.join(temp_folder, scope_name)):
        os.makedirs(os.path.join(temp_folder, scope_name))
    for filename in filenames:
        local_filename = os.path.join(temp_folder, scope_name, filename)
        # Skip hidden/special FTP entries.
        if not filename.startswith("."):
            print("Writing file", filename)
            current = open(local_filename, 'wb')
            ftp.retrbinary('RETR ' + filename, current.write)
            current.close()
    ftp.quit()
print("Unzipping file ...")
with gzip.open(os.path.join(temp_folder, scope_name, "meddra_freq.tsv.gz"), 'rb') as f:
    sider_freq_content = f.read()
    # NOTE(review): f.close() is redundant inside the `with` block.
    f.close()
print("Reading data ...")
# stitch_to_umls: stereo STITCH id -> record with aggregated adverse reactions.
# umls_dictionary: set of (UMLS CUI, side-effect name) pairs seen in the file.
stitch_to_umls = {}
umls_dictionary = set()
# NOTE(review): passing the gzip payload through split("\n") assumes it is a
# Python 2 byte-string; under Python 3 this would need .decode() first.
reader = csv.DictReader(sider_freq_content.split("\n"), delimiter='\t',
                        fieldnames=[
                            'stitch_id_flat',
                            'stitch_id_stereo',
                            'umls_cui_from_label',
                            'placebo',
                            'frequency',
                            'lower',
                            'upper',
                            'meddra_type',
                            'umls_cui_from_meddra',
                            'side_effect_name'])
for row in reader:
    # Prefer the MedDRA-derived CUI; fall back to the label-derived one.
    umls_cui = row["umls_cui_from_meddra"] if row["umls_cui_from_meddra"] is not None and row["umls_cui_from_meddra"] != '' else row["umls_cui_from_label"]
    if umls_cui is not None and umls_cui != '':
        umls_dictionary.add((umls_cui, row['side_effect_name']))
        if not row["stitch_id_stereo"] in stitch_to_umls:
            # First occurrence of this compound: create its record.
            stitch_to_umls[row["stitch_id_stereo"]] = {
                "stitchId": row["stitch_id_stereo"],
                "stitchIdFlat": row["stitch_id_flat"],
                "pubChemId": stitch_stereo_to_pubchem(row["stitch_id_stereo"]),
                "adverseReactions": {
                    umls_cui: {
                        "umlsId": umls_cui,
                        "lower": float(row["lower"]),
                        "upper": float(row["upper"]),
                        "count": 1.0
                    }
                }
            }
        else:
            if not umls_cui in stitch_to_umls[row["stitch_id_stereo"]]['adverseReactions']:
                stitch_to_umls[row["stitch_id_stereo"]]['adverseReactions'][umls_cui] = {
                    "umlsId": umls_cui,
                    "lower": float(row["lower"]),
                    "upper": float(row["upper"]),
                    "count": 1.0
                }
            else:
                # Repeated (compound, reaction) pair: fold the new bounds in
                # with an incremental running mean over `count` observations.
                stitch_to_umls[row["stitch_id_stereo"]]['adverseReactions'][umls_cui]["count"] += 1.0
                current = stitch_to_umls[row["stitch_id_stereo"]]['adverseReactions'][umls_cui]
                stitch_to_umls[row["stitch_id_stereo"]]['adverseReactions'][umls_cui]['lower'] = current['lower'] + (1 / current['count']) * (float(row["lower"]) - current['lower'])
                stitch_to_umls[row["stitch_id_stereo"]]['adverseReactions'][umls_cui]['upper'] = current['upper'] + (1 / current['count']) * (float(row["upper"]) - current['upper'])
    else:
        # Row carried no usable UMLS CUI at all; drop it.
        print("Lost one dictionary item")
print("Writing data ...")
if not os.path.isdir(data_folder):
    os.makedirs(data_folder)
if not os.path.isdir(os.path.join(data_folder, scope_name)):
    os.makedirs(os.path.join(data_folder, scope_name))
# Dump the (CUI, name) dictionary as a JSON list of objects.
with open(os.path.join(data_folder, scope_name, "umlsDictionary.json"), 'wb') as out:
    out.write(json.dumps(list([{"umlsId": e[0], "name": e[1]} for e in umls_dictionary])))
    out.close()
# Dump the per-compound records, flattening the reaction dicts to lists.
with open(os.path.join(data_folder, scope_name, "stitchToUmls.json"), 'wb') as out:
    result = stitch_to_umls.values()
    for e in result:
        e['adverseReactions'] = e['adverseReactions'].values()
    out.write(json.dumps(result))
    out.close()
print("Done.")
quit() | gpl-3.0 |
SYSTRAN/geographic-api-python-client | systran_geographic_api/models/__init__.py | 1 | 2228 | """
Copyright 2015 SYSTRAN Software, Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
# import models into model package
from .boundaries import Boundaries
from .full_position import FullPosition
from .position import Position
from .poi_address_components import POIAddressComponents
from .poi_address import POIAddress
from .address_components import AddressComponents
from .address import Address
from .lite_location import LiteLocation
from .full_poi_location import FullPOILocation
from .full_location import FullLocation
from .opening_dates import OpeningDates
from .phone_number import PhoneNumber
from .mail import Mail
from .contact import Contact
from .opening_hours import OpeningHours
from .photo import Photo
from .video import Video
from .description import Description
from .review import Review
from .booking import Booking
from .lite_poi import LitePOI
from .full_poi import FullPOI
from .destination import Destination
from .full_destination import FullDestination
from .chapter import Chapter
from .inspiration import Inspiration
from .full_inspiration import FullInspiration
from .poi_response import PoiResponse
from .destination_response import DestinationResponse
from .inspiration_response import InspirationResponse
from .poi_details_response import PoiDetailsResponse
from .poi_types_response import PoiTypesResponse
from .destination_details_response import DestinationDetailsResponse
from .inspiration_details_response import InspirationDetailsResponse
from .supported_languages_response import SupportedLanguagesResponse
from .api_version_response import ApiVersionResponse
from .error_response import ErrorResponse
| apache-2.0 |
vrieni/orange | Orange/OrangeCanvas/orngTabs.py | 6 | 32744 | # Author: Gregor Leban (gregor.leban@fri.uni-lj.si)
# Description:
# tab for showing widgets and widget button class
#
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import os.path, sys
from string import strip, count, replace
import orngDoc, orngOutput, orngRegistry, orngSignalManager
from orngSignalManager import InputSignal, OutputSignal, resolveSignal
import OWGUIEx
import orngHelp
# Display modes for the widget list (value of settings["widgetListType"]).
WB_TOOLBOX = 0
WB_TREEVIEW = 1
WB_TREEVIEW_NO_ICONS = 2
WB_TABBAR_NO_TEXT = 3
WB_TABBAR_TEXT = 4
# we have to use a custom class since QLabel by default ignores the mouse
# events if it is showing text (it does not ignore events if it's showing an icon)
class OrangeLabel(QLabel):
    """QLabel that forwards mouse press/move/release events to its parent."""

    def _forwarded(self, e):
        # Rebuild the event with the position mapped into parent coordinates.
        parentPos = self.mapToParent(e.pos())
        return QMouseEvent(e.type(), parentPos, e.button(), e.buttons(),
                           e.modifiers())

    def mousePressEvent(self, e):
        self.parent().mousePressEvent(self._forwarded(e))

    def mouseMoveEvent(self, e):
        self.parent().mouseMoveEvent(self._forwarded(e))

    def mouseReleaseEvent(self, e):
        self.parent().mouseReleaseEvent(self._forwarded(e))
class WidgetButtonBase():
    """
    Shared behaviour for the clickable widget entries in the widget lists:
    remembers the widget description and adds an instance of the widget to
    the active schema when clicked.
    """
    def __init__(self, name, widgetInfo, widgetTabs, canvasDlg):
        # Modifier-key state, updated by the subclasses' mouse handlers.
        self.shiftPressed = 0
        self.ctrlPressed = 0
        self.name = name
        self.widgetInfo = widgetInfo
        self.widgetTabs = widgetTabs
        self.canvasDlg = canvasDlg

    def clicked(self, rightClick = False, pos = None):
        # Ctrl+click shows the widget's help instead of adding it.
        if self.ctrlPressed:
            qApp.canvasDlg.helpWindow.showHelpFor(self.widgetInfo, False)
            return
        win = self.canvasDlg.schema
        if pos:
            pos = win.mapFromGlobal(pos)
            win.addWidget(self.widgetInfo, pos.x(), pos.y())
        else:
            win.addWidget(self.widgetInfo)
        # Right-click or shift+click also connects the new widget: either to
        # the widget that was right-clicked, or to the previously added one.
        if (rightClick or self.shiftPressed):
            import orngCanvasItems
            if isinstance(rightClick, orngCanvasItems.CanvasWidget):
                win.addLine(rightClick, win.widgets[-1])
            elif len(win.widgets) > 1:
                win.addLine(win.widgets[-2], win.widgets[-1])
        #return win.widgets[-1]
class WidgetButton(QFrame, WidgetButtonBase):
    """
    Icon+text button used in the toolbox/tab-bar widget lists; supports
    click-to-add and drag-to-canvas.
    """
    def __init__(self, tab, name, widgetInfo, widgetTabs, canvasDlg, buttonType = 2, size=30):
        QFrame.__init__(self)
        WidgetButtonBase.__init__(self, name, widgetInfo, widgetTabs, canvasDlg)
        self.buttonType = buttonType
        self.iconSize = size
        # Toolbox mode lays icon and text side by side; other modes stack them.
        self.setLayout(buttonType == WB_TOOLBOX and QHBoxLayout() or QVBoxLayout())
        self.pixmapWidget = QLabel(self)
        self.textWidget = OrangeLabel(self)
        if buttonType == WB_TABBAR_NO_TEXT:
            self.textWidget.hide()
        self.layout().setMargin(3)
        if buttonType != WB_TOOLBOX:
            self.layout().setSpacing(0)
        self.icon = canvasDlg.getWidgetIcon(widgetInfo)
        self.pixmapWidget.setPixmap(self.icon.pixmap(self.iconSize, self.iconSize))
        self.pixmapWidget.setScaledContents(1)
        self.pixmapWidget.setFixedSize(QSize(self.iconSize, self.iconSize))

        #split long names into two lines
        buttonName = name
        if self.buttonType == WB_TABBAR_TEXT:
            numSpaces = count(buttonName, " ")
            if numSpaces == 1: buttonName = replace(buttonName, " ", "<br>")
            elif numSpaces > 1:
                # Break at the space closest to the middle of the name.
                mid = len(buttonName)/2; i = 0
                found = 0   # NOTE(review): unused
                while "<br>" not in buttonName:
                    if buttonName[mid + i] == " ": buttonName = buttonName[:mid + i] + "<br>" + buttonName[(mid + i + 1):]
                    elif buttonName[mid - i] == " ": buttonName = buttonName[:mid - i] + "<br>" + buttonName[(mid - i + 1):]
                    i+=1
            else:
                # Single-word name: add an empty second line to keep heights even.
                buttonName += "<br>"

        self.layout().addWidget(self.pixmapWidget)
        self.layout().addWidget(self.textWidget)
        if self.buttonType != WB_TOOLBOX:
            self.textWidget.setText("<div align=\"center\">" + buttonName + "</div>")
            self.layout().setAlignment(self.pixmapWidget, Qt.AlignHCenter)
            self.layout().setAlignment(self.textWidget, Qt.AlignHCenter)
        else:
            self.textWidget.setText(name)
        self.setToolTip(widgetInfo.tooltipText)

    # we need to handle context menu event, otherwise we get a popup when pressing the right button on one of the icons
    def contextMenuEvent(self, ev):
        ev.accept()

    def mouseMoveEvent(self, e):
        ### Semaphore "busy" is needed for some widgets whose loading takes more time, e.g. Select Data
        ### Since the active window cannot change during dragging, we wouldn't have to remember the window; but let's leave the code in, it can't hurt
        schema = self.canvasDlg.schema
        if hasattr(self, "busy"):
            return
        self.busy = 1

        inside = schema.canvasView.rect().contains(schema.canvasView.mapFromGlobal(self.mapToGlobal(e.pos())) - QPoint(50,50))
        p = QPointF(schema.canvasView.mapFromGlobal(self.mapToGlobal(e.pos()))) + QPointF(schema.canvasView.mapToScene(QPoint(0, 0)))

        dinwin, widget = getattr(self, "widgetDragging", (None, None))
        # Dragged back out of the canvas: remove the provisional widget.
        if dinwin and (dinwin != schema or not inside):
            dinwin.removeWidget(widget)
            delattr(self, "widgetDragging")
            #dinwin.canvasView.scene().update()

        if inside:
            if not widget:
                widget = schema.addWidget(self.widgetInfo, p.x() - 24, p.y() - 24)
                self.widgetDragging = schema, widget

            # in case we got an exception when creating a widget instance
            if widget == None:
                delattr(self, "busy")
                return

            widget.setCoords(p.x() - widget.rect().width()/2, p.y() - widget.rect().height()/2)

            import orngCanvasItems
            # Mark the position invalid while it overlaps another widget.
            items = schema.canvas.collidingItems(widget)
            widget.invalidPosition = widget.selected = (schema.canvasView.findItemTypeCount(items, orngCanvasItems.CanvasWidget) > 0)

        delattr(self, "busy")

    def mousePressEvent(self, e):
        # Visual "pressed" feedback: sunken frame, tighter margin.
        self.setFrameShape(QFrame.StyledPanel)
        self.layout().setMargin(self.layout().margin()-2)

    def mouseReleaseEvent(self, e):
        self.layout().setMargin(self.layout().margin()+2)
        self.setFrameShape(QFrame.NoFrame)
        dinwin, widget = getattr(self, "widgetDragging", (None, None))
        self.shiftPressed = e.modifiers() & Qt.ShiftModifier
        self.ctrlPressed = e.modifiers() & Qt.ControlModifier
        if widget:
            if widget.invalidPosition:
                dinwin.removeWidget(widget)
                # dinwin.canvasView.scene().update()
            elif self.shiftPressed and len(dinwin.widgets) > 1:
                dinwin.addLine(dinwin.widgets[-2], dinwin.widgets[-1])
            delattr(self, "widgetDragging")

        # we say that we clicked the button only if we released the mouse inside the button
        if e.pos().x() >= 0 and e.pos().x() < self.width() and e.pos().y() > 0 and e.pos().y() < self.height():
            self.clicked(e.button() == Qt.RightButton)

    def wheelEvent(self, ev):
        # Forward the wheel to the horizontal scroll bar of the hosting tab.
        if self.parent() and self.buttonType != WB_TOOLBOX:
            hs = self.parent().tab.horizontalScrollBar()
            hs.setValue(min(max(hs.minimum(), hs.value()-ev.delta()), hs.maximum()))
        else:
            QFrame.wheelEvent(self, ev)
class WidgetTreeItem(QTreeWidgetItem, WidgetButtonBase):
    """Tree-view entry representing a single widget."""
    def __init__(self, parent, name, widgetInfo, tabs, canvasDlg, wbType=1):
        QTreeWidgetItem.__init__(self, parent)
        WidgetButtonBase.__init__(self, name, widgetInfo, tabs, canvasDlg)

        # Only the icon-enabled tree mode shows the widget's icon.
        if wbType == WB_TREEVIEW:
            self.setIcon(0, canvasDlg.getWidgetIcon(widgetInfo))
        self.setText(0, name)
        self.setToolTip(0, widgetInfo.tooltipText)

    def adjustSize(self):
        # No-op: keeps the interface compatible with WidgetButton-hosting tabs.
        pass
class MyTreeWidget(QTreeWidget):
    """
    Widget tree with click-to-add and drag-to-canvas support for its
    WidgetTreeItem entries.
    """
    def __init__(self, canvasDlg, parent = None):
        QTreeWidget.__init__(self, parent)
        self.canvasDlg = canvasDlg
        self.setMouseTracking(1)
        self.setHeaderHidden(1)
        self.mousePressed = 0
        self.mouseRightClick = 0
        self.connect(self, SIGNAL("itemClicked (QTreeWidgetItem *,int)"), self.itemClicked)
        self.setStyleSheet(""" QTreeView::item {padding: 2px 0px 2px 0px} """)   # show items a little bit apart from each other

    def mouseMoveEvent(self, e):
        if not self.mousePressed:   # this is needed, otherwise another widget in the tree might get selected while we drag the icon to the canvas
            QTreeWidget.mouseMoveEvent(self, e)
            # BUG FIX: return here.  Mouse tracking is enabled, so move
            # events arrive without a button press; without the early return
            # plain hovering would fall through into the drag logic below.
            return
        ### Semaphore "busy" is needed for some widgets whose loading takes more time, e.g. Select Data
        ### Since the active window cannot change during dragging, we wouldn't have to remember the window; but let's leave the code in, it can't hurt
        schema = self.canvasDlg.schema
        if hasattr(self, "busy"):
            return
        self.busy = 1

        inside = schema.canvasView.rect().contains(schema.canvasView.mapFromGlobal(self.mapToGlobal(e.pos())) - QPoint(50,50))
        p = QPointF(schema.canvasView.mapFromGlobal(self.mapToGlobal(e.pos()))) + QPointF(schema.canvasView.mapToScene(QPoint(0, 0)))

        dinwin, widget = getattr(self, "widgetDragging", (None, None))
        # Dragged back out of the canvas: remove the provisional widget.
        if dinwin and (dinwin != schema or not inside):
            dinwin.removeWidget(widget)
            delattr(self, "widgetDragging")
            # dinwin.canvasView.scene().update()

        if inside:
            if not widget and self.selectedItems() != [] and isinstance(self.selectedItems()[0], WidgetTreeItem):
                widget = schema.addWidget(self.selectedItems()[0].widgetInfo, p.x() - 24, p.y() - 24)
                self.widgetDragging = schema, widget

            # in case we got an exception when creating a widget instance
            if widget == None:
                delattr(self, "busy")
                return

            widget.setCoords(p.x() - widget.rect().width()/2, p.y() - widget.rect().height()/2)
            # schema.canvasView.scene().update()

            import orngCanvasItems
            # Mark the position invalid while it overlaps another widget.
            items = schema.canvas.collidingItems(widget)
            widget.invalidPosition = widget.selected = (schema.canvasView.findItemTypeCount(items, orngCanvasItems.CanvasWidget) > 0)

        delattr(self, "busy")

    def mousePressEvent(self, e):
        QTreeWidget.mousePressEvent(self, e)
        self.mousePressed = 1
        self.shiftPressed = bool(e.modifiers() & Qt.ShiftModifier)
        self.ctrlPressed = bool(e.modifiers() & Qt.ControlModifier)
        self.mouseRightClick = e.button() == Qt.RightButton

    def mouseReleaseEvent(self, e):
        QTreeWidget.mouseReleaseEvent(self, e)
        dinwin, widget = getattr(self, "widgetDragging", (None, None))
        self.shiftPressed = bool(e.modifiers() & Qt.ShiftModifier)
        self.ctrlPressed = bool(e.modifiers() & Qt.ControlModifier)
        if widget:
            if widget.invalidPosition:
                dinwin.removeWidget(widget)
                # dinwin.canvasView.scene().update()
            elif self.shiftPressed and len(dinwin.widgets) > 1:
                dinwin.addLine(dinwin.widgets[-2], dinwin.widgets[-1])
            delattr(self, "widgetDragging")
        self.mousePressed = 0

    def itemClicked(self, item, column):
        # Folders toggle on their own; only widget items are added to the schema.
        if isinstance(item, WidgetTreeFolder):
            return
        # Ctrl+click shows the widget's help instead of adding it.
        if self.ctrlPressed:
            qApp.canvasDlg.helpWindow.showHelpFor(item.widgetInfo, False)
            return
        win = self.canvasDlg.schema
        win.addWidget(item.widgetInfo)
        if (self.mouseRightClick or self.shiftPressed) and len(win.widgets) > 1:
            win.addLine(win.widgets[-2], win.widgets[-1])
class WidgetScrollArea(QScrollArea):
    """Scroll area that scrolls horizontally on mouse-wheel events."""
    def wheelEvent(self, ev):
        bar = self.horizontalScrollBar()
        # Move by the wheel delta, clamped to the bar's valid range.
        target = bar.value() - ev.delta()
        bar.setValue(max(bar.minimum(), min(target, bar.maximum())))
class WidgetListBase:
    """
    Mixin shared by the toolbox/tree/tab-bar widget lists: holds the
    registered tabs and populates them from the widget registry.
    """
    def __init__(self, canvasDlg, widgetInfo):
        self.canvasDlg = canvasDlg
        self.widgetInfo = widgetInfo
        self.allWidgets = []
        self.tabDict = {}
        self.tabs = []

    def createWidgetTabs(self, widgetTabList, widgetRegistry, widgetDir, picsDir, defaultPic):
        """Create one tab per category and fill it with widget buttons.

        Returns the tab list (name, shown) extended with any registry
        categories that were missing from widgetTabList.
        """
        self.widgetDir = widgetDir
        self.picsDir = picsDir
        self.defaultPic = defaultPic
        widgetTypeList = self.canvasDlg.settings["widgetListType"]
        size = min(len(self.canvasDlg.toolbarIconSizeList)-1, self.canvasDlg.settings["toolbarIconSize"])
        iconSize = self.canvasDlg.toolbarIconSizeList[size]

        # find tab names that are not in widgetTabList
        extraTabs = [(name, 1) for name in widgetRegistry.keys() if name not in [tab for (tab, s) in widgetTabList]]
        extraTabs.sort()

        # first insert the default tab names
        for (tabName, show) in widgetTabList + extraTabs:
            if not show or not widgetRegistry.has_key(tabName): continue
            tab = self.insertWidgetTab(tabName, show)

            # Sort category members by their declared priority.
            widgets = [(int(widgetInfo.priority), name, widgetInfo) for (name, widgetInfo) in widgetRegistry[tabName].items()]
            widgets.sort()
            exIndex = 0
            for (priority, name, widgetInfo) in widgets:
                if isinstance(self, WidgetTree):
                    button = WidgetTreeItem(tab, name, widgetInfo, self, self.canvasDlg, widgetTypeList)
                else:
                    button = WidgetButton(tab, name, widgetInfo, self, self.canvasDlg, widgetTypeList, iconSize)
                    # Insert visual gaps between widgets in different
                    # thousand-bands of the priority value.
                    for k in range(priority/1000 - exIndex):
                        tab.layout().addSpacing(10)
                    exIndex = priority / 1000
                    tab.layout().addWidget(button)
                tab.widgets.append(button)
                self.allWidgets.append(button)

            if hasattr(tab, "adjustSize"):
                tab.adjustSize()

        # return the list of tabs and their status (shown/hidden)
        return widgetTabList + extraTabs
class WidgetTabs(WidgetListBase, QTabWidget):
    """Widget list rendered as a tab bar, one tab per category."""
    def __init__(self, canvasDlg, widgetInfo, *args):
        WidgetListBase.__init__(self, canvasDlg, widgetInfo)
        QTabWidget.__init__(self, *args)

    def insertWidgetTab(self, name, show = 1):
        # Existing tab: just toggle its visibility.
        if self.tabDict.has_key(name):
            if show: self.tabDict[name].tab.show()
            else: self.tabDict[name].tab.hide()
            return self.tabDict[name]

        tab = WidgetScrollArea(self)
        tab.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOn)
        tab.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        # The actual container that will hold the widget buttons.
        widgetSpace = QWidget(self)
        widgetSpace.setLayout(QHBoxLayout())
        widgetSpace.layout().setSpacing(0)
        widgetSpace.layout().setMargin(0)
        widgetSpace.tab = tab
        widgetSpace.widgets = []
        tab.setWidget(widgetSpace)
        self.tabDict[name] = widgetSpace

        if show:
            self.addTab(tab, name)
            self.tabs.append((name, 2, widgetSpace))
        else:
            tab.hide()
            self.tabs.append((name, 0, widgetSpace))
        return widgetSpace
class WidgetTree(WidgetListBase, QDockWidget):
    """Widget list rendered as a dockable tree with a search box above it."""
    def __init__(self, canvasDlg, widgetInfo, *args):
        WidgetListBase.__init__(self, canvasDlg, widgetInfo)
        QDockWidget.__init__(self, "Widgets")
        self.treeWidget = MyTreeWidget(canvasDlg, self)
        self.treeWidget.tabDict = self.tabDict
        self.treeWidget.setFocusPolicy(Qt.ClickFocus)    # this is needed otherwise the document window will sometimes strangely lose focus and the output window will be focused
        # NOTE(review): categoriesPopup is presumably a module-level global
        # created elsewhere in this file — confirm it exists before this runs.
        self.actions = categoriesPopup.allActions

        # a widget container to hold the search area and the widget tree
        self.containerWidget = QWidget()
        containerBoxLayout = QBoxLayout(QBoxLayout.TopToBottom, self.containerWidget)
        if sys.platform == "darwin":
            containerBoxLayout.setContentsMargins(0,0,0,0)

        self.widgetSuggestEdit = OWGUIEx.lineEditHint(self, None, None, useRE = 0, caseSensitive = 0, matchAnywhere = 1, autoSizeListWidget = 1, callback = self.widgetSuggestCallback)
        self.widgetSuggestEdit.setItems([QListWidgetItem(action.icon(), action.widgetInfo.name) for action in self.actions])
        containerBoxLayout.insertWidget(0, self.widgetSuggestEdit)
        containerBoxLayout.insertWidget(1, self.treeWidget)

        self.setWidget(self.containerWidget)
        iconSize = self.canvasDlg.toolbarIconSizeList[self.canvasDlg.settings["toolbarIconSize"]]
        self.treeWidget.setIconSize(QSize(iconSize, iconSize))
#        self.treeWidget.setRootIsDecorated(0)

    def insertWidgetTab(self, name, show = 1):
        # 'name' may be a '/'-separated path: create/reuse one folder per level.
        path = name.split("/")
        parent = self
        for i in xrange(len(path)):
            fullName = "/".join(path[:i+1])
            name = path[i]

            if parent.tabDict.has_key(name):
                parent.tabDict[name].setHidden(not show)
                parent = parent.tabDict[name]
                continue

            item = WidgetTreeFolder(self.treeWidget if parent==self else parent, name)
            item.widgets = []
            parent.tabDict[name] = item

            if not show:
                item.setHidden(1)
            # Restore saved expansion state; default-expand a lone root folder.
            if self.canvasDlg.settings.has_key("treeItemsOpenness") and self.canvasDlg.settings["treeItemsOpenness"].has_key(fullName):
                item.setExpanded(self.canvasDlg.settings["treeItemsOpenness"][fullName])
            elif not self.canvasDlg.settings.has_key("treeItemsOpenness") and self.treeWidget.topLevelItemCount() == 1:
                item.setExpanded(1)
            self.tabs.append((fullName, 2*int(show), item))
            parent = item

        return parent

    def widgetSuggestCallback(self):
        # Add the widget whose name exactly matches the search text.
        text = str(self.widgetSuggestEdit.text())
        for action in self.actions:
            if action.widgetInfo.name == text:
                self.widgetInfo = action.widgetInfo
                self.canvasDlg.schema.addWidget(action.widgetInfo)
                self.widgetSuggestEdit.clear()
                return
class WidgetTreeFolder(QTreeWidgetItem):
    """A category folder item in the widget tree."""
    def __init__(self, parent, name):
        QTreeWidgetItem.__init__(self, [name])
        ix = len(parent.tabDict)
        # parent is either the QTreeWidget itself (top-level category)
        # or another folder item (sub-category).
        if hasattr(parent, "insertTopLevelItem"):
            parent.insertTopLevelItem(ix, self)
        else:
            parent.insertChild(ix, self)
        self.tabDict = {}
#        item.setChildIndicatorPolicy(item.ShowIndicator)

    def mousePressEvent(self, e):
        # BUG FIX: this used to toggle `self.treeItem`, an attribute this
        # class never defines (copied from WidgetTreeButton) and which would
        # raise AttributeError; a folder toggles its own expansion.
        # NOTE(review): QTreeWidgetItem is not a QWidget, so Qt does not
        # deliver mouse events here; this only matters for direct callers.
        self.setExpanded(not self.isExpanded())
# button that contains the name of the widget category.
# when clicked it shows or hides the widgets in the category
class WidgetTreeButton(QPushButton):
    def __init__(self, treeItem, name, parent):
        QPushButton.__init__(self, name, parent)
        # The tree item whose expansion this button toggles.
        self.treeItem = treeItem

    def mousePressEvent(self, e):
        self.treeItem.setExpanded(not self.treeItem.isExpanded())
class WidgetToolBox(WidgetListBase, QDockWidget):
    """Widget list rendered as a dockable toolbox with a search box above it."""
    def __init__(self, canvasDlg, widgetInfo, *args):
        WidgetListBase.__init__(self, canvasDlg, widgetInfo)
        QDockWidget.__init__(self, "Widgets")
        # NOTE(review): categoriesPopup is presumably a module-level global
        # created elsewhere in this file — confirm it exists before this runs.
        self.actions = categoriesPopup.allActions
        self.toolbox = MyQToolBox(canvasDlg.settings["toolboxWidth"], self)
        self.toolbox.setFocusPolicy(Qt.ClickFocus)    # this is needed otherwise the document window will sometimes strangely lose focus and the output window will be focused
        self.toolbox.layout().setSpacing(0)

        # a widget container to hold the search area and the widget tree
        self.containerWidget = QWidget()
        containerBoxLayout = QBoxLayout(QBoxLayout.TopToBottom, self.containerWidget)
        if sys.platform == "darwin":
            containerBoxLayout.setContentsMargins(0, 0, 0, 0)

        self.widgetSuggestEdit = OWGUIEx.lineEditHint(self, None, None, useRE = 0, caseSensitive = 0, matchAnywhere = 1, autoSizeListWidget = 1, callback = self.widgetSuggestCallback)
        self.widgetSuggestEdit.setItems([QListWidgetItem(action.icon(), action.widgetInfo.name) for action in self.actions])
        containerBoxLayout.insertWidget(0, self.widgetSuggestEdit)
        containerBoxLayout.insertWidget(1, self.toolbox)
        self.setWidget(self.containerWidget)

    def insertWidgetTab(self, name, show = 1):
        # Existing tab: just toggle its visibility.
        if self.tabDict.has_key(name):
            if show: self.tabDict[name].scrollArea.show()
            else: self.tabDict[name].scrollArea.hide()
            return self.tabDict[name]

        sa = QScrollArea(self.toolbox)
        sa.setBackgroundRole(QPalette.Base)
        # The frame that will actually hold the widget buttons.
        tab = QFrame(self)
        tab.scrollArea = sa
        tab.widgets = []
        sa.setWidget(tab)
        sa.setWidgetResizable(0)
        sa.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        tab.setBackgroundRole(QPalette.Base)
        tab.setLayout(QVBoxLayout())
        tab.layout().setMargin(0)
        tab.layout().setSpacing(0)
        tab.layout().setContentsMargins(6, 6, 6, 6)
        self.tabDict[name] = tab

        if show:
            self.toolbox.addItem(sa, name)
            self.tabs.append((name, 2, tab))
        else:
            sa.hide()
            self.tabs.append((name, 0, tab))
        return tab

    def widgetSuggestCallback(self):
        # Add the widget whose name exactly matches the search text.
        text = str(self.widgetSuggestEdit.text())
        for action in self.actions:
            if action.widgetInfo.name == text:
                self.widgetInfo = action.widgetInfo
                self.canvasDlg.schema.addWidget(action.widgetInfo)
                self.widgetSuggestEdit.clear()
                return
class MyQToolBox(QToolBox):
    """Tool box whose preferred width is taken from the canvas settings."""
    def __init__(self, size, parent):
        QToolBox.__init__(self, parent)
        self.desiredSize = size

    def sizeHint(self):
        # Fixed preferred width; the height value is nominal.
        return QSize(self.desiredSize, 100)
class CanvasWidgetAction(QWidgetAction):
    """Menu action embedding a type-ahead search box over the widget actions."""
    def __init__(self, parent, actions):
        QWidgetAction.__init__(self, parent)
        self.parent = parent
        self.actions = actions
        self.widgetSuggestEdit = OWGUIEx.lineEditHint(self.parent, None, None, useRE = 0, caseSensitive = 0, matchAnywhere = 1, callback = self.callback, autoSizeListWidget = 1)
        self.widgetSuggestEdit.setItems([QListWidgetItem(action.icon(), action.widgetInfo.name) for action in actions])
        self.widgetSuggestEdit.setStyleSheet(""" QLineEdit { background: #fffff0; border: 1px solid orange} """)
        self.widgetSuggestEdit.listWidget.setStyleSheet(""" QListView { background: #fffff0; } QListView::item {padding: 3px 0px 3px 0px} QListView::item:selected { color: white; background: blue;} """)
        self.widgetSuggestEdit.listWidget.setIconSize(QSize(16,16))
        self.setDefaultWidget(self.widgetSuggestEdit)
        # Guards against re-entrant callbacks triggered by the synthetic
        # key event sent below.
        self._in_callback = False

    def callback(self):
        if not self._in_callback:
            try:
                self._in_callback = True
                text = str(self.widgetSuggestEdit.text())
                for action in self.actions:
                    if action.widgetInfo.name == text:
                        self.widgetInfo = action.widgetInfo
                        self.parent.setActiveAction(self)
                        self.activate(QAction.Trigger)
                        # Send Enter so the hosting menu closes as if the
                        # action had been chosen with the keyboard.
                        QApplication.sendEvent(self.widgetSuggestEdit, QKeyEvent(QEvent.KeyPress, Qt.Key_Enter, Qt.NoModifier))
                        return
            finally:
                self._in_callback = False
class CanvasPopup(QMenu):
    """Popup menu of canvas widgets, filterable by signal compatibility.

    The menu shows a widget-suggest line edit, up to three "quick"
    candidate widgets and the per-category sub-menus that are built by
    :func:`constructCategoriesPopup`.
    """

    def __init__(self, canvasDlg):
        QMenu.__init__(self, canvasDlg)
        self.allActions = []      # one QAction per registered widget
        self.catActions = []      # top-level category sub-menus
        self.allCatActions = []   # category sub-menus at every nesting depth
        self.quickActions = []    # actions created for the quick candidates
        self.candidates = []      # widget names offered as quick candidates
        self.canvasDlg = canvasDlg
        cats = orngRegistry.readCategories(silent=True)
        # widget name -> widget description, flattened over all categories
        self.suggestDict = dict([(widget.name, widget) for widget in reduce(list.__add__, [cat.values() for cat in cats.values()], [])])
        self.suggestItems = [QListWidgetItem(self.canvasDlg.getWidgetIcon(widget), widget.name) for widget in self.suggestDict.values()]
        self.categoriesYOffset = 0
        # memoized canConnect() results keyed by (out, in) description pair
        self._canConnectCache = {}

    def showEvent(self, ev):
        QMenu.showEvent(self, ev)
        # Give keyboard focus to the first action's embedded widget
        # (the suggest line edit added by addWidgetSuggest).
        if self.actions():
            self.actions()[0].defaultWidget().setFocus()

    def addWidgetSuggest(self):
        """Prepend the type-to-search widget action plus a separator."""
        actions = [action for action in self.allActions if action.isEnabled()]
        self.addAction(CanvasWidgetAction(self, actions))
        self.addSeparator()

    def showAllWidgets(self):
        """Enable every category and widget action (no filtering)."""
        for cat in self.catActions:
            cat.setEnabled(True)
        for act in self.allActions:
            act.setEnabled(True)

    def selectActions(self, actClassesAttr, widgetClasses):
        """Enable only widgets whose `actClassesAttr` signal-class set
        intersects *widgetClasses*; enable their ancestor categories too."""
        for cat in self.allCatActions:
            cat.setEnabled(False)
        for act in self.allActions:
            if getattr(act.widgetInfo, actClassesAttr) & widgetClasses:
                act.setEnabled(True)
                # Walk up the category chain so the enabled widget is
                # reachable through its (possibly nested) sub-menus.
                obj = act
                while hasattr(obj, "category"):
                    obj = obj.category
                    obj.setEnabled(True)
            else:
                act.setEnabled(False)

    def selectInputActions(self, widgetInfo):
        """Enable widgets which can consume the output from `widgetInfo`'s
        output channels.
        """
        for cat in self.allCatActions:
            cat.setEnabled(False)
        for act in self.allActions:
            if self.canConnect(widgetInfo, act.widgetInfo):
                act.setEnabled(True)
                obj = act
                while hasattr(obj, "category"):
                    obj = obj.category
                    obj.setEnabled(True)
            else:
                act.setEnabled(False)

    def selectOutputActions(self, widgetInfo):
        """Enable widgets which can produce the input for `widgetInfo`'s
        input channels.
        """
        for cat in self.allCatActions:
            cat.setEnabled(False)
        for act in self.allActions:
            if self.canConnect(act.widgetInfo, widgetInfo):
                act.setEnabled(True)
                obj = act
                while hasattr(obj, "category"):
                    obj = obj.category
                    obj.setEnabled(True)
            else:
                act.setEnabled(False)

    def canConnect(self, outWidgetDesc, inWidgetDesc):
        """Can connect any output from outWidgetDesc to input
        from inWidgetDesc.  Results are cached per description pair.
        """
        if (outWidgetDesc, inWidgetDesc) not in self._canConnectCache:
            ret = any(orngSignalManager.canConnect(
                            resolveSignal(out),
                            resolveSignal(in_), dynamic=True) \
                      for out in outWidgetDesc.outputs \
                      for in_ in inWidgetDesc.inputs
                      )
            self._canConnectCache[(outWidgetDesc, inWidgetDesc)] = ret
        return self._canConnectCache[(outWidgetDesc, inWidgetDesc)]

    # NOTE(review): name has a typo ("Widges") but is kept for callers.
    def updateWidgesByOutputs(self, widgetInfo):
        self.selectOutputActions(widgetInfo)

    def updateWidgetsByInputs(self, widgetInfo):
        self.selectInputActions(widgetInfo)

    def updatePredictedWidgets(self, widgets, actClassesAttr, ioClasses=None):
        """Fill `self.candidates` with up to three widget names from
        *widgets*, optionally filtered to those whose `actClassesAttr`
        intersects *ioClasses*.  `widgets` is presumably a sequence of
        widget-name strings (strip/lower comparison) -- TODO confirm.
        """
        self.candidates = []
        for widget in widgets:
            if ioClasses is None:
                self.candidates.append(widget)
            else:
                # filter widgets by allowed signal
                added = False
                for category, show in self.canvasDlg.settings["WidgetTabs"]:
                    if not show or not self.canvasDlg.widgetRegistry.has_key(category):
                        continue
                    for candidate in self.canvasDlg.widgetRegistry[category]:
                        if widget.strip().lower() == candidate.strip().lower():
                            if getattr(self.canvasDlg.widgetRegistry[category][candidate], actClassesAttr) & ioClasses:
                                self.candidates.append(candidate)
                                added = True
                    if added:
                        break
        self.candidates = self.candidates[:3]

    def updateMenu(self):
        """Rebuild the menu: suggest edit, quick candidates, separator,
        then the category sub-menus."""
        self.clear()
        self.addWidgetSuggest()
        for c in self.candidates:
            for category, show in self.canvasDlg.settings["WidgetTabs"]:
                if not show or not self.canvasDlg.widgetRegistry.has_key(category):
                    continue
                if c in self.canvasDlg.widgetRegistry[category]:
                    widgetInfo = self.canvasDlg.widgetRegistry[category][c]
                    icon = self.canvasDlg.getWidgetIcon(widgetInfo)
                    act = self.addAction(icon, widgetInfo.name)
                    act.widgetInfo = widgetInfo
                    act.setIconVisibleInMenu(True)
                    self.quickActions.append(act)
                    break
        # Remember where the category section starts (used by callers to
        # position the popup) before the separator is appended.
        self.categoriesYOffset = self.sizeHint().height()
        self.addSeparator()
        for m in self.catActions:
            self.addMenu(m)
def constructCategoriesPopup(canvasDlg):
    """Build the module-global `categoriesPopup` menu from the widget
    registry, creating one (possibly nested) sub-menu per category path
    and one action per widget, sorted by widget priority."""
    global categoriesPopup
    categoriesPopup = CanvasPopup(canvasDlg)
    categoriesPopup.setStyleSheet(""" QMenu { background-color: #fffff0; selection-background-color: blue; } QMenu::item { color: black; selection-color: white } QMenu::item:disabled { color: #dddddd } QMenu::separator {height: 3px; background: #dddddd; margin-left: 3px; margin-right: 4px;}""")
    # full category path ("A/B") -> its QMenu, so shared prefixes reuse menus
    catmenuDict = {}
    for category, show in canvasDlg.settings["WidgetTabs"]:
        if not show or not canvasDlg.widgetRegistry.has_key(category):
            continue
        path = category.split("/")
        catmenu = categoriesPopup
        catmenu.categoryCount = 0
        # Create/reuse one menu level per path component.
        for i in xrange(len(path)):
            fullName = "/".join(path[:i+1])
            if fullName in catmenuDict:
                catmenu = catmenuDict[fullName]
            else:
                oldcatmenu = catmenu
                catmenu = catmenu.addMenu(path[i]) # Would be better to insert categories before widgets, but API is rather hard to use ...
                oldcatmenu.categoryCount += 1
                catmenu.categoryCount = 0
                catmenuDict[fullName] = catmenu
                categoriesPopup.allCatActions.append(catmenu)
                if i==0:
                    categoriesPopup.catActions.append(catmenu)
                else:
                    # Link nested menus to their parent so selectActions()
                    # can walk the chain upwards.
                    catmenu.category = oldcatmenu
        # Add the category's widgets, ordered by declared priority.
        for widgetInfo in sorted(canvasDlg.widgetRegistry[category].values(), key=lambda x:x.priority):
            icon = QIcon(canvasDlg.getWidgetIcon(widgetInfo))
            act = catmenu.addAction(icon, widgetInfo.name)
            act.widgetInfo = widgetInfo
            act.category = catmenu
            act.setIconVisibleInMenu(True)
            categoriesPopup.allActions.append(act)
#def constructWidgetSuggest(canvasDlg):
# global widgetSuggestEdit
# widgetSuggestEdit = OWGUIEx.suggestLineEdit(None, None, None, useRE = 0, caseSensitive = 0, matchAnywhere = 1)
# widgetSuggestEdit.setWindowFlags(Qt.Popup)
# widgetSuggestEdit.listWidget.setSpacing(2)
# widgetSuggestEdit.setStyleSheet(""" QLineEdit { background: #fffff0;} """)
# widgetSuggestEdit.listWidget.setStyleSheet(""" QListView { background: #fffff0; } QListView::item {padding: 3px 0px 3px 0px} QListView::item:selected, QListView::item:hover { color: white; background: blue;} """)
#
# cats = orngRegistry.readCategories()
# items = []
# for cat in cats.values():
# for widget in cat.values():
# iconNames = canvasDlg.getFullWidgetIconName(widget)
# icon = QIcon()
# for name in iconNames:
# icon.addPixmap(QPixmap(name))
# items.append(QListWidgetItem(icon, widget.name))
# widgetSuggestEdit.setItems(items)
#
#
#
| gpl-3.0 |
easmetz/inasafe | safe/metadata/aggregation_layer_metadata.py | 4 | 3264 | # -*- coding: utf-8 -*-
"""
InaSAFE Disaster risk assessment tool developed by AusAid -
**metadata module.**
Contact : ole.moller.nielsen@gmail.com
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'marco@opengis.ch'
__revision__ = '$Format:%H$'
__date__ = '27/05/2015'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
from safe.metadata.generic_layer_metadata import GenericLayerMetadata
from safe.metadata.utils import merge_dictionaries
def _supplemental_path(element, gco_type):
    """Return the ISO 19115 XPath of an InaSAFE supplemental element.

    Every aggregation property lives under the same
    ``gmd:supplementalInformation/inasafe`` subtree; only the element
    name and the gco value type differ per property.

    :param element: XML element name, e.g. 'adult_ratio_default'.
    :type element: str
    :param gco_type: gco value node, e.g. 'gco:Float'.
    :type gco_type: str
    :returns: the full slash-separated XPath.
    :rtype: str
    """
    return (
        'gmd:identificationInfo/'
        'gmd:MD_DataIdentification/'
        'gmd:supplementalInformation/'
        'inasafe/'
        '%s/'
        '%s' % (element, gco_type))


# Properties exposed by aggregation layers.  '* attribute' entries map to
# gco:CharacterString nodes, '* default' entries to gco:Float nodes; the
# element name is the property name with spaces replaced by underscores.
_AGGREGATION_PROPERTIES = dict(
    (name, _supplemental_path(
        name.replace(' ', '_'),
        'gco:Float' if name.endswith('default') else 'gco:CharacterString'))
    for name in (
        'aggregation attribute',
        'adult ratio attribute',
        'adult ratio default',
        'elderly ratio attribute',
        'elderly ratio default',
        'female ratio attribute',
        'female ratio default',
        'youth ratio attribute',
        'youth ratio default'))


class AggregationLayerMetadata(GenericLayerMetadata):
    """
    Metadata class for aggregation layers

    .. versionadded:: 3.2
    """

    # Inherited properties plus the aggregation-specific ones above.
    _standard_properties = merge_dictionaries(
        GenericLayerMetadata._standard_properties, _AGGREGATION_PROPERTIES)
| gpl-3.0 |
peterbarker/ardupilot | libraries/AP_HAL_ChibiOS/hwdef/scripts/STM32G474xx.py | 10 | 13345 | #!/usr/bin/env python
'''
tables for STM32G474xx MCUs
'''
# additional build information for ChibiOS
# Makefile fragments pulled into the ChibiOS build for the STM32G4 family.
build = {
    "CHIBIOS_STARTUP_MK" : "os/common/startup/ARMCMx/compilers/GCC/mk/startup_stm32g4xx.mk",
    "CHIBIOS_PLATFORM_MK" : "os/hal/ports/STM32/STM32G4xx/platform.mk"
}
# MCU parameters
mcu = {
    # ram map, as list of (address, size-kb, flags)
    # flags of 1 means DMA-capable (DMA and BDMA)
    # flags of 2 means faster memory for CPU intensive work
    # flags of 4 means memory can be used for SDMMC DMA
    'RAM_MAP' : [
        (0x20000000, 96, 1), # SRAM1/SRAM2
        (0x10000000, 32, 2), # CCM
    ],
    # core clock in Hz the hwdef clock tree is expected to produce
    'EXPECTED_CLOCK' : 168000000,
}
# max pin package is 128
# number of pins available per GPIO port letter on the largest package
pincount = {
    'A': 16,
    'B': 16,
    'C': 16,
    'D': 16,
    'E': 16,
    'F': 16,
    'G': 16,
}
# no DMA map as we will dynamically allocate DMA channels using the DMAMUX
DMA_Map = None
AltFunction_map = {
# format is PIN:FUNCTION : AFNUM
# extracted from g474-64.csv
"PA0:COMP1_OUT" : 8,
"PA0:EVENTOUT" : 15,
"PA0:TIM2_CH1" : 1,
"PA0:TIM2_ETR" : 14,
"PA0:TIM5_CH1" : 2,
"PA0:TIM8_BKIN" : 9,
"PA0:TIM8_ETR" : 10,
"PA0:USART2_CTS" : 7,
"PA0:USART2_NSS" : 7,
"PA1:EVENTOUT" : 15,
"PA1:RTC_REFIN" : 0,
"PA1:TIM15_CH1N" : 9,
"PA1:TIM2_CH2" : 1,
"PA1:TIM5_CH2" : 2,
"PA1:USART2_DE" : 7,
"PA1:USART2_RTS" : 7,
"PA2:COMP2_OUT" : 8,
"PA2:EVENTOUT" : 15,
"PA2:LPUART1_TX" : 12,
"PA2:QUADSPI1_BK1_NCS" : 10,
"PA2:TIM15_CH1" : 9,
"PA2:TIM2_CH3" : 1,
"PA2:TIM5_CH3" : 2,
"PA2:UCPD1_FRSTX1" : 14,
"PA2:UCPD1_FRSTX2" : 14,
"PA2:USART2_TX" : 7,
"PA3:EVENTOUT" : 15,
"PA3:LPUART1_RX" : 12,
"PA3:QUADSPI1_CLK" : 10,
"PA3:SAI1_CK1" : 3,
"PA3:SAI1_MCLK_A" : 13,
"PA3:TIM15_CH2" : 9,
"PA3:TIM2_CH4" : 1,
"PA3:TIM5_CH4" : 2,
"PA3:USART2_RX" : 7,
"PA4:EVENTOUT" : 15,
"PA4:I2S3_WS" : 6,
"PA4:SAI1_FS_B" : 13,
"PA4:SPI1_NSS" : 5,
"PA4:SPI3_NSS" : 6,
"PA4:TIM3_CH2" : 2,
"PA4:USART2_CK" : 7,
"PA5:EVENTOUT" : 15,
"PA5:SPI1_SCK" : 5,
"PA5:TIM2_CH1" : 1,
"PA5:TIM2_ETR" : 2,
"PA5:UCPD1_FRSTX1" : 14,
"PA5:UCPD1_FRSTX2" : 14,
"PA6:COMP1_OUT" : 8,
"PA6:EVENTOUT" : 15,
"PA6:LPUART1_CTS" : 12,
"PA6:QUADSPI1_BK1_IO3" : 10,
"PA6:SPI1_MISO" : 5,
"PA6:TIM16_CH1" : 1,
"PA6:TIM1_BKIN" : 6,
"PA6:TIM3_CH1" : 2,
"PA6:TIM8_BKIN" : 4,
"PA7:COMP2_OUT" : 8,
"PA7:EVENTOUT" : 15,
"PA7:QUADSPI1_BK1_IO2" : 10,
"PA7:SPI1_MOSI" : 5,
"PA7:TIM17_CH1" : 1,
"PA7:TIM1_CH1N" : 6,
"PA7:TIM3_CH2" : 2,
"PA7:TIM8_CH1N" : 4,
"PA7:UCPD1_FRSTX1" : 14,
"PA7:UCPD1_FRSTX2" : 14,
"PA8:COMP7_OUT" : 8,
"PA8:EVENTOUT" : 15,
"PA8:CAN3_RX" : 11,
"PA8:HRTIM1_CHA1" : 13,
"PA8:I2C2_SDA" : 4,
"PA8:I2C3_SCL" : 2,
"PA8:I2S2_MCK" : 5,
"PA8:RCC_MCO" : 0,
"PA8:SAI1_CK2" : 12,
"PA8:SAI1_SCK_A" : 14,
"PA8:TIM1_CH1" : 6,
"PA8:TIM4_ETR" : 10,
"PA8:USART1_CK" : 7,
"PA9:COMP5_OUT" : 8,
"PA9:EVENTOUT" : 15,
"PA9:HRTIM1_CHA2" : 13,
"PA9:I2C2_SCL" : 4,
"PA9:I2C3_SMBA" : 2,
"PA9:I2S3_MCK" : 5,
"PA9:SAI1_FS_A" : 14,
"PA9:TIM15_BKIN" : 9,
"PA9:TIM1_CH2" : 6,
"PA9:TIM2_CH3" : 10,
"PA9:USART1_TX" : 7,
"PA10:COMP6_OUT" : 8,
"PA10:CRS_SYNC" : 3,
"PA10:EVENTOUT" : 15,
"PA10:HRTIM1_CHB1" : 13,
"PA10:I2C2_SMBA" : 4,
"PA10:SAI1_D1" : 12,
"PA10:SAI1_SD_A" : 14,
"PA10:SPI2_MISO" : 5,
"PA10:TIM17_BKIN" : 1,
"PA10:TIM1_CH3" : 6,
"PA10:TIM2_CH4" : 10,
"PA10:TIM8_BKIN" : 11,
"PA10:USART1_RX" : 7,
"PA11:COMP1_OUT" : 8,
"PA11:EVENTOUT" : 15,
"PA11:CAN1_RX" : 9,
"PA11:HRTIM1_CHB2" : 13,
"PA11:I2S2_SD" : 5,
"PA11:SPI2_MOSI" : 5,
"PA11:TIM1_BKIN2" : 12,
"PA11:TIM1_CH1N" : 6,
"PA11:TIM1_CH4" : 11,
"PA11:TIM4_CH1" : 10,
"PA11:USART1_CTS" : 7,
"PA11:USART1_NSS" : 7,
"PA12:COMP2_OUT" : 8,
"PA12:EVENTOUT" : 15,
"PA12:CAN1_TX" : 9,
"PA12:HRTIM1_FLT1" : 13,
"PA12:I2S_CKIN" : 5,
"PA12:TIM16_CH1" : 1,
"PA12:TIM1_CH2N" : 6,
"PA12:TIM1_ETR" : 11,
"PA12:TIM4_CH2" : 10,
"PA12:USART1_DE" : 7,
"PA12:USART1_RTS" : 7,
"PA13:EVENTOUT" : 15,
"PA13:I2C1_SCL" : 4,
"PA13:I2C4_SCL" : 3,
"PA13:IR_OUT" : 5,
"PA13:SAI1_SD_B" : 13,
"PA13:JTMS-SWDIO" : 0,
"PA13:TIM16_CH1N" : 1,
"PA13:TIM4_CH3" : 10,
"PA13:USART3_CTS" : 7,
"PA13:USART3_NSS" : 7,
"PA14:EVENTOUT" : 15,
"PA14:I2C1_SDA" : 4,
"PA14:I2C4_SMBA" : 3,
"PA14:LPTIM1_OUT" : 1,
"PA14:SAI1_FS_B" : 13,
"PA14:JTCK-SWCLK" : 0,
"PA14:TIM1_BKIN" : 6,
"PA14:TIM8_CH2" : 5,
"PA14:USART2_TX" : 7,
"PA15:EVENTOUT" : 15,
"PA15:CAN3_TX" : 11,
"PA15:HRTIM1_FLT2" : 13,
"PA15:I2C1_SCL" : 4,
"PA15:I2S3_WS" : 6,
"PA15:SPI1_NSS" : 5,
"PA15:SPI3_NSS" : 6,
"PA15:SYS_JTDI" : 0,
"PA15:TIM1_BKIN" : 9,
"PA15:TIM2_CH1" : 1,
"PA15:TIM2_ETR" : 14,
"PA15:TIM8_CH1" : 2,
"PA15:UART4_DE" : 8,
"PA15:UART4_RTS" : 8,
"PA15:USART2_RX" : 7,
"PB0:EVENTOUT" : 15,
"PB0:HRTIM1_FLT5" : 13,
"PB0:QUADSPI1_BK1_IO1" : 10,
"PB0:TIM1_CH2N" : 6,
"PB0:TIM3_CH3" : 2,
"PB0:TIM8_CH2N" : 4,
"PB0:UCPD1_FRSTX1" : 14,
"PB0:UCPD1_FRSTX2" : 14,
"PB1:COMP4_OUT" : 8,
"PB1:EVENTOUT" : 15,
"PB1:HRTIM1_SCOUT" : 13,
"PB1:LPUART1_DE" : 12,
"PB1:LPUART1_RTS" : 12,
"PB1:QUADSPI1_BK1_IO0" : 10,
"PB1:TIM1_CH3N" : 6,
"PB1:TIM3_CH4" : 2,
"PB1:TIM8_CH3N" : 4,
"PB2:EVENTOUT" : 15,
"PB2:HRTIM1_SCIN" : 13,
"PB2:I2C3_SMBA" : 4,
"PB2:LPTIM1_OUT" : 1,
"PB2:QUADSPI1_BK2_IO1" : 10,
"PB2:RTC_OUT2" : 0,
"PB2:TIM20_CH1" : 3,
"PB2:TIM5_CH1" : 2,
"PB3:CRS_SYNC" : 3,
"PB3:EVENTOUT" : 15,
"PB3:CAN3_RX" : 11,
"PB3:HRTIM1_EEV9" : 13,
"PB3:HRTIM1_SCOUT" : 12,
"PB3:I2S3_CK" : 6,
"PB3:SAI1_SCK_B" : 14,
"PB3:SPI1_SCK" : 5,
"PB3:SPI3_SCK" : 6,
"PB3:SYS_JTDO-SWO" : 0,
"PB3:TIM2_CH2" : 1,
"PB3:TIM3_ETR" : 10,
"PB3:TIM4_ETR" : 2,
"PB3:TIM8_CH1N" : 4,
"PB3:USART2_TX" : 7,
"PB4:EVENTOUT" : 15,
"PB4:CAN3_TX" : 11,
"PB4:HRTIM1_EEV7" : 13,
"PB4:SAI1_MCLK_B" : 14,
"PB4:SPI1_MISO" : 5,
"PB4:SPI3_MISO" : 6,
"PB4:SYS_JTRST" : 0,
"PB4:TIM16_CH1" : 1,
"PB4:TIM17_BKIN" : 10,
"PB4:TIM3_CH1" : 2,
"PB4:TIM8_CH2N" : 4,
"PB4:UART5_DE" : 8,
"PB4:UART5_RTS" : 8,
"PB4:USART2_RX" : 7,
"PB5:EVENTOUT" : 15,
"PB5:CAN2_RX" : 9,
"PB5:HRTIM1_EEV6" : 13,
"PB5:I2C1_SMBA" : 4,
"PB5:I2C3_SDA" : 8,
"PB5:I2S3_SD" : 6,
"PB5:LPTIM1_IN1" : 11,
"PB5:SAI1_SD_B" : 12,
"PB5:SPI1_MOSI" : 5,
"PB5:SPI3_MOSI" : 6,
"PB5:TIM16_BKIN" : 1,
"PB5:TIM17_CH1" : 10,
"PB5:TIM3_CH2" : 2,
"PB5:TIM8_CH3N" : 3,
"PB5:UART5_CTS" : 14,
"PB5:USART2_CK" : 7,
"PB6:COMP4_OUT" : 8,
"PB6:EVENTOUT" : 15,
"PB6:CAN2_TX" : 9,
"PB6:HRTIM1_EEV4" : 13,
"PB6:HRTIM1_SCIN" : 12,
"PB6:LPTIM1_ETR" : 11,
"PB6:SAI1_FS_B" : 14,
"PB6:TIM16_CH1N" : 1,
"PB6:TIM4_CH1" : 2,
"PB6:TIM8_BKIN2" : 10,
"PB6:TIM8_CH1" : 5,
"PB6:TIM8_ETR" : 6,
"PB6:USART1_TX" : 7,
"PB7:COMP3_OUT" : 8,
"PB7:EVENTOUT" : 15,
"PB7:HRTIM1_EEV3" : 13,
"PB7:I2C1_SDA" : 4,
"PB7:I2C4_SDA" : 3,
"PB7:LPTIM1_IN2" : 11,
"PB7:TIM17_CH1N" : 1,
"PB7:TIM3_CH4" : 10,
"PB7:TIM4_CH2" : 2,
"PB7:TIM8_BKIN" : 5,
"PB7:UART4_CTS" : 14,
"PB7:USART1_RX" : 7,
"PB8:COMP1_OUT" : 8,
"PB8:EVENTOUT" : 15,
"PB8:CAN1_RX" : 9,
"PB8:HRTIM1_EEV8" : 13,
"PB8:I2C1_SCL" : 4,
"PB8:SAI1_CK1" : 3,
"PB8:SAI1_MCLK_A" : 14,
"PB8:TIM16_CH1" : 1,
"PB8:TIM1_BKIN" : 12,
"PB8:TIM4_CH3" : 2,
"PB8:TIM8_CH2" : 10,
"PB8:USART3_RX" : 7,
"PB9:COMP2_OUT" : 8,
"PB9:EVENTOUT" : 15,
"PB9:CAN1_TX" : 9,
"PB9:HRTIM1_EEV5" : 13,
"PB9:I2C1_SDA" : 4,
"PB9:IR_OUT" : 6,
"PB9:SAI1_D2" : 3,
"PB9:SAI1_FS_A" : 14,
"PB9:TIM17_CH1" : 1,
"PB9:TIM1_CH3N" : 12,
"PB9:TIM4_CH4" : 2,
"PB9:TIM8_CH3" : 10,
"PB9:USART3_TX" : 7,
"PB10:EVENTOUT" : 15,
"PB10:HRTIM1_FLT3" : 13,
"PB10:LPUART1_RX" : 8,
"PB10:QUADSPI1_CLK" : 10,
"PB10:SAI1_SCK_A" : 14,
"PB10:TIM1_BKIN" : 12,
"PB10:TIM2_CH3" : 1,
"PB10:USART3_TX" : 7,
"PB11:EVENTOUT" : 15,
"PB11:HRTIM1_FLT4" : 13,
"PB11:LPUART1_TX" : 8,
"PB11:QUADSPI1_BK1_NCS" : 10,
"PB11:TIM2_CH4" : 1,
"PB11:USART3_RX" : 7,
"PB12:EVENTOUT" : 15,
"PB12:CAN2_RX" : 9,
"PB12:HRTIM1_CHC1" : 13,
"PB12:I2C2_SMBA" : 4,
"PB12:I2S2_WS" : 5,
"PB12:LPUART1_DE" : 8,
"PB12:LPUART1_RTS" : 8,
"PB12:SPI2_NSS" : 5,
"PB12:TIM1_BKIN" : 6,
"PB12:TIM5_ETR" : 2,
"PB12:USART3_CK" : 7,
"PB13:EVENTOUT" : 15,
"PB13:CAN2_TX" : 9,
"PB13:HRTIM1_CHC2" : 13,
"PB13:I2S2_CK" : 5,
"PB13:LPUART1_CTS" : 8,
"PB13:SPI2_SCK" : 5,
"PB13:TIM1_CH1N" : 6,
"PB13:USART3_CTS" : 7,
"PB13:USART3_NSS" : 7,
"PB14:COMP4_OUT" : 8,
"PB14:EVENTOUT" : 15,
"PB14:HRTIM1_CHD1" : 13,
"PB14:SPI2_MISO" : 5,
"PB14:TIM15_CH1" : 1,
"PB14:TIM1_CH2N" : 6,
"PB14:USART3_DE" : 7,
"PB14:USART3_RTS" : 7,
"PB15:COMP3_OUT" : 3,
"PB15:EVENTOUT" : 15,
"PB15:HRTIM1_CHD2" : 13,
"PB15:I2S2_SD" : 5,
"PB15:RTC_REFIN" : 0,
"PB15:SPI2_MOSI" : 5,
"PB15:TIM15_CH1N" : 2,
"PB15:TIM15_CH2" : 1,
"PB15:TIM1_CH3N" : 4,
"PC0:EVENTOUT" : 15,
"PC0:LPTIM1_IN1" : 1,
"PC0:LPUART1_RX" : 8,
"PC0:TIM1_CH1" : 2,
"PC1:EVENTOUT" : 15,
"PC1:LPTIM1_OUT" : 1,
"PC1:LPUART1_TX" : 8,
"PC1:QUADSPI1_BK2_IO0" : 10,
"PC1:SAI1_SD_A" : 13,
"PC1:TIM1_CH2" : 2,
"PC2:COMP3_OUT" : 3,
"PC2:EVENTOUT" : 15,
"PC2:LPTIM1_IN2" : 1,
"PC2:QUADSPI1_BK2_IO1" : 10,
"PC2:TIM1_CH3" : 2,
"PC2:TIM20_CH2" : 6,
"PC3:EVENTOUT" : 15,
"PC3:LPTIM1_ETR" : 1,
"PC3:QUADSPI1_BK2_IO2" : 10,
"PC3:SAI1_D1" : 3,
"PC3:SAI1_SD_A" : 13,
"PC3:TIM1_BKIN2" : 6,
"PC3:TIM1_CH4" : 2,
"PC4:EVENTOUT" : 15,
"PC4:I2C2_SCL" : 4,
"PC4:QUADSPI1_BK2_IO3" : 10,
"PC4:TIM1_ETR" : 2,
"PC4:USART1_TX" : 7,
"PC5:EVENTOUT" : 15,
"PC5:HRTIM1_EEV10" : 13,
"PC5:SAI1_D3" : 3,
"PC5:TIM15_BKIN" : 2,
"PC5:TIM1_CH4N" : 6,
"PC5:USART1_RX" : 7,
"PC6:COMP6_OUT" : 7,
"PC6:EVENTOUT" : 15,
"PC6:HRTIM1_CHF1" : 13,
"PC6:HRTIM1_EEV10" : 3,
"PC6:I2C4_SCL" : 8,
"PC6:I2S2_MCK" : 6,
"PC6:TIM3_CH1" : 2,
"PC6:TIM8_CH1" : 4,
"PC7:COMP5_OUT" : 7,
"PC7:EVENTOUT" : 15,
"PC7:HRTIM1_CHF2" : 13,
"PC7:HRTIM1_FLT5" : 3,
"PC7:I2C4_SDA" : 8,
"PC7:I2S3_MCK" : 6,
"PC7:TIM3_CH2" : 2,
"PC7:TIM8_CH2" : 4,
"PC8:COMP7_OUT" : 7,
"PC8:EVENTOUT" : 15,
"PC8:HRTIM1_CHE1" : 3,
"PC8:I2C3_SCL" : 8,
"PC8:TIM20_CH3" : 6,
"PC8:TIM3_CH3" : 2,
"PC8:TIM8_CH3" : 4,
"PC9:EVENTOUT" : 15,
"PC9:HRTIM1_CHE2" : 3,
"PC9:I2C3_SDA" : 8,
"PC9:I2S_CKIN" : 5,
"PC9:TIM3_CH4" : 2,
"PC9:TIM8_BKIN2" : 6,
"PC9:TIM8_CH4" : 4,
"PC10:EVENTOUT" : 15,
"PC10:HRTIM1_FLT6" : 13,
"PC10:I2S3_CK" : 6,
"PC10:SPI3_SCK" : 6,
"PC10:TIM8_CH1N" : 4,
"PC10:UART4_TX" : 5,
"PC10:USART3_TX" : 7,
"PC11:EVENTOUT" : 15,
"PC11:HRTIM1_EEV2" : 3,
"PC11:I2C3_SDA" : 8,
"PC11:SPI3_MISO" : 6,
"PC11:TIM8_CH2N" : 4,
"PC11:UART4_RX" : 5,
"PC11:USART3_RX" : 7,
"PC12:EVENTOUT" : 15,
"PC12:HRTIM1_EEV1" : 3,
"PC12:I2S3_SD" : 6,
"PC12:SPI3_MOSI" : 6,
"PC12:TIM5_CH2" : 1,
"PC12:TIM8_CH3N" : 4,
"PC12:UART5_TX" : 5,
"PC12:UCPD1_FRSTX1" : 14,
"PC12:UCPD1_FRSTX2" : 14,
"PC12:USART3_CK" : 7,
"PC13:EVENTOUT" : 15,
"PC13:TIM1_BKIN" : 2,
"PC13:TIM1_CH1N" : 4,
"PC13:TIM8_CH4N" : 6,
"PC14:EVENTOUT" : 15,
"PC15:EVENTOUT" : 15,
"PD2:EVENTOUT" : 15,
"PD2:TIM3_ETR" : 2,
"PD2:TIM8_BKIN" : 4,
"PD2:UART5_RX" : 5,
"PF0:EVENTOUT" : 15,
"PF0:I2C2_SDA" : 4,
"PF0:I2S2_WS" : 5,
"PF0:SPI2_NSS" : 5,
"PF0:TIM1_CH3N" : 6,
"PF1:EVENTOUT" : 15,
"PF1:I2S2_CK" : 5,
"PF1:SPI2_SCK" : 5,
"PG10:EVENTOUT" : 15,
"PG10:RCC_MCO" : 0,
}
# Mapping of GPIO pin name to its ADC1 analog input channel number,
# taken from the STM32G474 datasheet pin definitions.
ADC1_map = {
    # format is PIN : ADC1_CHAN
    "PA0" : 1,
    "PA1" : 2,
    "PA2" : 3,
    "PA3" : 4,
    "PB14" : 5,
    "PC0" : 6,
    "PC1" : 7,
    "PC2" : 8,
    "PC3" : 9,
    "PF0" : 10,
    "PB12" : 11,
    "PB1" : 12,
    "PB11" : 14,
    "PB0" : 15,
}
| gpl-3.0 |
Intel-Corporation/tensorflow | tensorflow/python/data/experimental/kernel_tests/serialization/range_dataset_serialization_test.py | 13 | 4593 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the RangeDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class RangeDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):
  """Checks that a RangeDataset iterator can be checkpointed and restored."""

  def _iterator_checkpoint_prefix_local(self):
    # File path (inside the test temp dir) used to persist iterator state.
    return os.path.join(self.get_temp_dir(), "iterator")

  def _save_op(self, iterator_resource):
    """Return an op that serializes the iterator state to a local file."""
    iterator_state_variant = gen_dataset_ops.serialize_iterator(
        iterator_resource)
    save_op = io_ops.write_file(
        self._iterator_checkpoint_prefix_local(),
        parsing_ops.serialize_tensor(iterator_state_variant))
    return save_op

  def _restore_op(self, iterator_resource):
    """Return an op that restores iterator state from the local file."""
    iterator_state_variant = parsing_ops.parse_tensor(
        io_ops.read_file(self._iterator_checkpoint_prefix_local()),
        dtypes.variant)
    restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
                                                      iterator_state_variant)
    return restore_op

  def testSaveRestore(self):

    def _build_graph(start, stop):
      # Builds a fresh graph with a range iterator plus init/save/restore ops.
      iterator = dataset_ops.make_initializable_iterator(
          dataset_ops.Dataset.range(start, stop))
      init_op = iterator.initializer
      get_next = iterator.get_next()
      save_op = self._save_op(iterator._iterator_resource)
      restore_op = self._restore_op(iterator._iterator_resource)
      return init_op, get_next, save_op, restore_op

    # Saving and restoring in different sessions.
    start = 2
    stop = 10
    break_point = 5
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, _ = _build_graph(start, stop)
      with self.session(graph=g) as sess:
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(init_op)
        # Consume part of the range, then checkpoint.
        for i in range(start, break_point):
          self.assertEqual(i, self.evaluate(get_next))
        self.evaluate(save_op)

    with ops.Graph().as_default() as g:
      init_op, get_next, _, restore_op = _build_graph(start, stop)
      with self.session(graph=g) as sess:
        self.evaluate(init_op)
        self.evaluate(restore_op)
        # The restored iterator must resume exactly at the break point.
        for i in range(break_point, stop):
          self.assertEqual(i, self.evaluate(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          self.evaluate(get_next)

    # Saving and restoring in same session.
    with ops.Graph().as_default() as g:
      init_op, get_next, save_op, restore_op = _build_graph(start, stop)
      with self.session(graph=g) as sess:
        self.evaluate(variables.global_variables_initializer())
        self.evaluate(init_op)
        for i in range(start, break_point):
          self.assertEqual(i, self.evaluate(get_next))
        self.evaluate(save_op)
        self.evaluate(restore_op)
        for i in range(break_point, stop):
          self.assertEqual(i, self.evaluate(get_next))
        with self.assertRaises(errors.OutOfRangeError):
          self.evaluate(get_next)

  def _build_range_dataset(self, start, stop):
    return dataset_ops.Dataset.range(start, stop)

  def testRangeCore(self):
    # Exercise the shared core serialization checks from the base class.
    start = 2
    stop = 10
    stop_1 = 8
    self.run_core_tests(lambda: self._build_range_dataset(start, stop),
                        lambda: self._build_range_dataset(start, stop_1),
                        stop - start)
if __name__ == "__main__":
test.main()
| apache-2.0 |
pczerkas/aodh | aodh/tests/evaluator/test_gnocchi.py | 1 | 20984 | #
# Copyright 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import unittest
import uuid
import mock
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslotest import mockpatch
import pytz
import six
from six import moves
from aodh.evaluator import gnocchi
from aodh.storage import models
from aodh.tests import constants
from aodh.tests.evaluator import base
class FakeResponse(object):
    """Minimal stand-in for a ``requests`` response from the Gnocchi API.

    A measures payload is a list of (timestamp, granularity, value)
    triples; on HTTP 200 the plain values are exposed via ``values``,
    otherwise ``values`` is empty.
    """

    def __init__(self, code, data):
        self.status_code = code
        self.text = jsonutils.dumps(data)
        if code != 200:
            self.values = []
        else:
            self.values = [triple[2] for triple in data]
class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
EVALUATOR = gnocchi.GnocchiThresholdEvaluator
    def setUp(self):
        # Replace the evaluator's `requests` module with a mock so HTTP
        # calls to Gnocchi can be scripted per test.
        super(TestGnocchiThresholdEvaluate, self).setUp()
        self.requests = self.useFixture(mockpatch.Patch(
            'aodh.evaluator.gnocchi.requests')).mock
    def prepare_alarms(self):
        """Create the three alarm flavours under test: one per Gnocchi
        threshold rule type (resource, aggregation-by-metrics,
        aggregation-by-resources)."""
        self.alarms = [
            # gnocchi_resources_threshold: single resource, single metric.
            models.Alarm(name='instance_running_hot',
                         description='instance_running_hot',
                         type='gnocchi_resources_threshold',
                         enabled=True,
                         user_id='foobar',
                         project_id='snafu',
                         alarm_id=str(uuid.uuid4()),
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         time_constraints=[],
                         rule=dict(
                             comparison_operator='gt',
                             threshold=80.0,
                             evaluation_periods=5,
                             aggregation_method='mean',
                             granularity=60,
                             metric='cpu_util',
                             resource_type='instance',
                             resource_id='my_instance')
                         ),
            # gnocchi_aggregation_by_metrics_threshold: explicit metric ids.
            models.Alarm(name='group_running_idle',
                         description='group_running_idle',
                         type='gnocchi_aggregation_by_metrics_threshold',
                         enabled=True,
                         user_id='foobar',
                         project_id='snafu',
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         alarm_id=str(uuid.uuid4()),
                         time_constraints=[],
                         rule=dict(
                             comparison_operator='le',
                             threshold=10.0,
                             evaluation_periods=4,
                             aggregation_method='max',
                             granularity=300,
                             metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83',
                                      '9ddc209f-42f8-41e1-b8f1-8804f59c4053']),
                         ),
            # gnocchi_aggregation_by_resources_threshold: resources selected
            # by a JSON query filter.
            models.Alarm(name='instance_not_running',
                         description='instance_running_hot',
                         type='gnocchi_aggregation_by_resources_threshold',
                         enabled=True,
                         user_id='foobar',
                         project_id='snafu',
                         alarm_id=str(uuid.uuid4()),
                         state='insufficient data',
                         state_timestamp=constants.MIN_DATETIME,
                         timestamp=constants.MIN_DATETIME,
                         insufficient_data_actions=[],
                         ok_actions=[],
                         alarm_actions=[],
                         repeat_actions=False,
                         time_constraints=[],
                         rule=dict(
                             comparison_operator='gt',
                             threshold=80.0,
                             evaluation_periods=6,
                             aggregation_method='mean',
                             granularity=50,
                             metric='cpu_util',
                             resource_type='instance',
                             query='{"=": {"server_group": '
                                   '"my_autoscaling_group"}}')
                         ),
        ]
    @staticmethod
    def _get_stats(granularity, values):
        """Fake a successful Gnocchi measures response containing one
        (timestamp, granularity, value) triple per entry of *values*.

        NOTE(review): every triple gets the same timestamp
        (now - len(values) * granularity); presumably the evaluator only
        consumes the values -- confirm if timestamps ever matter.
        """
        now = timeutils.utcnow_ts()
        return FakeResponse(
            200, [[six.text_type(now - len(values) * granularity),
                   granularity, value] for value in values])
@staticmethod
def _reason_data(disposition, count, most_recent):
return {'type': 'threshold', 'disposition': disposition,
'count': count, 'most_recent': most_recent}
def _set_all_rules(self, field, value):
for alarm in self.alarms:
alarm.rule[field] = value
    def test_retry_transient_api_failure(self):
        """Transient API errors (exception / HTTP 500) leave the alarms at
        'insufficient data'; a later clean evaluation moves them to 'ok'."""
        means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v
                                     for v in moves.xrange(5)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v
                                     for v in moves.xrange(4)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v
                                     for v in moves.xrange(6)])
        # First round: all three requests fail; second round: all succeed.
        self.requests.get.side_effect = [Exception('boom'),
                                         FakeResponse(500, "error"),
                                         means,
                                         maxs]
        self.requests.post.side_effect = [FakeResponse(500, "error"), avgs2]
        self._evaluate_all_alarms()
        self._assert_all_alarms('insufficient data')
        self._evaluate_all_alarms()
        self._assert_all_alarms('ok')
    def test_simple_insufficient(self):
        """Empty statistics flip every alarm from 'ok' to
        'insufficient data' and trigger one notification each."""
        self._set_all_alarms('ok')
        self.requests.get.return_value = FakeResponse(200, [])
        self.requests.post.return_value = FakeResponse(200, [])
        self._evaluate_all_alarms()
        self._assert_all_alarms('insufficient data')
        expected = [mock.call(alarm) for alarm in self.alarms]
        update_calls = self.storage_conn.update_alarm.call_args_list
        self.assertEqual(expected, update_calls)
        # One 'unknown' notification per alarm, reporting the previous state.
        expected = [mock.call(
            alarm,
            'ok',
            ('%d datapoints are unknown'
             % alarm.rule['evaluation_periods']),
            self._reason_data('unknown',
                              alarm.rule['evaluation_periods'],
                              None))
            for alarm in self.alarms]
        self.assertEqual(expected, self.notifier.notify.call_args_list)
    @mock.patch.object(timeutils, 'utcnow')
    def test_simple_alarm_trip(self, utcnow):
        """All datapoints outside threshold trip each alarm to 'alarm',
        and the Gnocchi API is queried with the expected URLs/windows."""
        # Pin "now" so the expected start/end query windows are stable.
        utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0)
        self._set_all_alarms('ok')
        avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
                                    for v in moves.xrange(1, 6)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
                                     for v in moves.xrange(4)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
                                     for v in moves.xrange(1, 7)])
        self.requests.get.side_effect = [avgs, maxs]
        self.requests.post.side_effect = [avgs2]
        self._evaluate_all_alarms()

        expected_headers = {'X-Auth-Token': 'fake_token',
                            'Content-Type': 'application/json'}

        # Window start = now - evaluation_periods * granularity per alarm.
        start_alarm1 = "2015-01-26T12:51:00"
        start_alarm2 = "2015-01-26T12:32:00"
        start_alarm3 = "2015-01-26T12:51:10"
        end = "2015-01-26T12:57:00"

        self.assertEqual([
            mock.call(url='http://localhost:8041/v1/resource/instance/'
                      'my_instance/metric/cpu_util/measures',
                      params={'aggregation': 'mean',
                              'start': start_alarm1, 'end': end},
                      headers=expected_headers),
            mock.call(url='http://localhost:8041/v1/aggregation/metric',
                      params={'aggregation': 'max',
                              'start': start_alarm2, 'end': end,
                              'metric[]': [
                                  '0bb1604d-1193-4c0a-b4b8-74b170e35e83',
                                  '9ddc209f-42f8-41e1-b8f1-8804f59c4053']},
                      headers=expected_headers)],
            self.requests.get.mock_calls)
        # The by-resources rule POSTs its JSON query filter.
        self.assertEqual([
            mock.call(url='http://localhost:8041/v1/aggregation/resource/'
                      'instance/metric/cpu_util',
                      params={'aggregation': 'mean',
                              'start': start_alarm3, 'end': end,
                              'needed_overlap': 0},
                      data='{"=": {"server_group": "my_autoscaling_group"}}',
                      headers=expected_headers),
            ],
            self.requests.post.mock_calls)

        self._assert_all_alarms('alarm')
        expected = [mock.call(alarm) for alarm in self.alarms]
        update_calls = self.storage_conn.update_alarm.call_args_list
        self.assertEqual(expected, update_calls)
        reasons = ['Transition to alarm due to 5 samples outside'
                   ' threshold, most recent: %s' % avgs.values[-1],
                   'Transition to alarm due to 4 samples outside'
                   ' threshold, most recent: %s' % maxs.values[-1],
                   'Transition to alarm due to 6 samples outside'
                   ' threshold, most recent: %s' % avgs2.values[-1],
                   ]
        reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
                        self._reason_data('outside', 4, maxs.values[-1]),
                        self._reason_data('outside', 6, avgs2.values[-1])]
        expected = [mock.call(alarm, 'ok', reason, reason_data)
                    for alarm, reason, reason_data
                    in zip(self.alarms, reasons, reason_datas)]
        self.assertEqual(expected, self.notifier.notify.call_args_list)
    def test_simple_alarm_clear(self):
        """All datapoints back inside threshold clear each alarm from
        'alarm' to 'ok' with the matching notifications."""
        self._set_all_alarms('alarm')
        avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v
                                    for v in moves.xrange(5)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v
                                     for v in moves.xrange(1, 5)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v
                                     for v in moves.xrange(6)])
        self.requests.post.side_effect = [avgs2]
        self.requests.get.side_effect = [avgs, maxs]
        self._evaluate_all_alarms()
        self._assert_all_alarms('ok')
        expected = [mock.call(alarm) for alarm in self.alarms]
        update_calls = self.storage_conn.update_alarm.call_args_list
        self.assertEqual(expected, update_calls)
        reasons = ['Transition to ok due to 5 samples inside'
                   ' threshold, most recent: %s' % avgs.values[-1],
                   'Transition to ok due to 4 samples inside'
                   ' threshold, most recent: %s' % maxs.values[-1],
                   'Transition to ok due to 6 samples inside'
                   ' threshold, most recent: %s' % avgs2.values[-1]]
        reason_datas = [self._reason_data('inside', 5, avgs.values[-1]),
                        self._reason_data('inside', 4, maxs.values[-1]),
                        self._reason_data('inside', 6, avgs2.values[-1])]
        expected = [mock.call(alarm, 'alarm', reason, reason_data)
                    for alarm, reason, reason_data
                    in zip(self.alarms, reasons, reason_datas)]
        self.assertEqual(expected, self.notifier.notify.call_args_list)
    def test_equivocal_from_known_state(self):
        """Mixed datapoints (some in, some out) keep a known state:
        no storage update and no notification."""
        self._set_all_alarms('ok')
        avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
                                    for v in moves.xrange(5)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
                                     for v in moves.xrange(-1, 3)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
                                     for v in moves.xrange(6)])
        self.requests.post.side_effect = [avgs2]
        self.requests.get.side_effect = [avgs, maxs]
        self._evaluate_all_alarms()
        self._assert_all_alarms('ok')
        self.assertEqual(
            [],
            self.storage_conn.update_alarm.call_args_list)
        self.assertEqual([], self.notifier.notify.call_args_list)
    def test_equivocal_from_known_state_and_repeat_actions(self):
        """With repeat_actions set, a non-transition still re-notifies.

        Only alarms[1] has repeat_actions enabled, so only it produces a
        'Remaining as ok' notification; no alarm is persisted because no
        state actually changed.
        """
        self._set_all_alarms('ok')
        self.alarms[1].repeat_actions = True
        avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
                                    for v in moves.xrange(5)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
                                     for v in moves.xrange(-1, 3)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
                                     for v in moves.xrange(6)])
        # Stubbed HTTP responses consumed by the evaluator.
        self.requests.post.side_effect = [avgs2]
        self.requests.get.side_effect = [avgs, maxs]
        self._evaluate_all_alarms()
        self._assert_all_alarms('ok')
        self.assertEqual([], self.storage_conn.update_alarm.call_args_list)
        reason = ('Remaining as ok due to 4 samples inside'
                  ' threshold, most recent: 8.0')
        reason_datas = self._reason_data('inside', 4, 8.0)
        expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)]
        self.assertEqual(expected, self.notifier.notify.call_args_list)
    def test_unequivocal_from_known_state_and_repeat_actions(self):
        """repeat_actions re-notifies an alarm that stays in 'alarm'.

        All stats evaluate unambiguously 'outside', so every alarm remains
        in 'alarm' (no storage update), and the one alarm with
        repeat_actions produces a 'Remaining as alarm' notification.
        """
        self._set_all_alarms('alarm')
        self.alarms[1].repeat_actions = True
        avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
                                    for v in moves.xrange(1, 6)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
                                     for v in moves.xrange(4)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
                                     for v in moves.xrange(6)])
        # Stubbed HTTP responses consumed by the evaluator.
        self.requests.post.side_effect = [avgs2]
        self.requests.get.side_effect = [avgs, maxs]
        self._evaluate_all_alarms()
        self._assert_all_alarms('alarm')
        self.assertEqual([], self.storage_conn.update_alarm.call_args_list)
        reason = ('Remaining as alarm due to 4 samples outside'
                  ' threshold, most recent: 7.0')
        reason_datas = self._reason_data('outside', 4, 7.0)
        expected = [mock.call(self.alarms[1], 'alarm',
                              reason, reason_datas)]
        self.assertEqual(expected, self.notifier.notify.call_args_list)
    def test_state_change_and_repeat_actions(self):
        """A real transition notifies exactly once even with repeat_actions.

        All alarms go 'ok' -> 'alarm'; each is persisted once and notified
        once with the previous state 'ok' — repeat_actions on alarms[0]/[1]
        must not cause duplicate notifications for the same evaluation.
        """
        self._set_all_alarms('ok')
        self.alarms[0].repeat_actions = True
        self.alarms[1].repeat_actions = True
        avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
                                    for v in moves.xrange(1, 6)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
                                     for v in moves.xrange(4)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
                                     for v in moves.xrange(1, 7)])
        # Stubbed HTTP responses consumed by the evaluator.
        self.requests.post.side_effect = [avgs2]
        self.requests.get.side_effect = [avgs, maxs]
        self._evaluate_all_alarms()
        self._assert_all_alarms('alarm')
        expected = [mock.call(alarm) for alarm in self.alarms]
        update_calls = self.storage_conn.update_alarm.call_args_list
        self.assertEqual(expected, update_calls)
        reasons = ['Transition to alarm due to 5 samples outside'
                   ' threshold, most recent: %s' % avgs.values[-1],
                   'Transition to alarm due to 4 samples outside'
                   ' threshold, most recent: %s' % maxs.values[-1],
                   'Transition to alarm due to 6 samples outside'
                   ' threshold, most recent: %s' % avgs2.values[-1]]
        reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
                        self._reason_data('outside', 4, maxs.values[-1]),
                        self._reason_data('outside', 6, avgs2.values[-1])]
        # 'ok' here is the previous state passed to notify().
        expected = [mock.call(alarm, 'ok', reason, reason_data)
                    for alarm, reason, reason_data
                    in zip(self.alarms, reasons, reason_datas)]
        self.assertEqual(expected, self.notifier.notify.call_args_list)
    def test_equivocal_from_unknown(self):
        """From 'insufficient data', unambiguous stats force a transition.

        All alarms move to 'alarm'; each update is persisted and a
        notification is emitted carrying the previous state
        'insufficient data'.
        """
        self._set_all_alarms('insufficient data')
        avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v
                                    for v in moves.xrange(1, 6)])
        maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
                                     for v in moves.xrange(4)])
        avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
                                     for v in moves.xrange(1, 7)])
        # Stubbed HTTP responses consumed by the evaluator.
        self.requests.post.side_effect = [avgs2]
        self.requests.get.side_effect = [avgs, maxs]
        self._evaluate_all_alarms()
        self._assert_all_alarms('alarm')
        expected = [mock.call(alarm) for alarm in self.alarms]
        update_calls = self.storage_conn.update_alarm.call_args_list
        self.assertEqual(expected, update_calls)
        reasons = ['Transition to alarm due to 5 samples outside'
                   ' threshold, most recent: %s' % avgs.values[-1],
                   'Transition to alarm due to 4 samples outside'
                   ' threshold, most recent: %s' % maxs.values[-1],
                   'Transition to alarm due to 6 samples outside'
                   ' threshold, most recent: %s' % avgs2.values[-1]]
        reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
                        self._reason_data('outside', 4, maxs.values[-1]),
                        self._reason_data('outside', 6, avgs2.values[-1])]
        expected = [mock.call(alarm, 'insufficient data',
                              reason, reason_data)
                    for alarm, reason, reason_data
                    in zip(self.alarms, reasons, reason_datas)]
        self.assertEqual(expected, self.notifier.notify.call_args_list)
    @unittest.skipIf(six.PY3,
                     "the aodh base class is not python 3 ready")
    @mock.patch.object(timeutils, 'utcnow')
    def test_no_state_change_outside_time_constraint(self, mock_utcnow):
        """An alarm outside its time constraint is not evaluated at all.

        The constraint covers 11:00-14:00 (Ljubljana time); 'now' is mocked
        to 15:00 local, so no state change, no persistence and no
        notification may occur.
        """
        self._set_all_alarms('ok')
        self.alarms[0].time_constraints = [
            {'name': 'test',
             'description': 'test',
             'start': '0 11 * * *',  # daily at 11:00
             'duration': 10800,  # 3 hours
             'timezone': 'Europe/Ljubljana'}
        ]
        self.alarms[1].time_constraints = self.alarms[0].time_constraints
        self.alarms[2].time_constraints = self.alarms[0].time_constraints
        dt = datetime.datetime(2014, 1, 1, 15, 0, 0,
                               tzinfo=pytz.timezone('Europe/Ljubljana'))
        # utcnow() is patched so the evaluator sees a fixed point in time.
        mock_utcnow.return_value = dt.astimezone(pytz.UTC)
        self.requests.get.return_value = []
        self._evaluate_all_alarms()
        self._assert_all_alarms('ok')
        update_calls = self.storage_conn.update_alarm.call_args_list
        self.assertEqual([], update_calls,
                         "Alarm should not change state if the current "
                         " time is outside its time constraint.")
        self.assertEqual([], self.notifier.notify.call_args_list)
| apache-2.0 |
erocs/Minecraft-Region-Fixer | interactive.py | 1 | 23796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Region Fixer.
# Fix your region files with a backup copy of your Minecraft world.
# Copyright (C) 2011 Alejandro Aguilera (Fenixin)
# https://github.com/Fenixin/Minecraft-Region-Fixer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# TODO needs big update!
import world
from cmd import Cmd
from scan import scan_world, scan_regionset
class interactive_loop(Cmd):
    """Interactive shell for Region-Fixer (Python 2).

    Built on the stdlib ``cmd.Cmd`` loop: ``do_*`` methods implement the
    commands, ``help_*`` methods their help texts and ``complete_*``
    methods tab completion.  The shell acts on a single "workload" at a
    time (``self.current``), which is either a ``world.World`` or a
    ``world.RegionSet`` chosen with ``set workload``.
    """

    def __init__(self, world_list, regionset, options, backup_worlds):
        Cmd.__init__(self)
        self.world_list = world_list
        self.regionset = regionset
        self.world_names = [str(i.name) for i in self.world_list]
        # if there's only one world use it
        if len(self.world_list) == 1 and len(self.regionset) == 0:
            self.current = world_list[0]
        elif len(self.world_list) == 0 and len(self.regionset) > 0:
            self.current = self.regionset
        else:
            self.current = None
        self.options = options
        self.backup_worlds = backup_worlds
        self.prompt = "#-> "
        self.intro = "Minecraft Region-Fixer interactive mode.\n(Use tab to autocomplete. Autocomplete doens't work on Windows. Type help for a list of commands.)\n"
        # other region-fixer stuff
        # possible args for chunks stuff
        # (pre-render the comma-separated list of valid chunk-problem
        # arguments once; reused by help texts and error messages)
        possible_args = ""
        first = True
        for i in world.CHUNK_PROBLEMS_ARGS.values() + ['all']:
            if not first:
                possible_args += ", "
            possible_args += i
            first = False
        self.possible_chunk_args_text = possible_args
        # possible args for region stuff
        possible_args = ""
        first = True
        for i in world.REGION_PROBLEMS_ARGS.values() + ['all']:
            if not first:
                possible_args += ", "
            possible_args += i
            first = False
        self.possible_region_args_text = possible_args

    # do
    def do_set(self,arg):
        """ Command to change some options and variables in interactive
        mode """
        args = arg.split()
        if len(args) > 2:
            print "Error: too many parameters."
        elif len(args) == 0:
            print "Write \'help set\' to see a list of all possible variables"
        else:
            if args[0] == "entity-limit":
                # With no value just show the current setting.
                if len(args) == 1:
                    print "entity-limit = {0}".format(self.options.entity_limit)
                else:
                    try:
                        if int(args[1]) >= 0:
                            self.options.entity_limit = int(args[1])
                            print "entity-limit = {0}".format(args[1])
                            print "Updating chunk status..."
                            # The limit changed, so chunk entity counts
                            # need to be re-evaluated.
                            self.current.rescan_entities(self.options)
                        else:
                            print "Invalid value. Valid values are positive integers and zero"
                    except ValueError:
                        print "Invalid value. Valid values are positive integers and zero"
            elif args[0] == "workload":
                if len(args) == 1:
                    # No value: print the current workload plus everything
                    # that can be chosen.
                    if self.current:
                        print "Current workload:\n{0}\n".format(self.current.__str__())
                    print "List of possible worlds and region-sets (determined by the command used to run region-fixer):"
                    number = 1
                    for w in self.world_list:
                        print " ### world{0} ###".format(number)
                        number += 1
                        # add a tab and print
                        for i in w.__str__().split("\n"): print "\t" + i
                        print
                    print " ### regionset ###"
                    for i in self.regionset.__str__().split("\n"): print "\t" + i
                    print "\n(Use \"set workload world1\" or name_of_the_world or regionset to choose one)"
                else:
                    a = args[1]
                    # NOTE(review): the 'worldN' shorthand only supports a
                    # single-digit N, and a non-digit 6th character makes
                    # int() raise ValueError here -- TODO confirm/handle.
                    if len(a) == 6 and a[:5] == "world" and int(a[-1]) >= 1 :
                        # get the number and choose the correct world from the list
                        number = int(args[1][-1]) - 1
                        try:
                            self.current = self.world_list[number]
                            print "workload = {0}".format(self.current.world_path)
                        except IndexError:
                            print "This world is not in the list!"
                    elif a in self.world_names:
                        # for/else: the else runs only if no world matched.
                        for w in self.world_list:
                            if w.name == args[1]:
                                self.current = w
                                print "workload = {0}".format(self.current.world_path)
                                break
                        else:
                            print "This world name is not on the list!"
                    elif args[1] == "regionset":
                        if len(self.regionset):
                            self.current = self.regionset
                            print "workload = set of region files"
                        else:
                            print "The region set is empty!"
                    else:
                        print "Invalid world number, world name or regionset."
            elif args[0] == "processes":
                if len(args) == 1:
                    print "processes = {0}".format(self.options.processes)
                else:
                    try:
                        if int(args[1]) > 0:
                            self.options.processes = int(args[1])
                            print "processes = {0}".format(args[1])
                        else:
                            print "Invalid value. Valid values are positive integers."
                    except ValueError:
                        print "Invalid value. Valid values are positive integers."
            elif args[0] == "verbose":
                if len(args) == 1:
                    print "verbose = {0}".format(str(self.options.verbose))
                else:
                    if args[1] == "True":
                        self.options.verbose = True
                        print "verbose = {0}".format(args[1])
                    elif args[1] == "False":
                        self.options.verbose = False
                        print "verbose = {0}".format(args[1])
                    else:
                        print "Invalid value. Valid values are True and False."
            else:
                print "Invalid argument! Write \'help set\' to see a list of valid variables."

    def do_summary(self, arg):
        """ Prints a summary of all the problems found in the region
        files. """
        if len(arg) == 0:
            if self.current:
                if self.current.scanned:
                    text = self.current.summary()
                    if text: print text
                    else: print "No problems found!"
                else:
                    print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."
            else:
                print "No world/region-set is set! Use \'set workload\' to set a world/regionset to work with."
        else:
            print "This command doesn't use any arguments."

    def do_current_workload(self, arg):
        """ Prints the info of the current workload """
        if len(arg) == 0:
            if self.current: print self.current
            else: print "No world/region-set is set! Use \'set workload\' to set a world/regionset to work with."
        else:
            print "This command doesn't use any arguments."

    def do_scan(self, arg):
        # TODO: what about scanning while deleting entities as done in non-interactive mode?
        # this would need an option to choose which of the two methods use
        """ Scans the current workload. """
        if len(arg.split()) > 0:
            print "Error: too many parameters."
        else:
            if self.current:
                if isinstance(self.current, world.World):
                    # Re-create the World object so a fresh scan starts
                    # from a clean state.
                    self.current = world.World(self.current.path)
                    scan_world(self.current, self.options)
                elif isinstance(self.current, world.RegionSet):
                    print "\n{0:-^60}".format(' Scanning region files ')
                    scan_regionset(self.current, self.options)
            else:
                print "No world set! Use \'set workload\'"

    def do_count_chunks(self, arg):
        """ Counts the number of chunks with the given problem and
        prints the result """
        if self.current and self.current.scanned:
            if len(arg.split()) == 0:
                print "Possible counters are: {0}".format(self.possible_chunk_args_text)
            elif len(arg.split()) > 1:
                print "Error: too many parameters."
            else:
                if arg in world.CHUNK_PROBLEMS_ARGS.values() or arg == 'all':
                    # count_chunks(None) counts every chunk, regardless of
                    # status.
                    total = self.current.count_chunks(None)
                    for problem, status_text, a in world.CHUNK_PROBLEMS_ITERATOR:
                        if arg == 'all' or arg == a:
                            n = self.current.count_chunks(problem)
                            print "Chunks with status \'{0}\': {1}".format(status_text, n)
                    print "Total chunks: {0}".format(total)
                else:
                    print "Unknown counter."
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."

    def do_count_regions(self, arg):
        """ Counts the number of regions with the given problem and
        prints the result """
        if self.current and self.current.scanned:
            if len(arg.split()) == 0:
                print "Possible counters are: {0}".format(self.possible_region_args_text)
            elif len(arg.split()) > 1:
                print "Error: too many parameters."
            else:
                if arg in world.REGION_PROBLEMS_ARGS.values() or arg == 'all':
                    # count_regions(None) counts every region file.
                    total = self.current.count_regions(None)
                    for problem, status_text, a in world.REGION_PROBLEMS_ITERATOR:
                        if arg == 'all' or arg == a:
                            n = self.current.count_regions(problem)
                            print "Regions with status \'{0}\': {1}".format(status_text, n)
                    print "Total regions: {0}".format(total)
                else:
                    print "Unknown counter."
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."

    def do_count_all(self, arg):
        """ Print all the counters for chunks and regions. """
        if self.current and self.current.scanned:
            if len(arg.split()) > 0:
                print "This command doesn't requiere any arguments"
            else:
                print "{0:#^60}".format("Chunk problems:")
                self.do_count_chunks('all')
                print "\n"
                print "{0:#^60}".format("Region problems:")
                self.do_count_regions('all')
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."

    # Deletes (after confirmation) every entity in chunks that exceed the
    # entity-limit option; see help_remove_entities for details.
    def do_remove_entities(self, arg):
        if self.current and self.current.scanned:
            if len(arg.split()) > 0:
                print "Error: too many parameters."
            else:
                print "WARNING: This will delete all the entities in the chunks that have more entities than entity-limit, make sure you know what entities are!.\nAre you sure you want to continue? (yes/no):"
                answer = raw_input()
                if answer == 'yes':
                    counter = self.current.remove_entities()
                    print "Deleted {0} entities.".format(counter)
                    if counter:
                        # Something was deleted: invalidate the scan and
                        # refresh the entity counts.
                        self.current.scanned = False
                        self.current.rescan_entities(self.options)
                elif answer == 'no':
                    print "Ok!"
                else: print "Invalid answer, use \'yes\' or \'no\' the next time!."
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."

    # Removes chunks whose scan status matches the given argument.
    def do_remove_chunks(self, arg):
        if self.current and self.current.scanned:
            if len(arg.split()) == 0:
                print "Possible arguments are: {0}".format(self.possible_chunk_args_text)
            elif len(arg.split()) > 1:
                print "Error: too many parameters."
            else:
                if arg in world.CHUNK_PROBLEMS_ARGS.values() or arg == 'all':
                    for problem, status_text, a in world.CHUNK_PROBLEMS_ITERATOR:
                        if arg == 'all' or arg == a:
                            n = self.current.remove_problematic_chunks(problem)
                            if n:
                                # The workload changed, a rescan is needed.
                                self.current.scanned = False
                            print "Removed {0} chunks with status \'{1}\'.\n".format(n, status_text)
                else:
                    print "Unknown argument."
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."

    # Replaces chunks whose status matches the argument using the backups.
    def do_replace_chunks(self, arg):
        if self.current and self.current.scanned:
            if len(arg.split()) == 0:
                print "Possible arguments are: {0}".format(self.possible_chunk_args_text)
            elif len(arg.split()) > 1:
                print "Error: too many parameters."
            else:
                if arg in world.CHUNK_PROBLEMS_ARGS.values() or arg == 'all':
                    for problem, status_text, a in world.CHUNK_PROBLEMS_ITERATOR:
                        if arg == 'all' or arg == a:
                            n = self.current.replace_problematic_chunks(self.backup_worlds, problem, self.options)
                            if n:
                                # The workload changed, a rescan is needed.
                                self.current.scanned = False
                            print "\nReplaced {0} chunks with status \'{1}\'.".format(n, status_text)
                else:
                    print "Unknown argument."
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."

    # Replaces region files whose status matches the argument using the
    # backups.
    def do_replace_regions(self, arg):
        if self.current and self.current.scanned:
            if len(arg.split()) == 0:
                print "Possible arguments are: {0}".format(self.possible_region_args_text)
            elif len(arg.split()) > 1:
                print "Error: too many parameters."
            else:
                if arg in world.REGION_PROBLEMS_ARGS.values() or arg == 'all':
                    for problem, status_text, a in world.REGION_PROBLEMS_ITERATOR:
                        if arg == 'all' or arg == a:
                            n = self.current.replace_problematic_regions(self.backup_worlds, problem, self.options)
                            if n:
                                # The workload changed, a rescan is needed.
                                self.current.scanned = False
                            print "\nReplaced {0} regions with status \'{1}\'.".format(n, status_text)
                else:
                    print "Unknown argument."
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."

    # Removes region files whose status matches the argument.
    def do_remove_regions(self, arg):
        if self.current and self.current.scanned:
            if len(arg.split()) == 0:
                print "Possible arguments are: {0}".format(self.possible_region_args_text)
            elif len(arg.split()) > 1:
                print "Error: too many parameters."
            else:
                if arg in world.REGION_PROBLEMS_ARGS.values() or arg == 'all':
                    for problem, status_text, a in world.REGION_PROBLEMS_ITERATOR:
                        if arg == 'all' or arg == a:
                            n = self.current.remove_problematic_regions(problem)
                            if n:
                                # The workload changed, a rescan is needed.
                                self.current.scanned = False
                            print "\nRemoved {0} regions with status \'{1}\'.".format(n, status_text)
                else:
                    print "Unknown argument."
        else:
            print "The world hasn't be scanned (or it needs a rescan). Use \'scan\' to scan it."
        pass

    # Returning True from a do_* method terminates the Cmd loop.
    def do_quit(self, arg):
        print "Quitting."
        return True

    def do_exit(self, arg):
        print "Exiting."
        return True

    def do_EOF(self, arg):
        print "Quitting."
        return True

    # complete
    # Shared helper for the complete_* hooks: returns the candidates in
    # possible_args that start with the typed text.
    def complete_arg(self, text, possible_args):
        l = []
        for arg in possible_args:
            if text in arg and arg.find(text) == 0:
                l.append(arg + " ")
        return l

    def complete_set(self, text, line, begidx, endidx):
        if "workload " in line:
            # return the list of world names plus 'regionset' plus a list of world1, world2...
            possible_args = tuple(self.world_names) + ('regionset',) + tuple([ 'world' + str(i+1) for i in range(len(self.world_names))])
        elif 'verbose ' in line:
            possible_args = ('True','False')
        else:
            possible_args = ('entity-limit','verbose','processes','workload')
        return self.complete_arg(text, possible_args)

    def complete_count_chunks(self, text, line, begidx, endidx):
        possible_args = world.CHUNK_PROBLEMS_ARGS.values() + ['all']
        return self.complete_arg(text, possible_args)

    def complete_remove_chunks(self, text, line, begidx, endidx):
        possible_args = world.CHUNK_PROBLEMS_ARGS.values() + ['all']
        return self.complete_arg(text, possible_args)

    def complete_replace_chunks(self, text, line, begidx, endidx):
        possible_args = world.CHUNK_PROBLEMS_ARGS.values() + ['all']
        return self.complete_arg(text, possible_args)

    def complete_count_regions(self, text, line, begidx, endidx):
        possible_args = world.REGION_PROBLEMS_ARGS.values() + ['all']
        return self.complete_arg(text, possible_args)

    def complete_remove_regions(self, text, line, begidx, endidx):
        possible_args = world.REGION_PROBLEMS_ARGS.values() + ['all']
        return self.complete_arg(text, possible_args)

    def complete_replace_regions(self, text, line, begidx, endidx):
        possible_args = world.REGION_PROBLEMS_ARGS.values() + ['all']
        return self.complete_arg(text, possible_args)

    # help
    # TODO: it would be a good idea to add a help article on how to use the
    # program in a typical case.
    # TODO: the help texts need normalizing
    def help_set(self):
        print "\nSets some variables used for the scan in interactive mode. If you run this command without an argument for a variable you can see the current state of the variable. You can set:"
        print "   verbose"
        print "If True prints a line per scanned region file instead of showing a progress bar."
        print "\n   entity-limit"
        print "If a chunk has more than this number of entities it will be added to the list of chunks with too many entities problem."
        print "\n   processes"
        print "Number of cores used while scanning the world."
        print "\n   workload"
        print "If you input a few worlds you can choose wich one will be scanned using this command.\n"

    def help_current_workload(self):
        print "\nPrints information of the current region-set/world. This will be the region-set/world to scan and fix.\n"

    def help_scan(self):
        print "\nScans the current world set or the region set.\n"

    def help_count_chunks(self):
        print "\n Prints out the number of chunks with the given status. For example"
        print "\'count corrupted\' prints the number of corrupted chunks in the world."
        print
        print "Possible status are: {0}\n".format(self.possible_chunk_args_text)

    def help_remove_entities(self):
        print "\nRemove all the entities in chunks that have more than entity-limit entities."
        print
        print "This chunks are the ones with status \'too many entities\'.\n"

    def help_remove_chunks(self):
        print "\nRemoves bad chunks with the given problem."
        print
        print "Please, be careful, when used with the status too-many-entities this will"
        print "REMOVE THE CHUNKS with too many entities problems, not the entities."
        print "To remove only the entities see the command remove_entities."
        print
        print "For example \'remove_chunks corrupted\' this will remove corrupted chunks."
        print
        print "Possible status are: {0}\n".format(self.possible_chunk_args_text)
        print

    def help_replace_chunks(self):
        print "\nReplaces bad chunks with the given status using the backups directories."
        print
        print "Exampe: \"replace_chunks corrupted\""
        print
        print "this will replace the corrupted chunks with the given backups."
        print
        print "Possible status are: {0}\n".format(self.possible_chunk_args_text)
        print
        print "Note: after replacing any chunks you have to rescan the world.\n"

    def help_count_regions(self):
        print "\n Prints out the number of regions with the given status. For example "
        print "\'count_regions too-small\' prints the number of region with \'too-small\' status."
        print
        print "Possible status are: {0}\n".format(self.possible_region_args_text)

    def help_remove_regions(self):
        print "\nRemoves regions with the given status."
        print
        print "Example: \'remove_regions too-small\'"
        print
        print "this will remove the region files with status \'too-small\'."
        print
        print "Possible status are: {0}".format(self.possible_region_args_text)
        print
        print "Note: after removing any regions you have to rescan the world.\n"

    def help_replace_regions(self):
        print "\nReplaces regions with the given status."
        print
        print "Example: \"replace_regions too-small\""
        print
        print "this will try to replace the region files with status \'too-small\'"
        print "with the given backups."
        print
        print "Possible status are: {0}".format(self.possible_region_args_text)
        print
        print "Note: after replacing any regions you have to rescan the world.\n"

    def help_summary(self):
        print "\nPrints a summary of all the problems found in the current workload.\n"

    def help_quit(self):
        print "\nQuits interactive mode, exits region-fixer. Same as \'EOF\' and \'exit\' commands.\n"

    def help_EOF(self):
        print "\nQuits interactive mode, exits region-fixer. Same as \'quit\' and \'exit\' commands\n"

    def help_exit(self):
        print "\nQuits interactive mode, exits region-fixer. Same as \'quit\' and \'EOF\' commands\n"

    def help_help(self):
        print "Prints help help."
| gpl-3.0 |
ging/horizon | openstack_dashboard/dashboards/project/containers/forms.py | 9 | 10165 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core import validators
from django.utils.encoding import force_text
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.containers import tables
# Shared field validator: container and object names may not contain '/',
# since Swift treats slashes as pseudo-folder delimiters.
no_slash_validator = validators.RegexValidator(r'^(?u)[^/]+$',
                                               _("Slash is not an allowed "
                                                 "character."),
                                               code="noslash")
class CreateContainer(forms.SelfHandlingForm):
    """Form that creates either a Swift container or a pseudo-folder.

    With an empty ``parent`` a new top-level container is created with the
    chosen public/private access; otherwise ``name`` becomes a
    pseudo-folder nested under ``parent``.
    """
    ACCESS_CHOICES = (
        ("private", _("Private")),
        ("public", _("Public")),
    )

    # Hidden: set by the view when creating a nested pseudo-folder.
    parent = forms.CharField(max_length=255,
                             required=False,
                             widget=forms.HiddenInput)
    name = forms.CharField(max_length=255,
                           label=_("Container Name"),
                           validators=[no_slash_validator])
    access = forms.ChoiceField(label=_("Container Access"),
                               choices=ACCESS_CHOICES)

    def handle(self, request, data):
        """Create the container or pseudo-folder; return True on success."""
        try:
            if not data['parent']:
                is_public = data["access"] == "public"
                metadata = ({'is_public': is_public})
                # Create a container
                api.swift.swift_create_container(request,
                                                 data["name"],
                                                 metadata=metadata)
                messages.success(request, _("Container created successfully."))
            else:
                # Create a pseudo-folder
                # 'parent' is "<container>/<optional/sub/path>"; split off
                # the container and join the rest with the new folder name.
                container, slash, remainder = data['parent'].partition("/")
                remainder = remainder.rstrip("/")
                subfolder_name = "/".join([bit for bit
                                           in (remainder, data['name'])
                                           if bit])
                api.swift.swift_create_subfolder(request,
                                                 container,
                                                 subfolder_name)
                messages.success(request, _("Folder created successfully."))
            return True
        except Exception:
            # Delegate user-facing error reporting to Horizon.
            exceptions.handle(request, _('Unable to create container.'))
class UploadObject(forms.SelfHandlingForm):
    """Form for uploading a file as an object into a Swift container.

    ``name`` may contain slashes, which the Object Store interprets as
    pseudo-folders.
    """
    # Hidden: pseudo-folder prefix the upload view is currently browsing.
    path = forms.CharField(max_length=255,
                           required=False,
                           widget=forms.HiddenInput)
    object_file = forms.FileField(label=_("File"),
                                  required=False,
                                  allow_empty_file=True)
    name = forms.CharField(max_length=255,
                           label=_("Object Name"),
                           help_text=_("Slashes are allowed, and are treated "
                                       "as pseudo-folders by the Object "
                                       "Store."),
                           widget=forms.TextInput(
                               attrs={"ng-model": "name",
                                      "not-blank": ""}
                           ))
    container_name = forms.CharField(widget=forms.HiddenInput())

    def _set_object_path(self, data):
        """Join the hidden 'path' prefix and the object name, if any."""
        if data['path']:
            object_path = "/".join([data['path'].rstrip("/"), data['name']])
        else:
            object_path = data['name']
        return object_path

    def clean(self):
        data = super(UploadObject, self).clean()
        # Normalize the missing-file case so handle() can rely on the key
        # being present in self.files.
        if 'object_file' not in self.files:
            self.files['object_file'] = None
        return data

    def handle(self, request, data):
        """Upload the file to Swift; return the new object on success."""
        object_file = self.files['object_file']
        object_path = self._set_object_path(data)
        try:
            obj = api.swift.swift_upload_object(request,
                                                data['container_name'],
                                                object_path,
                                                object_file)
            msg = force_text(_("Object was successfully uploaded."))
            messages.success(request, msg)
            return obj
        except Exception:
            exceptions.handle(request, _("Unable to upload object."))
class UpdateObject(UploadObject):
    """Variant of UploadObject used to re-upload an existing object.

    The object name is rendered read-only; supplying a new file uploads it
    under the same path.
    """
    def __init__(self, *args, **kwargs):
        super(UpdateObject, self).__init__(*args, **kwargs)
        # The name identifies the existing object and must not be edited.
        self.fields['name'].widget = forms.TextInput(
            attrs={"readonly": "readonly"})
        self.fields['name'].help_text = None

    def handle(self, request, data):
        """Re-upload the object if a file was provided."""
        object_file = self.files.get('object_file')
        if object_file:
            object_path = self._set_object_path(data)
            try:
                obj = api.swift.swift_upload_object(request,
                                                    data['container_name'],
                                                    object_path,
                                                    object_file)
                messages.success(
                    request, _("Object was successfully updated."))
                return obj
            except Exception:
                exceptions.handle(request, _("Unable to update object."))
                return False
        else:
            # If object file is not provided, then a POST method is needed
            # to update ONLY metadata. This must be implemented when
            # object metadata can be updated from this panel.
            return True
class CreatePseudoFolder(forms.SelfHandlingForm):
    """Form that creates a pseudo-folder inside an existing container."""
    # Hidden: pseudo-folder prefix the view is currently browsing.
    path = forms.CharField(max_length=255,
                           required=False,
                           widget=forms.HiddenInput)
    name = forms.CharField(max_length=255,
                           label=_("Pseudo-folder Name"))
    container_name = forms.CharField(widget=forms.HiddenInput())

    def _set_pseudo_folder_path(self, data):
        """Build the folder path; Swift pseudo-folders end with '/'."""
        if data['path']:
            pseudo_folder_path = "/".join([data['path'].rstrip("/"),
                                           data['name']]) + "/"
        else:
            pseudo_folder_path = data['name'] + "/"
        return pseudo_folder_path

    def handle(self, request, data):
        """Create the pseudo-folder; return the new object on success."""
        pseudo_folder_path = self._set_pseudo_folder_path(data)
        try:
            obj = api.swift.swift_create_pseudo_folder(request,
                                                       data['container_name'],
                                                       pseudo_folder_path)
            messages.success(request,
                             _("Pseudo-folder was successfully created."))
            return obj
        except Exception:
            exceptions.handle(request, _("Unable to create pseudo-folder."))
class CopyObject(forms.SelfHandlingForm):
    """Form that copies an object into another container/path."""
    new_container_name = forms.ChoiceField(label=_("Destination container"),
                                           validators=[no_slash_validator])
    path = forms.CharField(
        label=pgettext_lazy("Swift pseudo folder path", u"Path"),
        max_length=255, required=False)
    new_object_name = forms.CharField(max_length=255,
                                      label=_("Destination object name"),
                                      validators=[no_slash_validator])
    # Hidden: identify the source object being copied.
    orig_container_name = forms.CharField(widget=forms.HiddenInput())
    orig_object_name = forms.CharField(widget=forms.HiddenInput())

    def __init__(self, *args, **kwargs):
        # The view supplies the selectable destination containers.
        containers = kwargs.pop('containers')
        super(CopyObject, self).__init__(*args, **kwargs)
        self.fields['new_container_name'].choices = containers

    def handle(self, request, data):
        """Copy the object; return True on success.

        On a HorizonException the user is redirected back to the source
        container's index view.
        """
        index = "horizon:project:containers:index"
        orig_container = data['orig_container_name']
        orig_object = data['orig_object_name']
        new_container = data['new_container_name']
        new_object = data['new_object_name']
        path = data['path']
        # Normalize the destination folder so it ends with a single '/'.
        if path and not path.endswith("/"):
            path = path + "/"
        new_path = "%s%s" % (path, new_object)

        # Now copy the object itself.
        try:
            api.swift.swift_copy_object(request,
                                        orig_container,
                                        orig_object,
                                        new_container,
                                        new_path)
            dest = "%s/%s" % (new_container, path)
            vals = {"dest": dest.rstrip("/"),
                    "orig": orig_object.split("/")[-1],
                    "new": new_object}
            messages.success(request,
                             _('Copied "%(orig)s" to "%(dest)s" as "%(new)s".')
                             % vals)
            return True
        except exceptions.HorizonException as exc:
            messages.error(request, exc)
            raise exceptions.Http302(
                reverse(index, args=[tables.wrap_delimiter(orig_container)]))
        except Exception:
            redirect = reverse(index,
                               args=[tables.wrap_delimiter(orig_container)])
            exceptions.handle(request,
                              _("Unable to copy object."),
                              redirect=redirect)
| apache-2.0 |
IvanGavran/scrapy | scrapy/utils/python.py | 41 | 10182 | """
This module contains essential stuff that should've come with Python itself ;)
"""
import os
import re
import inspect
import weakref
import errno
import six
from functools import partial, wraps
from scrapy.utils.decorators import deprecated
def flatten(x):
    """flatten(sequence) -> list

    Returns a single, flat list which contains all elements retrieved
    from the sequence and all recursively contained sub-sequences
    (iterables).  Strings and bytes are treated as atoms, not as
    sub-sequences.

    Examples:
    >>> flatten([1, 2, [3,4], (5,6)])
    [1, 2, 3, 4, 5, 6]
    >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
    >>> flatten(["foo", "bar"])
    ['foo', 'bar']
    >>> flatten(["foo", ["baz", 42], "bar"])
    ['foo', 'baz', 42, 'bar']
    """
    return list(iflatten(x))
def iflatten(x):
    """iflatten(sequence) -> iterator

    Similar to ``.flatten()``, but returns iterator instead.  Strings and
    bytes are yielded as atoms, not expanded.
    """
    for el in x:
        if is_listlike(el):
            # Recurse lazily via iflatten() rather than flatten(): the
            # original call to flatten() materialized every nested
            # sub-sequence into an intermediate list, defeating the whole
            # point of returning an iterator.
            for el_ in iflatten(el):
                yield el_
        else:
            yield el
def is_listlike(x):
    """Return ``True`` for container-like iterables, ``False`` otherwise.

    Text and byte strings are iterable but are deliberately NOT considered
    list-like.

    >>> is_listlike("foo")
    False
    >>> is_listlike(5)
    False
    >>> is_listlike(b"foo")
    False
    >>> is_listlike([b"foo"])
    True
    >>> is_listlike((b"foo",))
    True
    >>> is_listlike({})
    True
    >>> is_listlike(set())
    True
    >>> is_listlike((x for x in range(3)))
    True
    >>> is_listlike(six.moves.xrange(5))
    True
    """
    # Strings of any flavour are excluded up front.
    if isinstance(x, (six.text_type, bytes)):
        return False
    return hasattr(x, "__iter__")
def unique(list_, key=lambda x: x):
    """Deduplicate ``list_``, keeping the order of first occurrence.

    ``key`` maps each item to the value used for duplicate detection
    (the item itself by default).
    """
    seen = set()
    deduped = []
    for item in list_:
        marker = key(item)
        if marker not in seen:
            seen.add(marker)
            deduped.append(item)
    return deduped
# Backwards-compatible alias kept for old callers; the @deprecated
# decorator emits a warning pointing at the replacement before
# delegating to to_unicode() below.
@deprecated("scrapy.utils.python.to_unicode")
def str_to_unicode(text, encoding=None, errors='strict'):
    """ This function is deprecated.
    Please use scrapy.utils.python.to_unicode. """
    return to_unicode(text, encoding, errors)
# Backwards-compatible alias for to_bytes(); same deprecation scheme.
@deprecated("scrapy.utils.python.to_bytes")
def unicode_to_str(text, encoding=None, errors='strict'):
    """ This function is deprecated. Please use scrapy.utils.python.to_bytes """
    return to_bytes(text, encoding, errors)
def to_unicode(text, encoding=None, errors='strict'):
    """Decode ``text`` (a bytes object) and return its unicode
    representation; unicode input is returned unchanged."""
    if isinstance(text, six.text_type):
        return text
    # Anything that is neither unicode (handled above) nor bytes is an error.
    if not isinstance(text, bytes):
        raise TypeError('to_unicode must receive a bytes, str or unicode '
                        'object, got %s' % type(text).__name__)
    codec = 'utf-8' if encoding is None else encoding
    return text.decode(codec, errors)
def to_bytes(text, encoding=None, errors='strict'):
    """Encode ``text`` and return its binary representation; bytes input
    is returned unchanged."""
    # The bytes check must come first: on Python 2, bytes is str, which
    # is also a member of six.string_types.
    if isinstance(text, bytes):
        return text
    if not isinstance(text, six.string_types):
        raise TypeError('to_bytes must receive a unicode, str or bytes '
                        'object, got %s' % type(text).__name__)
    codec = 'utf-8' if encoding is None else encoding
    return text.encode(codec, errors)
def to_native_str(text, encoding=None, errors='strict'):
    """ Return str representation of `text`
    (bytes in Python 2.x and unicode in Python 3.x). """
    # The interpreter's native str type decides which converter applies.
    converter = to_bytes if six.PY2 else to_unicode
    return converter(text, encoding, errors)
def re_rsearch(pattern, text, chunk_size=1024):
    """
    Search ``text`` backwards for the last occurrence of ``pattern``.

    The re module offers no reverse search, so for efficiency the text is
    scanned in progressively larger suffixes: first the final
    ``chunk_size`` kilobytes, then twice that, and so on, until a match
    is found or the whole text has been covered.

    Returns a ``(start, end)`` tuple with positions relative to the whole
    text, or None when the pattern does not occur.
    """
    def _iter_suffixes():
        # Yield (suffix, absolute_offset) pairs, each suffix one chunk
        # longer than the previous; the final pair is the entire text.
        step = chunk_size * 1024
        pos = len(text) - step
        while pos > 0:
            yield text[pos:], pos
            pos -= step
        yield text, 0

    if isinstance(pattern, six.string_types):
        pattern = re.compile(pattern)

    for suffix, offset in _iter_suffixes():
        found = list(pattern.finditer(suffix))
        if found:
            # The last match in the suffix is the last match overall.
            start, end = found[-1].span()
            return offset + start, offset + end
    return None
def memoizemethod_noargs(method):
    """Cache a method's result per instance (ignoring call arguments),
    holding the instance only weakly so memoization never prolongs its
    lifetime.
    """
    results = weakref.WeakKeyDictionary()

    @wraps(method)
    def wrapper(self, *args, **kwargs):
        try:
            return results[self]
        except KeyError:
            # First call for this instance: compute and remember.
            value = method(self, *args, **kwargs)
            results[self] = value
            return value
    return wrapper
# Bytes regarded as "binary": every C0 control character except NUL,
# tab, newline and carriage return, which commonly occur in plain text.
_BINARYCHARS = {six.b(chr(i)) for i in range(32)} - {b"\0", b"\t", b"\n", b"\r"}
# Iterating a bytes object yields ints on Python 3 (length-1 bytes on
# Python 2), so include the integer ordinals of the same characters too.
_BINARYCHARS |= {ord(ch) for ch in _BINARYCHARS}
def isbinarytext(text):
    """Return True if the given text is considered binary, or False
    otherwise, by looking for binary bytes at their chars
    """
    # Only raw bytes can be classified; reject decoded strings outright.
    if not isinstance(text, bytes):
        raise TypeError("text must be bytes, got '%s'" % type(text).__name__)
    return any(c in _BINARYCHARS for c in text)
def get_func_args(func, stripself=False):
    """Return the argument name list of a callable.

    Handles plain functions, classes (via ``__init__``), bound/unbound
    methods, method descriptors, ``functools.partial`` objects and
    arbitrary objects with a ``__call__`` method.

    :param func: the callable to inspect.
    :param stripself: when True, drop the first argument (``self``/``cls``).
    :raises TypeError: if ``func`` is not callable.
    """
    if inspect.isfunction(func):
        # inspect.getargspec() was deprecated in Python 3.0 and removed
        # in 3.11; prefer getfullargspec() when available (it also
        # accepts functions with keyword-only args or annotations).
        if hasattr(inspect, 'getfullargspec'):
            func_args = inspect.getfullargspec(func).args
        else:
            func_args = inspect.getargspec(func)[0]
    elif inspect.isclass(func):
        return get_func_args(func.__init__, True)
    elif inspect.ismethod(func):
        return get_func_args(func.__func__, True)
    elif inspect.ismethoddescriptor(func):
        return []
    elif isinstance(func, partial):
        # Positionally-bound args are gone; keyword-bound args are filtered.
        return [x for x in get_func_args(func.func)[len(func.args):]
                if not (func.keywords and x in func.keywords)]
    elif hasattr(func, '__call__'):
        if inspect.isroutine(func):
            # Builtins expose no introspectable signature here.
            return []
        elif getattr(func, '__name__', None) == '__call__':
            return []
        else:
            return get_func_args(func.__call__, True)
    else:
        raise TypeError('%s is not callable' % type(func))
    if stripself:
        func_args.pop(0)
    return func_args
def get_spec(func):
    """Returns (args, kwargs) tuple for a function

    ``args`` is the list of parameters without a default value and
    ``kwargs`` maps the remaining parameters to their defaults.

    >>> import re
    >>> get_spec(re.match)
    (['pattern', 'string'], {'flags': 0})

    >>> class Test(object):
    ...     def __call__(self, val):
    ...         pass
    ...     def method(self, val, flags=0):
    ...         pass

    >>> get_spec(Test)
    (['self', 'val'], {})

    >>> get_spec(Test.method)
    (['self', 'val'], {'flags': 0})

    >>> get_spec(Test().method)
    (['self', 'val'], {'flags': 0})

    :raises TypeError: if ``func`` is not callable.
    """
    # inspect.getargspec() was removed in Python 3.11; fall back to it
    # only on interpreters lacking getfullargspec(). Only the .args and
    # .defaults attributes are read, which both spellings provide.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    if inspect.isfunction(func) or inspect.ismethod(func):
        spec = getspec(func)
    elif hasattr(func, '__call__'):
        spec = getspec(func.__call__)
    else:
        raise TypeError('%s is not callable' % type(func))

    defaults = spec.defaults or []
    # Parameters before this index have no default value.
    firstdefault = len(spec.args) - len(defaults)
    args = spec.args[:firstdefault]
    kwargs = dict(zip(spec.args[firstdefault:], defaults))
    return args, kwargs
def equal_attributes(obj1, obj2, attributes):
    """Compare two objects attributes"""
    # By convention an empty attribute list yields False, not True.
    if not attributes:
        return False
    for attr in attributes:
        if callable(attr):
            # Callables (e.g. operator.itemgetter) extract the values
            # to compare directly.
            if not attr(obj1) == attr(obj2):
                return False
            continue
        # Both objects must expose the attribute...
        if not (hasattr(obj1, attr) and hasattr(obj2, attr)):
            return False
        # ...and hold equal values for it.
        if not getattr(obj1, attr) == getattr(obj2, attr):
            return False
    # every requested attribute matched
    return True
class WeakKeyCache(object):
    """Mapping-like cache that builds missing values on demand with
    ``default_factory`` and references its keys only weakly, so cached
    entries disappear once their keys are garbage collected.
    """

    def __init__(self, default_factory):
        self.default_factory = default_factory
        self._weakdict = weakref.WeakKeyDictionary()

    def __getitem__(self, key):
        try:
            return self._weakdict[key]
        except KeyError:
            # First lookup for this key: build and remember the value.
            value = self.default_factory(key)
            self._weakdict[key] = value
            return value
@deprecated
def stringify_dict(dct_or_tuples, encoding='utf-8', keys_only=True):
    """Return a (new) dict whose unicode keys (and, when ``keys_only`` is
    False, unicode values) have been encoded to byte strings.
    ``dct_or_tuples`` may be a dict or a list of tuples, i.e. anything
    the dict constructor accepts.
    """
    result = {}
    for key, value in six.iteritems(dict(dct_or_tuples)):
        if isinstance(key, six.text_type):
            key = key.encode(encoding)
        if not keys_only and isinstance(value, six.text_type):
            value = value.encode(encoding)
        result[key] = value
    return result
@deprecated
def is_writable(path):
    """Return True if the given path can be written (when it exists) or
    created (when it does not exist yet).
    """
    # For a missing path, writability of the parent directory decides.
    target = path if os.path.exists(path) else os.path.dirname(path)
    return os.access(target, os.W_OK)
@deprecated
def setattr_default(obj, name, value):
    """Set an attribute only when it is not already present — the
    object-attribute analogue of ``dict.setdefault()``.
    """
    if hasattr(obj, name):
        return
    setattr(obj, name, value)
def retry_on_eintr(function, *args, **kw):
    """Call ``function`` repeatedly until it completes without being
    interrupted by EINTR; any other IOError propagates to the caller."""
    while True:
        try:
            return function(*args, **kw)
        except IOError as exc:
            # Retry only when the call was interrupted by a signal.
            if exc.errno == errno.EINTR:
                continue
            raise
| bsd-3-clause |
kustodian/ansible | lib/ansible/parsing/ajson.py | 66 | 1276 | # Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
# Imported for backwards compat
from ansible.module_utils.common.json import AnsibleJSONEncoder
from ansible.parsing.vault import VaultLib
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import wrap_var
class AnsibleJSONDecoder(json.JSONDecoder):
    """JSON decoder that rehydrates Ansible-specific object markers:
    ``__ansible_vault`` values become vault-encrypted unicode objects and
    ``__ansible_unsafe`` values are wrapped as unsafe text; everything
    else decodes as plain JSON.
    """

    # Class-level vault registry; set_secrets() installs the 'default'
    # vault used to decrypt __ansible_vault payloads.
    _vaults = {}

    def __init__(self, *args, **kwargs):
        kwargs['object_hook'] = self.object_hook
        super(AnsibleJSONDecoder, self).__init__(*args, **kwargs)

    @classmethod
    def set_secrets(cls, secrets):
        cls._vaults['default'] = VaultLib(secrets=secrets)

    def object_hook(self, pairs):
        # Scan the decoded object for the Ansible marker keys; the first
        # marker found replaces the whole object.
        for key, value in pairs.items():
            if key == '__ansible_vault':
                value = AnsibleVaultEncryptedUnicode(value)
                if self._vaults:
                    value.vault = self._vaults['default']
                return value
            if key == '__ansible_unsafe':
                return wrap_var(value)
        # No marker: return the plain decoded dict unchanged.
        return pairs
| gpl-3.0 |
EvanK/ansible | test/units/modules/network/nxos/test_nxos_bgp_af.py | 30 | 4173 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.nxos import nxos_bgp_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpAfModule(TestNxosModule):
    # Module under test; the TestNxosModule base class helpers use this
    # to route set_module_args()/execute_module() calls.
    module = nxos_bgp_af
    def setUp(self):
        super(TestNxosBgpAfModule, self).setUp()
        # Patch the module's config I/O so no real device is contacted;
        # load_fixtures() below supplies the canned running-config.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_af.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_af.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        super(TestNxosBgpAfModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # All tests see the same fixture running-config; pushing config
        # is a no-op since load_config is mocked.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
        self.load_config.return_value = None
    def test_nxos_bgp_af(self):
        # Creating a new address-family emits the enclosing router
        # context plus the address-family command, in that order.
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast'))
        self.execute_module(
            changed=True, sort=False,
            commands=['router bgp 65535', 'address-family ipv4 unicast']
        )
    def test_nxos_bgp_af_vrf(self):
        # A vrf argument adds a 'vrf <name>' context before the AF.
        set_module_args(dict(asn=65535, vrf='test', afi='ipv4', safi='unicast'))
        self.execute_module(
            changed=True, sort=False,
            commands=['router bgp 65535', 'vrf test', 'address-family ipv4 unicast']
        )
    def test_nxos_bgp_af_vrf_exists(self):
        # 'test2' is already present in the fixture config, so the module
        # reports no change and issues no commands.
        set_module_args(dict(asn=65535, vrf='test2', afi='ipv4', safi='unicast'))
        self.execute_module(changed=False, commands=[])
    def test_nxos_bgp_af_dampening_routemap(self):
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             dampening_routemap='route-map-a'))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'address-family ipv4 unicast',
                      'dampening route-map route-map-a']
        )
    def test_nxos_bgp_af_dampening_manual(self):
        # Manual dampening timers render as:
        # 'dampening <half> <reuse> <suppress> <max-suppress>'.
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             dampening_half_time=5, dampening_suppress_time=2000,
                             dampening_reuse_time=1900, dampening_max_suppress_time=10))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'address-family ipv4 unicast',
                      'dampening 5 1900 2000 10']
        )
    def test_nxos_bgp_af_dampening_mix(self):
        # A route-map and manual timers are mutually exclusive; the
        # module must fail with the standard argument-spec error message.
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             dampening_routemap='route-map-a',
                             dampening_half_time=5, dampening_suppress_time=2000,
                             dampening_reuse_time=1900, dampening_max_suppress_time=10))
        result = self.execute_module(failed=True)
        self.assertEqual(result['msg'], 'parameters are mutually exclusive: dampening_routemap, dampening_half_time')
    def test_nxos_bgp_af_client(self):
        # client_to_client=False negates the default reflection setting.
        set_module_args(dict(asn=65535, afi='ipv4', safi='unicast',
                             client_to_client=False))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'address-family ipv4 unicast',
                      'no client-to-client reflection']
        )
| gpl-3.0 |
Pcolar/OCR-Parser | _regex/parse_OCR_file.py | 1 | 1854 | import sys
import re
import os
import csv
from datetime import datetime
from regex_library import *
'''
parse_OCR_files from Stdin
Input: (stdin) filename to be parsed
output CSV filename
# of lines to be parsed
Parse the input file for #lines
using the regex expressions, search input for Volume Number, Page, and Date
write result to CSV file when all variables found or #lines reached
'''
# Configuration comes from the project's regex_library helpers:
# Parse_Limit caps how many lines of each OCR file are scanned.
Parse_Limit = int(import_value("Parse_Limit"))
outfile = str(import_value("CSV_output_file"))
# Open in append mode so repeated runs accumulate rows.
CSV_File = open(outfile, 'a');
csv_writer = csv.writer(CSV_File, delimiter=',', quotechar='\"')
sequence_count = 0
# start Main
# NOTE: Python 2 script (print statements); filenames arrive one per
# line on stdin, an empty read ends the loop.
filename = sys.stdin.readline() # read a filename from stdin
while filename:
    infile = filename.rstrip('\n')
    print "Processing: ", infile
    with open(infile) as OCR_File:
        # clear variables for this file
        line_count = 0
        double_quote = "\""
        date_string = None
        volume_string = None
        issue_string = None
        page_string = None
        # parse the input file; each field keeps the first match found
        for line in OCR_File:
            line_count += 1
            # search through the line for the strings
            if (volume_string is None):
                volume_string = find_volume(line)
                # print "Volume: ", volume_string
            if (issue_string is None):
                issue_string = find_issue(line)
                # print "issue: ", issue_string
            if (date_string is None):
                date_string = find_date(line)
                # print "Date found: ", date_string
            if (page_string is None):
                page_string = find_pageno(line)
                # print "Page found: ", page_string
            # stop early once the scan budget for this file is spent
            if (line_count >= Parse_Limit):
                break
        # one output row per input file, numbered in arrival order
        sequence_count += 1
        # Write output to the CSV file
        csv_writer.writerow((infile, date_string, volume_string, issue_string, page_string, sequence_count))
    # NOTE(review): redundant — the 'with' block above already closed the
    # file; harmless but could be removed.
    OCR_File.close()
    # get the next filename from stdin
    filename = sys.stdin.readline()
CSV_File.close()
exit()
| mit |
doitwrong/daa-competition | daacompetition/data/generic_tests.py | 1 | 1145 | __author__ = 'kaloyan'
import unittest
import os
from importlib.machinery import SourceFileLoader
from daacompetition.util.parametrized_test import ParametrizedTestCase
from daacompetition.util.custom_decorators import timeout
class JudgeTest(ParametrizedTestCase):
    '''The tests must carry a numeric suffix so the failing test can be
    identified.'''
    # Loaded lazily in setUp() from the per-submission solutions/ folder.
    solution_module = None

    def setUp(self):
        # self.param (supplied by ParametrizedTestCase) names the
        # solution file to judge; import it fresh for every test.
        self.solution_module = SourceFileLoader("module.name", os.path.join(os.path.dirname(__file__),
                                                'solutions/',
                                                self.param + '.py')).load_module()
    # Each case is wrapped in @timeout so a hanging solution fails
    # instead of blocking the judge. Expected values below imply
    # solution(n) == n // 2 for these inputs.
    @timeout(0.5)
    def test_something_0(self):
        self.assertEqual(4, self.solution_module.solution(8))
    @timeout(1)
    def test_something_1(self):
        self.assertEqual(2, self.solution_module.solution(4))
    @timeout(1)
    def test_something_2(self):
        self.assertEqual(6, self.solution_module.solution(12))
    @timeout(1)
    def test_something_3(self):
        self.assertEqual(5, self.solution_module.solution(10))
| gpl-2.0 |
seewindcn/tortoisehg | src/ext/hg-fixutf8/buildcpmap.py | 1 | 1240 |
# Character range covered by the generated table: the Basic Multilingual
# Plane (U+0000..U+FFFF).
minchar = 0
maxchar = 0xffff
# Windows/OEM codepages to probe, in preference order: ASCII-compatible
# cp437 first, then regional ANSI/OEM pages, with utf-8 (65001) as the
# catch-all last resort.
cps = [ 437, # prefer ASCII
        1252, 1250, 1251, 1253, 1254, 1255, 1256,
        1257, 1258, 874, 932, 936, 949, 950,
        1361, 869, 866, 865, 864, 863, 862,
        861, 860, 857, 855, 852, 775, 737,
        850, 437,
        65001] # fallback on utf-8
def canencode(c, cp):
    """Return True when character ``c`` is representable in codec ``cp``."""
    # utf-8 ('cp65001') can encode everything, so skip the probe.
    if cp == 'cp65001':
        return True
    try:
        c.encode(cp)
    except UnicodeError:
        return False
    return True
# Codec names as Python codec strings ('cp437', ...), same order as cps.
scps = ['cp%d' % cp for cp in cps]
# NOTE: Python 2 script — unichr() builds the full BMP character list.
chars = [unichr(i) for i in range(minchar, 1 + maxchar)]
# Emit the generated module: a banner, the codec tables, then a per-
# character list of which codecs can represent each code point.
f = open('cpmap.py', 'w')
f.write('''
####################################################
#
# Do not modify this file, edit buildcpmap.py
#
####################################################
''')
f.write("cps = %s\n" % repr(scps))
f.write("cpmap = %s\n" % dict(('cp%d' % cp, cp) for cp in cps))
f.write("charmap = [\n")
# charmap[codepoint] lists, in preference order, every codec able to
# encode that character (always non-empty thanks to the utf-8 fallback).
for c, lcp in ((char, [cp for cp in scps if canencode(char, cp)])
               for char in chars):
    f.write(" %s,\n" % repr(lcp))
# reduce() in the generated module narrows the codec list to those able
# to encode every character of ``s`` and returns the best (name, number).
f.write(''']
def reduce(s):
    l = list(cps)
    for c in s:
        l = [cp for cp in charmap[ord(c)] if cp in l]
    return (l[0], cpmap[l[0]])
''');
f.close()
| gpl-2.0 |
insiderr/insiderr-app | ios-patches/basemodules/twisted/python/_shellcomp.py | 42 | 24278 | # -*- test-case-name: twisted.python.test.test_shellcomp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
No public APIs are provided by this module. Internal use only.
This module implements dynamic tab-completion for any command that uses
twisted.python.usage. Currently, only zsh is supported. Bash support may
be added in the future.
Maintainer: Eric P. Mangold - twisted AT teratorn DOT org
In order for zsh completion to take place the shell must be able to find an
appropriate "stub" file ("completion function") that invokes this code and
displays the results to the user.
The stub used for Twisted commands is in the file C{twisted-completion.zsh},
which is also included in the official Zsh distribution at
C{Completion/Unix/Command/_twisted}. Use this file as a basis for completion
functions for your own commands. You should only need to change the first line
to something like C{#compdef mycommand}.
The main public documentation exists in the L{twisted.python.usage.Options}
docstring, the L{twisted.python.usage.Completions} docstring, and the
Options howto.
"""
import itertools, getopt, inspect
from twisted.python import reflect, util, usage
def shellComplete(config, cmdName, words, shellCompFile):
    """
    Perform shell completion.

    A completion function (shell script) is generated for the requested
    shell and written to C{shellCompFile}, typically C{stdout}. The result
    is then eval'd by the shell to produce the desired completions.

    @type config: L{twisted.python.usage.Options}
    @param config: The L{twisted.python.usage.Options} instance to generate
        completions for.

    @type cmdName: C{str}
    @param cmdName: The name of the command we're generating completions for.
        In the case of zsh, this is used to print an appropriate
        "#compdef $CMD" line at the top of the output. This is
        not necessary for the functionality of the system, but it
        helps in debugging, since the output we produce is properly
        formed and may be saved in a file and used as a stand-alone
        completion function.

    @type words: C{list} of C{str}
    @param words: The raw command-line words passed to use by the shell
        stub function. argv[0] has already been stripped off.

    @type shellCompFile: C{file}
    @param shellCompFile: The file to write completion data to.
    """
    # The last "word" is really metadata from the shell stub, in the
    # form "<shellName>:<cursorPosition>".
    # shellName is provided for forward-compatibility. It is not used,
    # since we currently only support zsh.
    shellName, position = words[-1].split(":")
    position = int(position)
    # zsh gives the completion position ($CURRENT) as a 1-based index,
    # and argv[0] has already been stripped off, so we subtract 2 to
    # get the real 0-based index.
    position -= 2
    # cWord is the (possibly partial) word currently being completed.
    cWord = words[position]
    # since the user may hit TAB at any time, we may have been called with an
    # incomplete command-line that would generate getopt errors if parsed
    # verbatim. However, we must do *some* parsing in order to determine if
    # there is a specific subcommand that we need to provide completion for.
    # So, to make the command-line more sane we work backwards from the
    # current completion position and strip off all words until we find one
    # that "looks" like a subcommand. It may in fact be the argument to a
    # normal command-line option, but that won't matter for our purposes.
    while position >= 1:
        if words[position - 1].startswith("-"):
            position -= 1
        else:
            break
    words = words[:position]
    subCommands = getattr(config, 'subCommands', None)
    if subCommands:
        # OK, this command supports sub-commands, so lets see if we have been
        # given one.
        # If the command-line arguments are not valid then we won't be able to
        # sanely detect the sub-command, so just generate completions as if no
        # sub-command was found.
        args = None
        try:
            opts, args = getopt.getopt(words,
                                       config.shortOpt, config.longOpt)
        except getopt.error:
            pass
        if args:
            # yes, we have a subcommand. Try to find it.
            for (cmd, short, parser, doc) in config.subCommands:
                if args[0] == cmd or args[0] == short:
                    # Delegate to the sub-command-aware builder and stop.
                    subOptions = parser()
                    subOptions.parent = config
                    gen = ZshSubcommandBuilder(subOptions, config, cmdName,
                                               shellCompFile)
                    gen.write()
                    return
        # sub-command not given, or did not match any knowns sub-command names
        genSubs = True
        if cWord.startswith("-"):
            # optimization: if the current word being completed starts
            # with a hyphen then it can't be a sub-command, so skip
            # the expensive generation of the sub-command list
            genSubs = False
        gen = ZshBuilder(config, cmdName, shellCompFile)
        gen.write(genSubs=genSubs)
    else:
        gen = ZshBuilder(config, cmdName, shellCompFile)
        gen.write()
class SubcommandAction(usage.Completer):
    """
    Completer producing the zsh action that hands control to a
    sub-command's own completion state.
    """
    def _shellCode(self, optName, shellType):
        # Only zsh output is implemented at the moment.
        if shellType != usage._ZSH:
            raise NotImplementedError("Unknown shellType %r" % (shellType,))
        return '*::subcmd:->subcmd'
class ZshBuilder(object):
    """
    Build zsh completion code for a command described by a
    L{twisted.python.usage.Options} instance, optionally including the
    list of sub-command names.

    Options belonging to individual sub-commands are not generated here;
    when the user is completing inside a specific sub-command,
    L{ZshSubcommandBuilder} is used instead.

    @type options: L{twisted.python.usage.Options}
    @ivar options: The option definitions for this command.

    @type cmdName: C{str}
    @ivar cmdName: Name of the command completions are generated for.

    @type file: C{file}
    @ivar file: Destination for the generated completion function.
    """
    def __init__(self, options, cmdName, file):
        self.options = options
        self.cmdName = cmdName
        self.file = file

    def write(self, genSubs=True):
        """
        Emit the completion function to the output file.

        @type genSubs: C{bool}
        @param genSubs: Whether completions for the sub-command names
            should be produced. Only meaningful when the options instance
            defines a C{subCommands} attribute.

        @return: C{None}
        """
        subCommands = getattr(self.options, 'subCommands', None)
        gen = ZshArgumentsGenerator(self.options, self.cmdName, self.file)
        if genSubs and subCommands is not None:
            # Complete the command's own options plus the sub-command
            # names themselves.
            gen.extraActions.insert(0, SubcommandAction())
            gen.write()
            self.file.write('local _zsh_subcmds_array\n_zsh_subcmds_array=(\n')
            for (cmd, short, parser, desc) in subCommands:
                self.file.write('"%s:%s"\n' % (cmd, desc))
            self.file.write(")\n\n")
            self.file.write('_describe "sub-command" _zsh_subcmds_array\n')
        else:
            gen.write()
class ZshSubcommandBuilder(ZshBuilder):
    """
    Build zsh completion code covering both a command's own options and
    the options of one specific sub-command. Used only when the user is
    completing inside that sub-command.

    @type subOptions: L{twisted.python.usage.Options}
    @ivar subOptions: Option definitions for the sub-command.
    """
    def __init__(self, subOptions, *args):
        self.subOptions = subOptions
        ZshBuilder.__init__(self, *args)

    def write(self):
        """
        Emit the completion function to the output file.

        @return: C{None}
        """
        # Top-level options first, with the sub-command dispatch action...
        mainGen = ZshArgumentsGenerator(self.options, self.cmdName, self.file)
        mainGen.extraActions.insert(0, SubcommandAction())
        mainGen.write()
        # ...then the options specific to the active sub-command.
        subGen = ZshArgumentsGenerator(self.subOptions, self.cmdName, self.file)
        subGen.write()
class ZshArgumentsGenerator(object):
"""
Generate a call to the zsh _arguments completion function
based on data in a usage.Options instance
@type options: L{twisted.python.usage.Options}
@ivar options: The L{twisted.python.usage.Options} instance to generate for
@type cmdName: C{str}
@ivar cmdName: The name of the command we're generating completions for.
@type file: C{file}
@ivar file: The C{file} to write the completion function to
The following non-constructor variables are populated by this class
with data gathered from the C{Options} instance passed in, and its
base classes.
@type descriptions: C{dict}
@ivar descriptions: A dict mapping long option names to alternate
descriptions. When this variable is defined, the descriptions
contained here will override those descriptions provided in the
optFlags and optParameters variables.
@type multiUse: C{list}
@ivar multiUse: An iterable containing those long option names which may
appear on the command line more than once. By default, options will
only be completed one time.
@type mutuallyExclusive: C{list} of C{tuple}
@ivar mutuallyExclusive: A sequence of sequences, with each sub-sequence
containing those long option names that are mutually exclusive. That is,
those options that cannot appear on the command line together.
@type optActions: C{dict}
@ivar optActions: A dict mapping long option names to shell "actions".
These actions define what may be completed as the argument to the
given option, and should be given as instances of
L{twisted.python.usage.Completer}.
Callables may instead be given for the values in this dict. The
callable should accept no arguments, and return a C{Completer}
instance used as the action.
@type extraActions: C{list} of C{twisted.python.usage.Completer}
@ivar extraActions: Extra arguments are those arguments typically
appearing at the end of the command-line, which are not associated
with any particular named option. That is, the arguments that are
given to the parseArgs() method of your usage.Options subclass.
"""
def __init__(self, options, cmdName, file):
self.options = options
self.cmdName = cmdName
self.file = file
self.descriptions = {}
self.multiUse = set()
self.mutuallyExclusive = []
self.optActions = {}
self.extraActions = []
for cls in reversed(inspect.getmro(options.__class__)):
data = getattr(cls, 'compData', None)
if data:
self.descriptions.update(data.descriptions)
self.optActions.update(data.optActions)
self.multiUse.update(data.multiUse)
self.mutuallyExclusive.extend(data.mutuallyExclusive)
# I don't see any sane way to aggregate extraActions, so just
# take the one at the top of the MRO (nearest the `options'
# instance).
if data.extraActions:
self.extraActions = data.extraActions
aCL = reflect.accumulateClassList
aCD = reflect.accumulateClassDict
optFlags = []
optParams = []
aCL(options.__class__, 'optFlags', optFlags)
aCL(options.__class__, 'optParameters', optParams)
for i, optList in enumerate(optFlags):
if len(optList) != 3:
optFlags[i] = util.padTo(3, optList)
for i, optList in enumerate(optParams):
if len(optList) != 5:
optParams[i] = util.padTo(5, optList)
self.optFlags = optFlags
self.optParams = optParams
paramNameToDefinition = {}
for optList in optParams:
paramNameToDefinition[optList[0]] = optList[1:]
self.paramNameToDefinition = paramNameToDefinition
flagNameToDefinition = {}
for optList in optFlags:
flagNameToDefinition[optList[0]] = optList[1:]
self.flagNameToDefinition = flagNameToDefinition
allOptionsNameToDefinition = {}
allOptionsNameToDefinition.update(paramNameToDefinition)
allOptionsNameToDefinition.update(flagNameToDefinition)
self.allOptionsNameToDefinition = allOptionsNameToDefinition
self.addAdditionalOptions()
# makes sure none of the Completions metadata references
# option names that don't exist. (great for catching typos)
self.verifyZshNames()
self.excludes = self.makeExcludesDict()
def write(self):
"""
Write the zsh completion code to the file given to __init__
@return: C{None}
"""
self.writeHeader()
self.writeExtras()
self.writeOptions()
self.writeFooter()
def writeHeader(self):
"""
This is the start of the code that calls _arguments
@return: C{None}
"""
self.file.write('#compdef %s\n\n'
'_arguments -s -A "-*" \\\n' % (self.cmdName,))
def writeOptions(self):
"""
Write out zsh code for each option in this command
@return: C{None}
"""
optNames = self.allOptionsNameToDefinition.keys()
optNames.sort()
for longname in optNames:
self.writeOpt(longname)
def writeExtras(self):
"""
Write out completion information for extra arguments appearing on the
command-line. These are extra positional arguments not associated
with a named option. That is, the stuff that gets passed to
Options.parseArgs().
@return: C{None}
@raises: ValueError: if C{Completer} with C{repeat=True} is found and
is not the last item in the C{extraActions} list.
"""
for i, action in enumerate(self.extraActions):
descr = ""
if action._descr:
descr = action._descr
# a repeatable action must be the last action in the list
if action._repeat and i != len(self.extraActions) - 1:
raise ValueError("Completer with repeat=True must be "
"last item in Options.extraActions")
self.file.write(escape(action._shellCode('', usage._ZSH)))
self.file.write(' \\\n')
def writeFooter(self):
"""
Write the last bit of code that finishes the call to _arguments
@return: C{None}
"""
self.file.write('&& return 0\n')
def verifyZshNames(self):
"""
Ensure that none of the option names given in the metadata are typoed
@return: C{None}
@raise ValueError: Raised if unknown option names have been found.
"""
def err(name):
raise ValueError("Unknown option name \"%s\" found while\n"
"examining Completions instances on %s" % (
name, self.options))
for name in itertools.chain(self.descriptions, self.optActions,
self.multiUse):
if name not in self.allOptionsNameToDefinition:
err(name)
for seq in self.mutuallyExclusive:
for name in seq:
if name not in self.allOptionsNameToDefinition:
err(name)
def excludeStr(self, longname, buildShort=False):
"""
Generate an "exclusion string" for the given option
@type longname: C{str}
@param longname: The long option name (e.g. "verbose" instead of "v")
@type buildShort: C{bool}
@param buildShort: May be True to indicate we're building an excludes
string for the short option that correspondes to the given long opt.
@return: The generated C{str}
"""
if longname in self.excludes:
exclusions = self.excludes[longname].copy()
else:
exclusions = set()
# if longname isn't a multiUse option (can't appear on the cmd line more
# than once), then we have to exclude the short option if we're
# building for the long option, and vice versa.
if longname not in self.multiUse:
if buildShort is False:
short = self.getShortOption(longname)
if short is not None:
exclusions.add(short)
else:
exclusions.add(longname)
if not exclusions:
return ''
strings = []
for optName in exclusions:
if len(optName) == 1:
# short option
strings.append("-" + optName)
else:
strings.append("--" + optName)
strings.sort() # need deterministic order for reliable unit-tests
return "(%s)" % " ".join(strings)
def makeExcludesDict(self):
"""
@return: A C{dict} that maps each option name appearing in
self.mutuallyExclusive to a list of those option names that is it
mutually exclusive with (can't appear on the cmd line with).
"""
#create a mapping of long option name -> single character name
longToShort = {}
for optList in itertools.chain(self.optParams, self.optFlags):
if optList[1] != None:
longToShort[optList[0]] = optList[1]
excludes = {}
for lst in self.mutuallyExclusive:
for i, longname in enumerate(lst):
tmp = set(lst[:i] + lst[i+1:])
for name in tmp.copy():
if name in longToShort:
tmp.add(longToShort[name])
if longname in excludes:
excludes[longname] = excludes[longname].union(tmp)
else:
excludes[longname] = tmp
return excludes
def writeOpt(self, longname):
"""
Write out the zsh code for the given argument. This is just part of the
one big call to _arguments
@type longname: C{str}
@param longname: The long option name (e.g. "verbose" instead of "v")
@return: C{None}
"""
if longname in self.flagNameToDefinition:
# It's a flag option. Not one that takes a parameter.
longField = "--%s" % longname
else:
longField = "--%s=" % longname
short = self.getShortOption(longname)
if short != None:
shortField = "-" + short
else:
shortField = ''
descr = self.getDescription(longname)
descriptionField = descr.replace("[", "\[")
descriptionField = descriptionField.replace("]", "\]")
descriptionField = '[%s]' % descriptionField
actionField = self.getAction(longname)
if longname in self.multiUse:
multiField = '*'
else:
multiField = ''
longExclusionsField = self.excludeStr(longname)
if short:
#we have to write an extra line for the short option if we have one
shortExclusionsField = self.excludeStr(longname, buildShort=True)
self.file.write(escape('%s%s%s%s%s' % (shortExclusionsField,
multiField, shortField, descriptionField, actionField)))
self.file.write(' \\\n')
self.file.write(escape('%s%s%s%s%s' % (longExclusionsField,
multiField, longField, descriptionField, actionField)))
self.file.write(' \\\n')
def getAction(self, longname):
    """
    Return a zsh "action" string for the given argument
    @return: C{str}
    """
    if longname in self.optActions:
        # A custom action was supplied; it may be a factory we must
        # call first to obtain the actual action object.
        supplied = self.optActions[longname]
        if callable(supplied):
            supplied = supplied()
        return supplied._shellCode(longname, usage._ZSH)
    if longname in self.paramNameToDefinition:
        # Options taking a parameter default to filename completion.
        return ':%s:_files' % (longname,)
    # Plain flags get no completion action.
    return ''
def getDescription(self, longname):
    """
    Return the description to be used for this argument
    @return: C{str}
    """
    # An explicitly supplied alternate description always wins.
    if longname in self.descriptions:
        return self.descriptions[longname]

    # Next, try the description recorded in optFlags / optParams.
    descr = None
    if longname in self.flagNameToDefinition:
        descr = self.flagNameToDefinition[longname][1]
    elif longname in self.paramNameToDefinition:
        descr = self.paramNameToDefinition[longname][2]
    if descr is not None:
        return descr

    # Fall back to the docstring of an opt_foo method if there is one.
    longMangled = longname.replace('-', '_')  # this is what t.p.usage does
    method = getattr(self.options, 'opt_%s' % longMangled, None)
    if method is not None:
        descr = descrFromDoc(method)
        if descr is not None:
            return descr

    return longname  # we really ought to have a good description to use
def getShortOption(self, longname):
    """
    Return the short option letter or None
    @return: C{str} or C{None}
    """
    # Index 0 of the definition holds the short name; both '' and None
    # mean "no short option", so normalize falsy values to None.
    return self.allOptionsNameToDefinition[longname][0] or None
def addAdditionalOptions(self):
    """
    Add additional options to the optFlags and optParams lists.
    These will be defined by 'opt_foo' methods of the Options subclass
    @return: C{None}
    """
    methodsDict = {}
    reflect.accumulateMethods(self.options, methodsDict, 'opt_')
    methodToShort = {}
    for name in methodsDict.copy():
        if len(name) == 1:
            # Single-letter names are short-option aliases for a method
            # that is also reachable under a long name; remember the
            # alias and drop the short entry.
            methodToShort[methodsDict[name]] = name
            del methodsDict[name]

    for methodName, methodObj in methodsDict.items():
        longname = methodName.replace('_', '-')  # t.p.usage does this
        # if this option is already defined by the optFlags or
        # optParameters then we don't want to override that data
        if longname in self.allOptionsNameToDefinition:
            continue

        descr = self.getDescription(longname)
        short = methodToShort.get(methodObj)

        # Use __func__/__code__ rather than the Python 2-only
        # im_func/func_code aliases: both spellings exist on Python 2.6+,
        # and only the former exists on Python 3.
        reqArgs = methodObj.__func__.__code__.co_argcount
        if reqArgs == 2:
            # opt_foo(self, value): the option takes a parameter.
            self.optParams.append([longname, short, None, descr])
            self.paramNameToDefinition[longname] = [short, None, descr]
            self.allOptionsNameToDefinition[longname] = [short, None, descr]
        else:
            # reqArgs must equal 1. self.options would have failed
            # to instantiate if it had opt_ methods with bad signatures.
            self.optFlags.append([longname, short, descr])
            self.flagNameToDefinition[longname] = [short, descr]
            self.allOptionsNameToDefinition[longname] = [short, None, descr]
def descrFromDoc(obj):
    """
    Generate an appropriate description from docstring of the given object
    """
    doc = obj.__doc__
    if doc is None or doc.isspace():
        # No usable docstring at all.
        return None
    # Collapse the docstring onto one line, dropping blank lines and
    # per-line leading/trailing whitespace.
    cleaned = []
    for line in doc.split("\n"):
        if line and not line.isspace():
            cleaned.append(line.strip())
    return " ".join(cleaned)
def escape(x):
    """
    Shell escape the given string

    Implementation borrowed from now-deprecated commands.mkarg() in the stdlib
    """
    if '\'' not in x:
        # Nothing is special inside single quotes, so just wrap.
        return '\'' + x + '\''
    # Otherwise double-quote the string, backslash-escaping the
    # characters that remain special inside double quotes.
    pieces = ['"']
    for ch in x:
        if ch in '\\$"`':
            pieces.append('\\')
        pieces.append(ch)
    pieces.append('"')
    return ''.join(pieces)
| gpl-3.0 |
lexor90/node-compiler | node/deps/v8/tools/testrunner/local/progress.py | 5 | 13799 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from functools import wraps
import json
import os
import sys
import time
from . import execution
from . import junit_output
from . import statusfile
ABS_PATH_PREFIX = os.getcwd() + os.sep
class ProgressIndicator(object):
  """Base interface for progress reporters; subclasses override the hooks.

  The test runner calls Starting/HasRun/Heartbeat/Done as the run
  progresses; the default implementations do nothing.
  """

  def __init__(self):
    # The runner is attached later via SetRunner().
    self.runner = None

  def SetRunner(self, runner):
    self.runner = runner

  def Starting(self):
    pass

  def Done(self):
    pass

  def HasRun(self, test, has_unexpected_output):
    pass

  def Heartbeat(self):
    pass

  def PrintFailureHeader(self, test):
    # Negative tests are expected to fail; mark them as such in the header.
    if test.suite.IsNegativeTest(test):
      negative_marker = '[negative] '
    else:
      negative_marker = ''
    print "=== %(label)s %(negative)s===" % {
      'label': test.GetLabel(),
      'negative': negative_marker
    }

  def _EscapeCommand(self, test):
    # Reconstruct a copy-pasteable shell command for the given test.
    command = execution.GetCommand(test, self.runner.context)
    parts = []
    for part in command:
      if ' ' in part:
        # Escape spaces. We may need to escape more characters for this
        # to work properly.
        parts.append('"%s"' % part)
      else:
        parts.append(part)
    return " ".join(parts)
class IndicatorNotifier(object):
  """Holds a list of progress indicators and notifies them all on events."""

  def __init__(self):
    # Registered ProgressIndicator instances, notified in order.
    self.indicators = list()

  def Register(self, indicator):
    self.indicators += [indicator]
# Forge all generic event-dispatching methods in IndicatorNotifier, which are
# part of the ProgressIndicator interface.
for func_name in ProgressIndicator.__dict__:
  func = getattr(ProgressIndicator, func_name)
  # Only public callables are forwarded; dunder entries and helpers
  # prefixed with '_' are skipped.
  if callable(func) and not func.__name__.startswith('_'):
    def wrap_functor(f):
      # The extra function scope binds |f| once per loop iteration,
      # avoiding the classic late-binding closure pitfall.
      @wraps(f)
      def functor(self, *args, **kwargs):
        """Generic event dispatcher."""
        # Fan the event out to every registered indicator.
        for indicator in self.indicators:
          getattr(indicator, f.__name__)(*args, **kwargs)
      return functor
    setattr(IndicatorNotifier, func_name, wrap_functor(func))
class SimpleProgressIndicator(ProgressIndicator):
  """Abstract base class for {Verbose,Dots}ProgressIndicator"""

  def Starting(self):
    print 'Running %i tests' % self.runner.total

  def Done(self):
    # Print a detailed report for every failed test, then a summary.
    print
    for failed in self.runner.failed:
      self.PrintFailureHeader(failed)
      if failed.output.stderr:
        print "--- stderr ---"
        print failed.output.stderr.strip()
      if failed.output.stdout:
        print "--- stdout ---"
        print failed.output.stdout.strip()
      print "Command: %s" % self._EscapeCommand(failed)
      if failed.output.HasCrashed():
        print "exit code: %d" % failed.output.exit_code
        print "--- CRASHED ---"
      if failed.output.HasTimedOut():
        print "--- TIMEOUT ---"
    if len(self.runner.failed) == 0:
      print "==="
      print "=== All tests succeeded"
      print "==="
    else:
      print
      print "==="
      print "=== %i tests failed" % len(self.runner.failed)
      if self.runner.crashed > 0:
        print "=== %i tests CRASHED" % self.runner.crashed
      print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
  """Prints one full line per finished test, plus heartbeat messages."""

  def HasRun(self, test, has_unexpected_output):
    # Distinguish crashes from ordinary failures in the per-test line.
    if has_unexpected_output:
      if test.output.HasCrashed():
        outcome = 'CRASH'
      else:
        outcome = 'FAIL'
    else:
      outcome = 'pass'
    print 'Done running %s: %s' % (test.GetLabel(), outcome)
    sys.stdout.flush()

  def Heartbeat(self):
    # Periodic output so CI watchdogs don't consider the run hung.
    print 'Still working...'
    sys.stdout.flush()
class DotsProgressIndicator(SimpleProgressIndicator):
  """Prints one character per finished test: '.', 'C', 'T' or 'F'."""

  def HasRun(self, test, has_unexpected_output):
    finished = self.runner.succeeded + len(self.runner.failed)
    # Wrap the line of dots every 50 tests.
    if finished > 1 and finished % 50 == 1:
      sys.stdout.write('\n')
    if not has_unexpected_output:
      marker = '.'
    elif test.output.HasCrashed():
      marker = 'C'
    elif test.output.HasTimedOut():
      marker = 'T'
    else:
      marker = 'F'
    sys.stdout.write(marker)
    sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
  """Abstract base class for {Color,Monochrome}ProgressIndicator"""

  def __init__(self, templates):
    super(CompactProgressIndicator, self).__init__()
    # Dict with 'status_line', 'stdout' and 'stderr' format strings.
    self.templates = templates
    # Length of the last status line, needed to blank it out later.
    self.last_status_length = 0
    self.start_time = time.time()

  def Done(self):
    self.PrintProgress('Done')
    print ""  # Line break.

  def HasRun(self, test, has_unexpected_output):
    self.PrintProgress(test.GetLabel())
    if has_unexpected_output:
      # Replace the in-place status line with a full failure report.
      self.ClearLine(self.last_status_length)
      self.PrintFailureHeader(test)
      stdout = test.output.stdout.strip()
      if len(stdout):
        print self.templates['stdout'] % stdout
      stderr = test.output.stderr.strip()
      if len(stderr):
        print self.templates['stderr'] % stderr
      print "Command: %s" % self._EscapeCommand(test)
      if test.output.HasCrashed():
        print "exit code: %d" % test.output.exit_code
        print "--- CRASHED ---"
      if test.output.HasTimedOut():
        print "--- TIMEOUT ---"

  def Truncate(self, string, length):
    # Clip |string| to |length| characters, ellipsis included.
    if length and (len(string) > (length - 3)):
      return string[:(length - 3)] + "..."
    else:
      return string

  def PrintProgress(self, name):
    # Overwrite the previous status line in place.
    self.ClearLine(self.last_status_length)
    elapsed = time.time() - self.start_time
    # Percentage of tests already finished (guard against total == 0).
    progress = 0 if not self.runner.total else (
        ((self.runner.total - self.runner.remaining) * 100) //
        self.runner.total)
    status = self.templates['status_line'] % {
      'passed': self.runner.succeeded,
      'progress': progress,
      'failed': len(self.runner.failed),
      'test': name,
      'mins': int(elapsed) / 60,
      'secs': int(elapsed) % 60
    }
    status = self.Truncate(status, 78)
    self.last_status_length = len(status)
    print status,
    sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
  """Compact one-line progress display using ANSI color escape codes."""

  def __init__(self):
    # Blue progress %, green pass count, red fail count.
    templates = {
      'status_line': ("[%(mins)02i:%(secs)02i|"
                      "\033[34m%%%(progress) 4d\033[0m|"
                      "\033[32m+%(passed) 4d\033[0m|"
                      "\033[31m-%(failed) 4d\033[0m]: %(test)s"),
      'stdout': "\033[1m%s\033[0m",
      'stderr': "\033[31m%s\033[0m",
    }
    super(ColorProgressIndicator, self).__init__(templates)

  def ClearLine(self, last_line_length):
    # ANSI "erase line" followed by a carriage return.
    print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
  """Compact one-line progress display without ANSI escapes."""

  def __init__(self):
    templates = {
      'status_line': ("[%(mins)02i:%(secs)02i|%%%(progress) 4d|"
                      "+%(passed) 4d|-%(failed) 4d]: %(test)s"),
      'stdout': '%s',
      'stderr': '%s',
    }
    super(MonochromeProgressIndicator, self).__init__(templates)

  def ClearLine(self, last_line_length):
    # Overwrite the old line with spaces, then return the cursor.
    print ("\r" + (" " * last_line_length) + "\r"),
class JUnitTestProgressIndicator(ProgressIndicator):
  """Writes test results as a JUnit-compatible XML report."""

  def __init__(self, junitout, junittestsuite):
    self.outputter = junit_output.JUnitTestOutput(junittestsuite)
    # Default to stdout when no output file name was given.
    self.outfile = open(junitout, "w") if junitout else sys.stdout

  def Done(self):
    self.outputter.FinishAndWrite(self.outfile)
    if self.outfile != sys.stdout:
      self.outfile.close()

  def HasRun(self, test, has_unexpected_output):
    fail_text = ""
    if has_unexpected_output:
      # Collect captured output, the command line and crash/timeout
      # markers into the failure message.
      chunks = []
      stdout = test.output.stdout.strip()
      if stdout:
        chunks.append("stdout:\n%s\n" % stdout)
      stderr = test.output.stderr.strip()
      if stderr:
        chunks.append("stderr:\n%s\n" % stderr)
      chunks.append("Command: %s" % self._EscapeCommand(test))
      if test.output.HasCrashed():
        chunks.append("exit code: %d\n--- CRASHED ---" % test.output.exit_code)
      if test.output.HasTimedOut():
        chunks.append("--- TIMEOUT ---")
      fail_text = "".join(chunks)
    self.outputter.HasRunTest(
        [test.GetLabel()] + self.runner.context.mode_flags + test.flags,
        test.duration,
        fail_text)
class JsonTestProgressIndicator(ProgressIndicator):
  """Appends machine-readable results to a JSON file when the run is done.

  Only tests with unexpected output are recorded individually; aggregate
  statistics (slowest tests, mean duration) cover all tests.
  """

  def __init__(self, json_test_results, arch, mode, random_seed):
    # Path of the JSON results file to create or append to.
    self.json_test_results = json_test_results
    self.arch = arch
    self.mode = mode
    self.random_seed = random_seed
    # Result dicts for tests with unexpected output only.
    self.results = []
    # Every test that ran, buffered for duration statistics.
    self.tests = []

  def Done(self):
    complete_results = []
    if os.path.exists(self.json_test_results):
      with open(self.json_test_results, "r") as f:
        # Buildbot might start out with an empty file.
        complete_results = json.loads(f.read() or "[]")

    duration_mean = None
    if self.tests:
      # Get duration mean.
      duration_mean = (
          sum(t.duration for t in self.tests) / float(len(self.tests)))

    # Sort tests by duration, slowest first. Using key=/reverse= instead
    # of a cmp function is faster and also works on Python 3, where the
    # cmp argument to list.sort() no longer exists.
    timed_tests = [t for t in self.tests if t.duration is not None]
    timed_tests.sort(key=lambda t: t.duration, reverse=True)
    slowest_tests = [
      {
        "name": test.GetLabel(),
        "flags": test.flags,
        "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
        "duration": test.duration,
        "marked_slow": statusfile.IsSlow(test.outcomes),
      } for test in timed_tests[:20]
    ]

    complete_results.append({
      "arch": self.arch,
      "mode": self.mode,
      "results": self.results,
      "slowest_tests": slowest_tests,
      "duration_mean": duration_mean,
      "test_total": len(self.tests),
    })

    with open(self.json_test_results, "w") as f:
      f.write(json.dumps(complete_results))

  def HasRun(self, test, has_unexpected_output):
    # Buffer all tests for sorting the durations in the end.
    self.tests.append(test)
    if not has_unexpected_output:
      # Omit tests that run as expected. Passing tests of reruns after failures
      # will have unexpected_output to be reported here has well.
      return

    self.results.append({
      "name": test.GetLabel(),
      "flags": test.flags,
      "command": self._EscapeCommand(test).replace(ABS_PATH_PREFIX, ""),
      "run": test.run,
      "stdout": test.output.stdout,
      "stderr": test.output.stderr,
      "exit_code": test.output.exit_code,
      "result": test.suite.GetOutcome(test),
      "expected": list(test.outcomes or ["PASS"]),
      "duration": test.duration,
      # TODO(machenbach): This stores only the global random seed from the
      # context and not possible overrides when using random-seed stress.
      "random_seed": self.random_seed,
      "target_name": test.suite.shell(),
      "variant": test.variant,
    })
class FlakinessTestProgressIndicator(ProgressIndicator):
  """Writes results in the Chromium JSON test-results (version 3) format."""

  def __init__(self, json_test_results):
    # Path of the output file.
    self.json_test_results = json_test_results
    # Maps result key -> {"actual", "expected", "times"}.
    self.results = {}
    # Count of first-run outcomes per result type.
    self.summary = {
      "PASS": 0,
      "FAIL": 0,
      "CRASH": 0,
      "TIMEOUT": 0,
    }
    self.seconds_since_epoch = time.time()

  def Done(self):
    with open(self.json_test_results, "w") as f:
      json.dump({
        "interrupted": False,
        "num_failures_by_type": self.summary,
        "path_delimiter": "/",
        "seconds_since_epoch": self.seconds_since_epoch,
        "tests": self.results,
        "version": 3,
      }, f)

  def HasRun(self, test, has_unexpected_output):
    # The key is the sorted, dash-stripped flag list plus the test label,
    # '/'-delimited (matching the declared path_delimiter).
    key = "/".join(
        sorted(flag.lstrip("-")
               for flag in self.runner.context.extra_flags + test.flags) +
        ["test", test.GetLabel()],
    )
    outcome = test.suite.GetOutcome(test)
    assert outcome in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
    if test.run == 1:
      # First run of this test.
      expected_outcomes = ([
        expected
        for expected in (test.outcomes or ["PASS"])
        if expected in ["PASS", "FAIL", "CRASH", "TIMEOUT"]
      ] or ["PASS"])
      self.results[key] = {
        "actual": outcome,
        "expected": " ".join(expected_outcomes),
        "times": [test.duration],
      }
      self.summary[outcome] = self.summary[outcome] + 1
    else:
      # This is a rerun and a previous result exists.
      result = self.results[key]
      result["actual"] = "%s %s" % (result["actual"], outcome)
      result["times"].append(test.duration)
# Registry mapping --progress command-line values to indicator classes.
PROGRESS_INDICATORS = {
  'verbose': VerboseProgressIndicator,
  'dots': DotsProgressIndicator,
  'color': ColorProgressIndicator,
  'mono': MonochromeProgressIndicator
}
| mit |
Tecnativa/website | website_field_autocomplete/controllers/main.py | 6 | 1367 | # -*- coding: utf-8 -*-
# Copyright 2016 LasLabs Inc.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import json
from openerp import http
from openerp.http import request
from openerp.addons.website.controllers.main import Website
class Website(Website):
    """Extends the core ``Website`` controller with a generic JSON
    autocomplete endpoint for arbitrary models.
    """

    @http.route(
        '/website/field_autocomplete/<string:model>',
        type='http',
        auth='public',
        methods=['GET'],
        website=True,
    )
    def _get_field_autocomplete(self, model, **kwargs):
        """ Return json autocomplete data

        GET params (all JSON-encoded strings):
            domain: search domain, default ``[]``.
            fields: list of field names to read, default ``[]``.
            limit: max number of records, default unlimited.
        """
        # NOTE(review): this public route lets callers query any model
        # name; record rules still apply via request.env, but consider
        # whitelisting models — verify against deployment requirements.
        domain = json.loads(kwargs.get('domain', "[]"))
        fields = json.loads(kwargs.get('fields', "[]"))
        limit = kwargs.get('limit', None)
        res = self._get_autocomplete_data(model, domain, fields, limit)
        # list() keeps json.dumps working on Python 3, where dict.values()
        # is a non-serializable view; identical output on Python 2.
        return json.dumps(list(res.values()))

    def _get_autocomplete_data(self, model, domain, fields, limit=None):
        """ Gets and returns raw record data

        Params:
            model: Model name to query on
            domain: Search domain
            fields: List of fields to get
            limit: Limit results to

        Returns:
            Dict of record dicts, keyed by ID
        """
        if limit:
            # The HTTP layer hands limits over as strings.
            limit = int(limit)
        res = request.env[model].search_read(
            domain, fields, limit=limit
        )
        return {r['id']: r for r in res}
| agpl-3.0 |
alrusdi/lettuce | lettuce/plugins/subunit_output.py | 20 | 5048 | # -*- coding: utf-8 -*-
# <Lettuce - Behaviour Driven Development for python>
# Copyright (C) <2010-2012> Gabriel Falcão <gabriel@nacaolivre.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERsteps.pyCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import sys
from StringIO import StringIO
from lettuce.terrain import before, after
from subunit.v2 import StreamResultToBytes
from subunit.iso8601 import Utc
def open_file(filename):
    """
    open a subunit file

    this is not a context manager because it is used asynchronously by
    hooks

    out of the scope of enable() because we want to patch it in our tests
    """
    # Fall back to the default file name when none (or empty) is given.
    return open(filename if filename else 'subunit.bin', 'wb')
def close_file(file_):
    """
    Close a subunit output file previously returned by open_file().
    """
    file_.close()
def enable(filename=None):
    """
    Install lettuce hooks that stream test progress to a subunit v2 file.

    Scenario stdout/stderr are captured and attached to the stream,
    step outcomes are summarized in a 'steps' attachment, and failing
    steps attach their traceback.
    """
    file_ = open_file(filename)
    streamresult = StreamResultToBytes(file_)
    streamresult.startTestRun()

    # Originals, restored after each step's output capture.
    real_stdout = sys.stdout
    real_stderr = sys.stderr

    @before.each_scenario
    def before_scenario(scenario):
        # create redirects for stdout and stderr
        scenario.stdout = StringIO()
        scenario.stderr = StringIO()

        # Backgrounds may not carry tags.
        try:
            test_tags = scenario.tags
        except AttributeError:
            test_tags = ()

        streamresult.status(test_id=get_test_id(scenario),
                            test_status='inprogress',
                            test_tags=test_tags,
                            timestamp=now())

    @before.step_output
    def capture_output(step):
        # only consider steps for background
        if not step.scenario:
            return
        sys.stdout = step.scenario.stdout
        sys.stderr = step.scenario.stderr

    @after.step_output
    def uncapture_output(step):
        sys.stdout = real_stdout
        sys.stderr = real_stderr

    @after.each_scenario
    def after_scenario(scenario):
        # Attach whatever the scenario printed, then report its outcome.
        streamresult.status(test_id=get_test_id(scenario),
                            file_name='stdout',
                            file_bytes=scenario.stdout.getvalue().encode('utf-8'),
                            mime_type='text/plain; charset=utf8',
                            eof=True)
        streamresult.status(test_id=get_test_id(scenario),
                            file_name='stderr',
                            file_bytes=scenario.stderr.getvalue().encode('utf-8'),
                            mime_type='text/plain; charset=utf8',
                            eof=True)

        if scenario.passed:
            streamresult.status(test_id=get_test_id(scenario),
                                test_status='success',
                                timestamp=now())
        else:
            streamresult.status(test_id=get_test_id(scenario),
                                test_status='fail',
                                timestamp=now())

    @after.each_step
    def after_step(step):
        # only consider steps for background
        if not step.scenario:
            return
        test_id = get_test_id(step.scenario)

        # One marker character per step outcome, collected into the
        # 'steps' attachment below.
        if step.passed:
            marker = u'✔'
        elif not step.defined_at:
            marker = u'?'
        elif step.failed:
            marker = u'❌'
            # Attach the traceback if the failure recorded one.
            try:
                streamresult.status(test_id=test_id,
                                    file_name='traceback',
                                    file_bytes=step.why.traceback.encode('utf-8'),
                                    mime_type='text/plain; charset=utf8')
            except AttributeError:
                pass
        elif not step.ran:
            marker = u' '
        else:
            raise AssertionError("Internal error")

        steps = u'{marker} {sentence}\n'.format(
            marker=marker,
            sentence=step.sentence)
        streamresult.status(test_id=test_id,
                            file_name='steps',
                            file_bytes=steps.encode('utf-8'),
                            mime_type='text/plain; charset=utf8')

    @after.all
    def after_all(total):
        streamresult.stopTestRun()
        close_file(file_)
def get_test_id(scenario):
    """
    Build the subunit test id "<feature>: <scenario>"; backgrounds
    (which have no scenario name) become "<feature>: Background".
    """
    try:
        return '%s: %s' % (scenario.feature.name, scenario.name)
    except AttributeError:
        return '%s: Background' % (scenario.feature.name,)
def now():
    """
    A timestamp suitable for subunit
    """
    # Utc comes from subunit.iso8601; subunit wants tz-aware timestamps.
    return datetime.datetime.now(tz=Utc())
| gpl-3.0 |
mmasaki/trove | trove/extensions/mgmt/upgrade/models.py | 13 | 1653 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.common.remote import guest_client
class UpgradeMessageSender(object):
    """
    This class handles the business logic for sending
    an rpc message to the guest
    """

    @staticmethod
    def create(context, instance_id, instance_version, location,
               metadata=None):
        """Validate the arguments and return a zero-argument callable
        that sends the upgrade RPC message to the guest when invoked.

        :raises ValueError: if any supplied argument fails validation.
        """
        instance_id = UpgradeMessageSender._validate(instance_id, 36)
        if instance_version:
            instance_version = UpgradeMessageSender._validate(
                instance_version, 255)
        if location:
            location = UpgradeMessageSender._validate(location, 255)

        def _create_resources():
            # Deferred so the caller controls when the RPC is issued.
            guest_client(context, instance_id).upgrade(
                instance_version, location, metadata)
        return _create_resources

    @staticmethod
    def _validate(s, max_length):
        """Strip ``s`` and ensure it is a non-empty string of at most
        ``max_length`` characters; raise ValueError otherwise.
        """
        if s is None:
            # Include messages so callers can tell what went wrong
            # (the original raised a bare ValueError()).
            raise ValueError("Validated string cannot be None.")
        s = s.strip()
        if not 1 <= len(s) <= max_length:
            raise ValueError("Validated string length must be between "
                             "1 and %d characters." % max_length)
        return s
| apache-2.0 |
yoosw/printrun_golconda | printrun/power/__init__.py | 19 | 4886 | # This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
import platform
import logging
import os
# Provide platform-specific inhibit_sleep/deinhibit_sleep implementations.
if platform.system() == "Darwin":
    from .osx import inhibit_sleep_osx, deinhibit_sleep_osx
    inhibit_sleep = inhibit_sleep_osx
    deinhibit_sleep = deinhibit_sleep_osx
elif platform.system() == "Windows":
    import ctypes
    # Win32 SetThreadExecutionState flags.
    ES_CONTINUOUS = 0x80000000
    ES_SYSTEM_REQUIRED = 0x00000001
    def inhibit_sleep(reason):
        # |reason| is unused on Windows; the flags keep the system awake
        # until this thread clears ES_SYSTEM_REQUIRED.
        mode = ES_CONTINUOUS | ES_SYSTEM_REQUIRED
        ctypes.windll.kernel32.SetThreadExecutionState(ctypes.c_int(mode))

    def deinhibit_sleep():
        ctypes.windll.kernel32.SetThreadExecutionState(ctypes.c_int(ES_CONTINUOUS))
else:
    # Linux/other: go through the D-Bus screensaver inhibition API.
    try:
        import dbus
        inhibit_sleep_handler = None
        inhibit_sleep_token = None
        bus = dbus.SessionBus()
        try:
            # GNOME uses the right object path, try it first
            service_name = "org.freedesktop.ScreenSaver"
            proxy = bus.get_object(service_name,
                                   "/org/freedesktop/ScreenSaver")
            inhibit_sleep_handler = dbus.Interface(proxy, service_name)
            # Do a test run
            token = inhibit_sleep_handler.Inhibit("printrun", "test")
            inhibit_sleep_handler.UnInhibit(token)
        except dbus.DBusException:
            # KDE uses /ScreenSaver object path, let's try it as well
            proxy = bus.get_object(service_name,
                                   "/ScreenSaver")
            inhibit_sleep_handler = dbus.Interface(proxy, service_name)
            token = inhibit_sleep_handler.Inhibit("printrun", "test")
            inhibit_sleep_handler.UnInhibit(token)

        def inhibit_sleep(reason):
            global inhibit_sleep_handler, inhibit_sleep_token
            inhibit_sleep_token = inhibit_sleep_handler.Inhibit("printrun", reason)

        def deinhibit_sleep():
            global inhibit_sleep_handler, inhibit_sleep_token
            if inhibit_sleep_handler is None or inhibit_sleep_token is None:
                return
            inhibit_sleep_handler.UnInhibit(inhibit_sleep_token)
            inhibit_sleep_token = None
    except Exception, e:
        # D-Bus unavailable or broken: fall back to no-op implementations.
        logging.warning("Could not setup DBus for sleep inhibition: %s" % e)
        def inhibit_sleep(reason):
            return

        def deinhibit_sleep():
            return
# Optional process-priority helpers; degrade to no-ops without psutil.
try:
    import psutil

    # psutil changed Process.nice from an attribute to a method between
    # versions; these helpers cope with both APIs.
    def get_nice(nice, p = None):
        if not p: p = psutil.Process(os.getpid())
        if callable(p.nice):
            return p.nice()
        else:
            return p.nice
    def set_nice(nice, p = None):
        if not p: p = psutil.Process(os.getpid())
        if callable(p.nice):
            p.nice(nice)
        else:
            p.nice = nice

    if platform.system() != "Windows":
        import resource
        if hasattr(psutil, "RLIMIT_NICE"):
            # The soft nice limit tells us the highest priority we may use.
            nice_limit, _ = resource.getrlimit(psutil.RLIMIT_NICE)
            high_priority_nice = 20 - nice_limit
        else:
            high_priority_nice = 0
            # RLIMIT_NICE is not available (probably OSX), let's probe
            # Try setting niceness to -20 .. -1
            p = psutil.Process(os.getpid())
            # NOTE(review): get_nice(p) passes the process as the unused
            # 'nice' argument; it works because get_nice ignores that
            # parameter — confirm this is intended.
            orig_nice = get_nice(p)
            for i in range(-20, 0):
                try:
                    set_nice(i, p)
                    high_priority_nice = i
                    break
                except psutil.AccessDenied, e:
                    pass
            set_nice(orig_nice, p)

    def set_priority():
        if platform.system() == "Windows":
            set_nice(psutil.HIGH_PRIORITY_CLASS)
        else:
            if high_priority_nice < 0:
                set_nice(high_priority_nice)

    def reset_priority():
        if platform.system() == "Windows":
            set_nice(psutil.NORMAL_PRIORITY_CLASS)
        else:
            if high_priority_nice < 0:
                set_nice(0)

    def powerset_print_start(reason):
        # Boost priority and keep the machine awake for the print.
        set_priority()
        inhibit_sleep(reason)

    def powerset_print_stop():
        reset_priority()
        deinhibit_sleep()
except ImportError, e:
    logging.warning("psutil unavailable, could not import power utils:" + str(e))
    def powerset_print_start(reason):
        pass

    def powerset_print_stop():
        pass
xiangke/pycopia | core/pycopia/inet/packet/ipv4.py | 2 | 4623 | #!/usr/bin/python
"""
IP Packet class taken from:
# Copyright 1997, Corporation for National Research Initiatives
# written by Jeremy Hylton, jeremy@cnri.reston.va.us
"""
import sys, os
# IP header version and fragmentation-related constants.
IPVERSION = 4
IP_DF = 0x4000
IP_MF = 0x2000
IP_MAXPACKET = 65535

# Type-of-service bits.
IPTOS_LOWDELAY = 0x10
IPTOS_THROUGHPUT = 0x08
IPTOS_RELIABILITY = 0x04

# TOS precedence values.
IPTOS_PREC_NETCONTROL = 0xe0
IPTOS_PREC_INTERNETCONTROL = 0xc0
IPTOS_PREC_CRITIC_ECP = 0xa0
IPTOS_PREC_FLASHOVERRIDE = 0x80
IPTOS_PREC_FLASH = 0x60
IPTOS_PREC_IMMEDIATE = 0x40
IPTOS_PREC_PRIORITY = 0x20
IPTOS_PREC_ROUTINE = 0x00

# IP option classes and numbers.
IPOPT_CONTROL = 0x00
IPOPT_RESERVED1 = 0x20
IPOPT_DEBMEAS = 0x40
IPOPT_RESERVED2 = 0x60
IPOPT_EOL = 0
IPOPT_NOP = 1
IPOPT_RR = 7
IPOPT_TS = 68
IPOPT_SECURITY = 130
IPOPT_LSRR = 131
IPOPT_SATID = 136
IPOPT_SSRR = 137

# Offsets within an option.
IPOPT_OPTVAL = 0
IPOPT_OLEN = 1
IPOPT_OFFSET = 2
IPOPT_MINOFF = 4

# Timestamp option flags.
IPOPT_TS_TSONLY = 0
IPOPT_TS_TSANDADDR = 1
IPOPT_TS_PRESPEC = 2

# Security option levels.
IPOPT_SECUR_UNCLASS = 0x0000
IPOPT_SECUR_CONFID = 0xf135
IPOPT_SECUR_EFTO = 0x789a
IPOPT_SECUR_MMMM = 0xbc4d
IPOPT_SECUR_RESTR = 0xaf13
IPOPT_SECUR_SECRET = 0xd788
IPOPT_SECUR_TOPSECRET = 0x6bc5

# TTL defaults and the classic minimum MSS.
MAXTTL = 255
IPFRAGTTL = 60
IPTTLDEC = 1
IP_MSS = 576
# This Packet class stolen from CNRI archives. It might be useful.
class Packet(object):
    """An IP packet.

    Doesn't handle IP options yet (but you have the option of adding
    support).

    NOTE(review): only sys and os are imported in the visible module
    header, yet the methods below use struct, inet, socket, string and
    dqtoi — presumably provided elsewhere in the module; verify.
    """
    def __init__(self, packet=None, cksum=0):
        # Either parse an existing raw packet, or initialize header
        # fields to sane defaults for building a new one.
        if packet:
            self.__disassemble(packet, cksum)
        else:
            self.v = IPVERSION
            self.hl = 5   # this implement punts on options
            self.tos = IPTOS_PREC_ROUTINE
            self.len = 20   # begin with header length
            self.id = 0
            self.off = 0
            self.ttl = 0
            self.p = 0
            self.sum = 0
            self.src = os.uname()[1]
            self.dst = None
            self.data = ''

    def __repr__(self):
        # Show header summary plus (truncated) payload.
        begin = "<IPv%d id=%d proto=%d src=%s dst=%s datalen=%d " % \
                (self.v, self.id, self.p, self.src, self.dst,
                 self.len - self.hl * 4)
        if len(self.data) == 0:
            rep = begin + "\'\'>"
        elif len(self.data) < 10:
            rep = begin + "%s>" % repr(self.data)
        else:
            rep = begin + "%s>" % repr(self.data[:10] + '...')
        return rep

    def assemble(self, cksum=0):
        "Get a packet suitable for sending over an IP socket."
        # make sure all the data is ready
        self.len = self.hl * 4 + len(self.data)
        self.__parse_addrs()
        # create the packet
        header = struct.pack('cchhhcc',
                             chr((self.v & 0x0f) << 4
                                 | (self.hl & 0x0f)), # 4bits each
                             chr(self.tos & 0xff),
                             self.len,
                             self.id,
                             self.off, # what about flags?
                             chr(self.ttl & 0xff),
                             chr(self.p & 0xff))
        if cksum:
            # Checksum covers header plus source/destination addresses.
            self.sum = inet.cksum(header + '\000\000' + self.__src +
                                  self.__dst)
            packet = header + struct.pack('h', self.sum) \
                     + self.__src + self.__dst
        else:
            packet = header + '\000\000' + self.__src + self.__dst
        packet = packet + self.data
        self.__packet = inet.iph2net(packet)
        return self.__packet

    def __parse_addrs(self):
        # Convert dotted-quad or hostname strings to packed addresses.
        try:
            self.__src = dqtoi(self.src)
        except ValueError:
            try:
                self.__src = dqtoi(socket.gethostbyname(self.src))
            except ValueError:
                raise ValueError, "invalid source address"
        try:
            self.__dst = dqtoi(self.dst)
        except ValueError:
            try:
                self.__dst = dqtoi(socket.gethostbyname(self.dst))
            except ValueError:
                # NOTE(review): message says "source" for the destination
                # address — likely a copy/paste slip; fix deliberately.
                raise ValueError, "invalid source address"

    def __unparse_addrs(self):
        # Convert packed 4-byte addresses back to dotted-quad strings.
        src = struct.unpack('cccc', self.src)
        self.src = string.joinfields(map(lambda x:str(ord(x)), src), '.')
        dst = struct.unpack('cccc', self.dst)
        self.dst = string.joinfields(map(lambda x:str(ord(x)), dst), '.')

    def __disassemble(self, raw_packet, cksum=0):
        # Ok, I didn't realize this. The kernel does the checksum for
        # you, even on a raw packet. Plus, the Python cksum code seems
        # to be buggy. It's different than the IP version by ...01010
        packet = inet.net2iph(raw_packet)
        b1 = ord(packet[0])
        self.v = (b1 >> 4) & 0x0f
        self.hl = b1 & 0x0f
        if self.v != IPVERSION:
            raise ValueError, "cannot handle IPv%d packets" % self.v
        hl = self.hl * 4
        # verify the checksum
        self.sum = struct.unpack('h', packet[10:12])[0] & 0xffff
        if cksum:
            our_cksum = inet.cksum(packet[:20])
            if our_cksum != 0:
                raise ValueError, packet
        # unpack the fields
        elts = struct.unpack('cchhhcc', packet[:hl-10])
        # struct didn't do !<> when this was written
        self.tos = ord(elts[1])
        self.len = elts[2] & 0xffff
        self.id = elts[3] & 0xffff
        self.off = elts[4] & 0xffff
        self.ttl = ord(elts[5])
        self.p = ord(elts[6])
        self.data = packet[hl:]
        self.src = packet[hl-8:hl-4]
        self.dst = packet[hl-4:hl]
        self.__unparse_addrs()
| lgpl-2.1 |
SummerLW/Perf-Insight-Report | telemetry/telemetry/testing/system_stub_unittest.py | 29 | 12427 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
PERF_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from telemetry.testing import system_stub
from telemetry.internal.testing import system_stub_test_module
class CloudStorageTest(unittest.TestCase):
  # 40-character zero-padded pseudo-hashes standing in for real SHA-1s.
  SUCCESS_FILE_HASH = 'success'.zfill(40)
  PUBLIC_FILE_HASH = 'public'.zfill(40)
  PARTNER_FILE_HASH = 'partner'.zfill(40)
  INTERNAL_FILE_HASH = 'internal'.zfill(40)
def setUp(self):
  """Populates the CloudStorage stub with the remote files, local data
  files and hash-file fixtures shared by every test in this class."""
  self.cloud_storage = system_stub.CloudStorageModuleStub()

  # Files in Cloud Storage.
  self.remote_files = ['preset_public_file.wpr',
                       'preset_partner_file.wpr',
                       'preset_internal_file.wpr']
  # Bucket -> {remote file name -> hash} mapping for the stub.
  self.remote_paths = {
    self.cloud_storage.PUBLIC_BUCKET:
      {'preset_public_file.wpr':CloudStorageTest.PUBLIC_FILE_HASH},
    self.cloud_storage.PARTNER_BUCKET:
      {'preset_partner_file.wpr':CloudStorageTest.PARTNER_FILE_HASH},
    self.cloud_storage.INTERNAL_BUCKET:
      {'preset_internal_file.wpr':CloudStorageTest.INTERNAL_FILE_HASH}}

  # Local data files and hashes.
  self.data_files = [
      os.path.join(os.path.sep, 'path', 'to', 'success.wpr'),
      os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr'),
      os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
      os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr'),
      os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr')]
  self.local_file_hashes = {
      os.path.join(os.path.sep, 'path', 'to', 'success.wpr'):
          CloudStorageTest.SUCCESS_FILE_HASH,
      os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr'):
          CloudStorageTest.SUCCESS_FILE_HASH,
      os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'):
          CloudStorageTest.PUBLIC_FILE_HASH,
      os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr'):
          CloudStorageTest.PARTNER_FILE_HASH,
      os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'):
          CloudStorageTest.INTERNAL_FILE_HASH,
  }
  self.cloud_storage.SetCalculatedHashesForTesting(self.local_file_hashes)
  # Local hash files and their contents.
  # Note: wrong_hash.wpr.sha1 deliberately disagrees with the file's
  # calculated hash above, to exercise mismatch handling.
  local_hash_files = {
      os.path.join(os.path.sep, 'path', 'to', 'success.wpr.sha1'):
          CloudStorageTest.SUCCESS_FILE_HASH,
      os.path.join(os.path.sep, 'path', 'to', 'wrong_hash.wpr.sha1'):
          'wronghash'.zfill(40),
      os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr.sha1'):
          CloudStorageTest.PUBLIC_FILE_HASH,
      os.path.join(os.path.sep, 'path', 'to', 'preset_partner_file.wpr.sha1'):
          CloudStorageTest.PARTNER_FILE_HASH,
      os.path.join(os.path.sep, 'path', 'to',
                   'preset_internal_file.wpr.sha1'):
          CloudStorageTest.INTERNAL_FILE_HASH,
  }
  self.cloud_storage.SetHashFileContentsForTesting(local_hash_files)
def testSetup(self):
self.assertEqual(self.local_file_hashes,
self.cloud_storage.local_file_hashes)
self.assertEqual(set(self.data_files),
set(self.cloud_storage.GetLocalDataFiles()))
self.assertEqual(self.cloud_storage.default_remote_paths,
self.cloud_storage.GetRemotePathsForTesting())
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertEqual(self.remote_paths,
self.cloud_storage.GetRemotePathsForTesting())
def testExistsEmptyCloudStorage(self):
# Test empty remote files dictionary.
self.assertFalse(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
'preset_public_file.wpr'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))
def testExistsNonEmptyCloudStorage(self):
# Test non-empty remote files dictionary.
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PARTNER_BUCKET, 'preset_partner_file.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'fake_file'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PARTNER_BUCKET, 'fake_file'))
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.INTERNAL_BUCKET, 'fake_file'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testNonEmptyInsertAndExistsPublic(self):
# Test non-empty remote files dictionary.
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertFalse(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
'success.wpr'))
self.cloud_storage.Insert(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testEmptyInsertAndExistsPublic(self):
# Test empty remote files dictionary.
self.assertFalse(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
self.cloud_storage.Insert(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
def testEmptyInsertAndGet(self):
self.assertRaises(self.cloud_storage.NotFoundError, self.cloud_storage.Get,
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to',
'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr'))
self.assertEqual(CloudStorageTest.SUCCESS_FILE_HASH, self.cloud_storage.Get(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr')))
def testNonEmptyInsertAndGet(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertRaises(self.cloud_storage.NotFoundError, self.cloud_storage.Get,
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.cloud_storage.Insert(self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to',
'success.wpr'))
self.assertTrue(self.cloud_storage.Exists(self.cloud_storage.PUBLIC_BUCKET,
'success.wpr'))
self.assertEqual(
CloudStorageTest.SUCCESS_FILE_HASH, self.cloud_storage.Get(
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr')))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testGetIfChanged(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertRaises(
self.cloud_storage.NotFoundError, self.cloud_storage.Get,
self.cloud_storage.PUBLIC_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
self.assertFalse(self.cloud_storage.GetIfChanged(
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
self.cloud_storage.PUBLIC_BUCKET))
self.cloud_storage.ChangeRemoteHashForTesting(
self.cloud_storage.PUBLIC_BUCKET, 'preset_public_file.wpr',
CloudStorageTest.UPDATED_HASH)
self.assertTrue(self.cloud_storage.GetIfChanged(
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
self.cloud_storage.PUBLIC_BUCKET))
self.assertFalse(self.cloud_storage.GetIfChanged(
os.path.join(os.path.sep, 'path', 'to', 'preset_public_file.wpr'),
self.cloud_storage.PUBLIC_BUCKET))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testList(self):
self.assertEqual([],
self.cloud_storage.List(self.cloud_storage.PUBLIC_BUCKET))
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.assertEqual(['preset_public_file.wpr'],
self.cloud_storage.List(self.cloud_storage.PUBLIC_BUCKET))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testPermissionError(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.cloud_storage.SetPermissionLevelForTesting(
self.cloud_storage.PUBLIC_PERMISSION)
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.Get,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.GetIfChanged,
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'),
self.cloud_storage.INTERNAL_BUCKET)
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.List,
self.cloud_storage.INTERNAL_BUCKET)
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.Exists,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
self.assertRaises(
self.cloud_storage.PermissionError, self.cloud_storage.Insert,
self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
def testCredentialsError(self):
self.cloud_storage.SetRemotePathsForTesting(self.remote_paths)
self.cloud_storage.SetPermissionLevelForTesting(
self.cloud_storage.CREDENTIALS_ERROR_PERMISSION)
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.Get,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr',
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.GetIfChanged,
self.cloud_storage.INTERNAL_BUCKET,
os.path.join(os.path.sep, 'path', 'to', 'preset_internal_file.wpr'))
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.List,
self.cloud_storage.INTERNAL_BUCKET)
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.Exists,
self.cloud_storage.INTERNAL_BUCKET, 'preset_internal_file.wpr')
self.assertRaises(
self.cloud_storage.CredentialsError, self.cloud_storage.Insert,
self.cloud_storage.INTERNAL_BUCKET, 'success.wpr',
os.path.join(os.path.sep, 'path', 'to', 'success.wpr'))
# Reset state.
self.cloud_storage.SetRemotePathsForTesting()
  def testOpenRestoresCorrectly(self):
    # Stub out open() in system_stub_test_module, check that the stubbed
    # version is what TestOpen uses, then verify Restore() reinstates the
    # real builtin.  (The final `file` type check is Python 2 only.)
    file_path = os.path.realpath(__file__)
    stubs = system_stub.Override(system_stub_test_module, ['open'])
    stubs.open.files = {file_path:'contents'}
    f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
    self.assertEqual(type(f), system_stub.OpenFunctionStub.FileStub)
    stubs.open.files = {}
    stubs.Restore()
    # This will throw an error if the open stub wasn't restored correctly.
    f = system_stub_test_module.SystemStubTest.TestOpen(file_path)
    self.assertEqual(type(f), file)
| bsd-3-clause |
ChristineLaMuse/mozillians | mozillians/phonebook/tests/test_views/test_views_register.py | 8 | 3401 | from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import Client
from django.test.utils import override_settings
from funfactory.helpers import urlparams
from mock import patch
from nose.tools import eq_, ok_
from mozillians.common.tests import TestCase
from mozillians.phonebook.tests import InviteFactory
from mozillians.phonebook.utils import redeem_invite
from mozillians.users.tests import UserFactory
class RegisterTests(TestCase):
    """Tests for the phonebook register view (invite redemption)."""

    def test_register_anonymous(self):
        """An anonymous visit with a code stores it in the session."""
        client = Client()
        url = urlparams(reverse('phonebook:register'), code='foo')
        response = client.get(url, follow=True)
        eq_(client.session['invite-code'], 'foo')
        self.assertTemplateUsed(response, 'phonebook/home.html')

    @override_settings(CAN_VOUCH_THRESHOLD=1)
    @patch('mozillians.phonebook.views.redeem_invite', wraps=redeem_invite)
    def test_register_unvouched(self, redeem_invite_mock):
        """Redeeming an invite vouches a previously unvouched user."""
        user = UserFactory.create(vouched=False)
        inviter = UserFactory.create()
        invite = InviteFactory.create(inviter=inviter.userprofile)
        url = urlparams(reverse('phonebook:register'), code=invite.code)
        with self.login(user) as client:
            response = client.get(url, follow=True)
        user = User.objects.get(id=user.id)
        ok_(user.userprofile.is_vouched)
        ok_(redeem_invite_mock.called)
        self.assertTemplateUsed(response, 'phonebook/home.html')

    @override_settings(CAN_VOUCH_THRESHOLD=1)
    @patch('mozillians.phonebook.views.redeem_invite', wraps=redeem_invite)
    def test_register_vouched(self, redeem_invite_mock):
        """An already vouched user keeps the original voucher; the invite
        is not redeemed."""
        voucher_1 = UserFactory.create()
        voucher_2 = UserFactory.create()
        user = UserFactory.create(vouched=False)
        user.userprofile.vouch(voucher_1.userprofile)
        invite = InviteFactory.create(inviter=voucher_2.userprofile)
        url = urlparams(reverse('phonebook:register'), code=invite.code)
        with self.login(user) as client:
            response = client.get(url, follow=True)
        user = User.objects.get(id=user.id)
        ok_(user.userprofile.is_vouched)
        ok_(user.userprofile.vouched_by, voucher_1.userprofile)
        ok_(not redeem_invite_mock.called)
        self.assertTemplateUsed(response, 'phonebook/home.html')

    def test_register_without_code_anonymous(self):
        client = Client()
        response = client.get(reverse('phonebook:register'), follow=True)
        # Fix: assert on the session of the client that made the request.
        # The old code checked self.client.session, which never saw the
        # request and so the assertion passed vacuously.
        ok_(not client.session.get('invite-code'))
        self.assertTemplateUsed(response, 'phonebook/home.html')
        eq_(response.status_code, 200)

    def test_register_without_code_unvouched(self):
        user = UserFactory.create(vouched=False)
        with self.login(user) as client:
            response = client.get(reverse('phonebook:register'), follow=True)
            # Fix: check the logged-in client's session, not self.client's.
            ok_(not client.session.get('invite-code'))
            self.assertTemplateUsed(response, 'phonebook/home.html')
            eq_(response.status_code, 200)

    def test_register_without_code_vouched(self):
        user = UserFactory.create()
        with self.login(user) as client:
            response = client.get(reverse('phonebook:register'), follow=True)
            # Fix: check the logged-in client's session, not self.client's.
            ok_(not client.session.get('invite-code'))
            self.assertTemplateUsed(response, 'phonebook/home.html')
            eq_(response.status_code, 200)
sdeslauriers/bayesian | bayesian/_domain.py | 1 | 5352 | from itertools import combinations
from copy import copy
import numpy as np
import bayesian
class DomainGraph(object):

    def __init__(self, network=None):
        """Domain graph of a bayesian network

        The DomainGraph class represents the domain graph of a Bayesian
        network. It is an undirected graph where nodes represent variables.
        There is a link between two nodes if they are part of the domain of
        the same table.

        Args:
            network (optional, bayesian.Network) : The Bayesian network used
                to compute the domain graph.

        """
        self._nodes = set()

        # If the network is not provided, the domain graph is empty.
        if network is None:
            network = bayesian.Network()
        self._network = network

        # Initialize the domain graph using the provided network.
        self._build_from_network(network)

    def __copy__(self):
        """Shallow copy of a domain graph

        The copy shares the Bayesian network but gets its own node set.
        """
        shallow_copy = DomainGraph(self._network)
        shallow_copy._nodes = copy(self._nodes)
        return shallow_copy

    @property
    def network(self):
        """Get the Bayesian network of the domain graph"""
        return self._network

    @property
    def simplicial_node(self):
        """Get a simplicial node, or None if the graph has none"""
        for node in self._nodes:
            if node.is_simplicial:
                return node
        return None

    def add_node(self, node):
        """Adds a node to the undirected graph"""
        self._nodes.add(node)

    def get_node(self, data):
        """Get the node with the specified data

        Raises:
            ValueError: If no node holds ``data``.
        """
        nodes = list(self._nodes)
        data_list = [node.data for node in nodes]
        return nodes[data_list.index(data)]

    def get_almost_simplicial(self):
        """Get the node that is closest to being simplicial

        Bug fix: the loop tracked the best candidate in ``selected`` but
        returned ``node`` (the last element of the set iteration), so an
        arbitrary node was returned instead of the one with the fewest
        missing links.
        """
        selected = None
        for node in self._nodes:
            if selected is None or \
                    node.missing_links() < selected.missing_links():
                selected = node
        return selected

    def remove_node(self, node_to_remove):
        """Removes a node from the graph along with all links to it"""
        self._nodes.discard(node_to_remove)
        for node in self._nodes:
            node.remove_link(node_to_remove)

    def _build_from_network(self, network):
        """Builds the domain graph of a Bayesian network

        Args:
            network (bayesian.Network) : The network used to build the domain
                graph.

        """
        # Create a node for every variable in the network.
        domain = network.domain
        for variable in domain:
            self.add_node(Node(variable))

        # Add the links between variables that are in the domain of the
        # same table.
        tables = network.get_tables()
        for table in tables:
            for v1, v2 in combinations(table.domain, 2):
                node = self.get_node(v1)
                node.add_link(self.get_node(v2))
class Node(object):

    def __init__(self, data):
        """A vertex in the domain graph of a Bayesian network

        Args:
            data (object) : The data associated with the node.

        """
        self._data = data
        self._links = set()

    @property
    def data(self):
        """The data associated with this node"""
        return self._data

    @property
    def family(self):
        """The node itself together with all of its neighbors, as a set"""
        members = list(self._links)
        members.append(self)
        return set(members)

    @property
    def is_simplicial(self):
        """Whether every pair of neighbors is itself linked

        Returns:
            (bool) : True if the node is simplicial, False otherwise.

        """
        return all(
            first in second.links
            for first, second in combinations(self._links, 2))

    @property
    def links(self):
        """The set of nodes this node is linked to"""
        return self._links

    def add_link(self, node):
        """Link this node and ``node`` (links are undirected)"""
        self._links.add(node)
        node._links.add(self)

    def make_simplicial(self):
        """Add links between neighbors until this node is simplicial"""
        for first, second in combinations(self._links, 2):
            if first not in second.links:
                first.add_link(second)

    def missing_links(self):
        """Count the neighbor pairs still unlinked (links needed to be simplicial)"""
        return sum(
            1 for first, second in combinations(self._links, 2)
            if first not in second.links)

    def remove_link(self, node):
        """Remove the undirected link between this node and ``node``"""
        self._links.discard(node)
        node._links.discard(self)
| mit |
brandonium21/snowflake | snowflakeEnv/lib/python2.7/site-packages/flask/testsuite/helpers.py | 405 | 21973 | # -*- coding: utf-8 -*-
"""
flask.testsuite.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase, catch_warnings, catch_stderr
from werkzeug.http import parse_cache_control_header, parse_options_header
from flask._compat import StringIO, text_type
def has_encoding(name):
    """Return True if ``name`` names a codec available in this interpreter."""
    import codecs
    try:
        codecs.lookup(name)
    except LookupError:
        return False
    return True
class JSONTestCase(FlaskTestCase):
    """Exercises Flask's JSON integration: request parsing, jsonify(),
    the ``|tojson`` template filter, and encoder/decoder customization."""

    def test_json_bad_requests(self):
        app = flask.Flask(__name__)
        @app.route('/json', methods=['POST'])
        def return_json():
            return flask.jsonify(foo=text_type(flask.request.get_json()))
        c = app.test_client()
        # A malformed JSON body must yield a 400 Bad Request, not a 500.
        rv = c.post('/json', data='malformed', content_type='application/json')
        self.assert_equal(rv.status_code, 400)

    def test_json_body_encoding(self):
        app = flask.Flask(__name__)
        app.testing = True
        @app.route('/')
        def index():
            return flask.request.get_json()
        c = app.test_client()
        # The charset from the Content-Type header is honored when decoding.
        resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
                     content_type='application/json; charset=iso-8859-15')
        self.assert_equal(resp.data, u'Hällo Wörld'.encode('utf-8'))

    def test_jsonify(self):
        d = dict(a=23, b=42, c=[1, 2, 3])
        app = flask.Flask(__name__)
        @app.route('/kw')
        def return_kwargs():
            return flask.jsonify(**d)
        @app.route('/dict')
        def return_dict():
            return flask.jsonify(d)
        c = app.test_client()
        # jsonify accepts both keyword arguments and a single dict.
        for url in '/kw', '/dict':
            rv = c.get(url)
            self.assert_equal(rv.mimetype, 'application/json')
            self.assert_equal(flask.json.loads(rv.data), d)

    def test_json_as_unicode(self):
        app = flask.Flask(__name__)
        # JSON_AS_ASCII controls whether non-ASCII characters are \u-escaped.
        app.config['JSON_AS_ASCII'] = True
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            self.assert_equal(rv, '"\\u2603"')
        app.config['JSON_AS_ASCII'] = False
        with app.app_context():
            rv = flask.json.dumps(u'\N{SNOWMAN}')
            self.assert_equal(rv, u'"\u2603"')

    def test_json_attr(self):
        app = flask.Flask(__name__)
        @app.route('/add', methods=['POST'])
        def add():
            json = flask.request.get_json()
            return text_type(json['a'] + json['b'])
        c = app.test_client()
        rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
                    content_type='application/json')
        self.assert_equal(rv.data, b'3')

    def test_template_escaping(self):
        app = flask.Flask(__name__)
        render = flask.render_template_string
        with app.test_request_context():
            # |tojson output must be safe to embed inside <script> blocks
            # and single-quoted HTML attributes.
            rv = flask.json.htmlsafe_dumps('</script>')
            self.assert_equal(rv, u'"\\u003c/script\\u003e"')
            self.assert_equal(type(rv), text_type)
            rv = render('{{ "</script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c/script\\u003e"')
            rv = render('{{ "<\0/script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"')
            rv = render('{{ "<!--<script>"|tojson }}')
            self.assert_equal(rv, '"\\u003c!--\\u003cscript\\u003e"')
            rv = render('{{ "&"|tojson }}')
            self.assert_equal(rv, '"\\u0026"')
            rv = render('{{ "\'"|tojson }}')
            self.assert_equal(rv, '"\\u0027"')
            rv = render("<a ng-data='{{ data|tojson }}'></a>",
                        data={'x': ["foo", "bar", "baz'"]})
            self.assert_equal(rv,
                '<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>')

    def test_json_customization(self):
        # Apps may install their own encoder/decoder classes; here a custom
        # type X round-trips through a private {'_foo': val} representation.
        class X(object):
            def __init__(self, val):
                self.val = val
        class MyEncoder(flask.json.JSONEncoder):
            def default(self, o):
                if isinstance(o, X):
                    return '<%d>' % o.val
                return flask.json.JSONEncoder.default(self, o)
        class MyDecoder(flask.json.JSONDecoder):
            def __init__(self, *args, **kwargs):
                kwargs.setdefault('object_hook', self.object_hook)
                flask.json.JSONDecoder.__init__(self, *args, **kwargs)
            def object_hook(self, obj):
                if len(obj) == 1 and '_foo' in obj:
                    return X(obj['_foo'])
                return obj
        app = flask.Flask(__name__)
        app.testing = True
        app.json_encoder = MyEncoder
        app.json_decoder = MyDecoder
        @app.route('/', methods=['POST'])
        def index():
            return flask.json.dumps(flask.request.get_json()['x'])
        c = app.test_client()
        rv = c.post('/', data=flask.json.dumps({
            'x': {'_foo': 42}
        }), content_type='application/json')
        self.assertEqual(rv.data, b'"<42>"')

    def test_modified_url_encoding(self):
        class ModifiedRequest(flask.Request):
            url_charset = 'euc-kr'
        app = flask.Flask(__name__)
        app.testing = True
        app.request_class = ModifiedRequest
        app.url_map.charset = 'euc-kr'
        @app.route('/')
        def index():
            return flask.request.args['foo']
        rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
        self.assert_equal(rv.status_code, 200)
        self.assert_equal(rv.data, u'정상처리'.encode('utf-8'))

    # Skip the test above when the interpreter lacks the euc-kr codec.
    if not has_encoding('euc-kr'):
        test_modified_url_encoding = None

    def test_json_key_sorting(self):
        app = flask.Flask(__name__)
        app.testing = True
        self.assert_equal(app.config['JSON_SORT_KEYS'], True)
        d = dict.fromkeys(range(20), 'foo')
        @app.route('/')
        def index():
            return flask.jsonify(values=d)
        c = app.test_client()
        rv = c.get('/')
        lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
        # Integer keys must come out sorted numerically ("9" before "10").
        self.assert_equal(lines, [
            '{',
            '"values": {',
            '"0": "foo",',
            '"1": "foo",',
            '"2": "foo",',
            '"3": "foo",',
            '"4": "foo",',
            '"5": "foo",',
            '"6": "foo",',
            '"7": "foo",',
            '"8": "foo",',
            '"9": "foo",',
            '"10": "foo",',
            '"11": "foo",',
            '"12": "foo",',
            '"13": "foo",',
            '"14": "foo",',
            '"15": "foo",',
            '"16": "foo",',
            '"17": "foo",',
            '"18": "foo",',
            '"19": "foo"',
            '}',
            '}'
        ])
class SendfileTestCase(FlaskTestCase):
    """Exercises flask.send_file: direct passthrough, X-Sendfile delegation,
    file objects, attachments, and cache-timeout handling."""

    def test_send_file_regular(self):
        app = flask.Flask(__name__)
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            self.assert_true(rv.direct_passthrough)
            self.assert_equal(rv.mimetype, 'text/html')
            with app.open_resource('static/index.html') as f:
                rv.direct_passthrough = False
                self.assert_equal(rv.data, f.read())
            rv.close()

    def test_send_file_xsendfile(self):
        app = flask.Flask(__name__)
        app.use_x_sendfile = True
        with app.test_request_context():
            rv = flask.send_file('static/index.html')
            self.assert_true(rv.direct_passthrough)
            # With X-Sendfile, the body is delegated to the web server via
            # an 'x-sendfile' header holding the absolute path.
            self.assert_in('x-sendfile', rv.headers)
            self.assert_equal(rv.headers['x-sendfile'],
                os.path.join(app.root_path, 'static/index.html'))
            self.assert_equal(rv.mimetype, 'text/html')
            rv.close()

    def test_send_file_object(self):
        # Passing open file objects (or StringIO) instead of filenames emits
        # deprecation-style warnings; the exact counts are asserted below.
        app = flask.Flask(__name__)
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                with app.open_resource('static/index.html') as f:
                    self.assert_equal(rv.data, f.read())
                self.assert_equal(rv.mimetype, 'text/html')
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        app.use_x_sendfile = True
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f)
                self.assert_equal(rv.mimetype, 'text/html')
                self.assert_in('x-sendfile', rv.headers)
                self.assert_equal(rv.headers['x-sendfile'],
                    os.path.join(app.root_path, 'static/index.html'))
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        app.use_x_sendfile = False
        with app.test_request_context():
            with catch_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f)
                rv.direct_passthrough = False
                self.assert_equal(rv.data, b'Test')
                self.assert_equal(rv.mimetype, 'application/octet-stream')
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)
            with catch_warnings() as captured:
                f = StringIO('Test')
                rv = flask.send_file(f, mimetype='text/plain')
                rv.direct_passthrough = False
                self.assert_equal(rv.data, b'Test')
                self.assert_equal(rv.mimetype, 'text/plain')
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)

        app.use_x_sendfile = True
        with catch_warnings() as captured:
            with app.test_request_context():
                f = StringIO('Test')
                rv = flask.send_file(f)
                # File objects have no filesystem path, so X-Sendfile
                # cannot be used for them.
                self.assert_not_in('x-sendfile', rv.headers)
                rv.close()
            # etags
            self.assert_equal(len(captured), 1)

    def test_attachment(self):
        app = flask.Flask(__name__)
        with catch_warnings() as captured:
            with app.test_request_context():
                f = open(os.path.join(app.root_path, 'static/index.html'))
                rv = flask.send_file(f, as_attachment=True)
                value, options = parse_options_header(rv.headers['Content-Disposition'])
                self.assert_equal(value, 'attachment')
                rv.close()
            # mimetypes + etag
            self.assert_equal(len(captured), 2)

        with app.test_request_context():
            self.assert_equal(options['filename'], 'index.html')
            rv = flask.send_file('static/index.html', as_attachment=True)
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            self.assert_equal(value, 'attachment')
            self.assert_equal(options['filename'], 'index.html')
            rv.close()

        with app.test_request_context():
            # File-like objects need an explicit attachment_filename.
            rv = flask.send_file(StringIO('Test'), as_attachment=True,
                                 attachment_filename='index.txt',
                                 add_etags=False)
            self.assert_equal(rv.mimetype, 'text/plain')
            value, options = parse_options_header(rv.headers['Content-Disposition'])
            self.assert_equal(value, 'attachment')
            self.assert_equal(options['filename'], 'index.txt')
            rv.close()

    def test_static_file(self):
        app = flask.Flask(__name__)
        # default cache timeout is 12 hours
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 12 * 60 * 60)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 12 * 60 * 60)
            rv.close()
        # The SEND_FILE_MAX_AGE_DEFAULT config key overrides the default.
        app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 3600)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 3600)
            rv.close()
        # Subclasses may override get_send_file_max_age per filename.
        class StaticFileApp(flask.Flask):
            def get_send_file_max_age(self, filename):
                return 10
        app = StaticFileApp(__name__)
        with app.test_request_context():
            # Test with static file handler.
            rv = app.send_static_file('index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 10)
            rv.close()
            # Test again with direct use of send_file utility.
            rv = flask.send_file('static/index.html')
            cc = parse_cache_control_header(rv.headers['Cache-Control'])
            self.assert_equal(cc.max_age, 10)
            rv.close()
class LoggingTestCase(FlaskTestCase):
    """Exercises app.logger behavior and error handling.

    NOTE(review): the url_for tests at the bottom are unrelated to logging;
    consider moving them to their own test case.
    """

    def test_logger_cache(self):
        app = flask.Flask(__name__)
        logger1 = app.logger
        self.assert_true(app.logger is logger1)
        self.assert_equal(logger1.name, __name__)
        # Changing logger_name invalidates the cached logger object.
        app.logger_name = __name__ + '/test_logger_cache'
        self.assert_true(app.logger is not logger1)

    def test_debug_log(self):
        app = flask.Flask(__name__)
        app.debug = True
        @app.route('/')
        def index():
            app.logger.warning('the standard library is dead')
            app.logger.debug('this is a debug statement')
            return ''
        @app.route('/exc')
        def exc():
            1 // 0
        with app.test_client() as c:
            with catch_stderr() as err:
                c.get('/')
                out = err.getvalue()
                # In debug mode both warning and debug records reach stderr.
                self.assert_in('WARNING in helpers [', out)
                self.assert_in(os.path.basename(__file__.rsplit('.', 1)[0] + '.py'), out)
                self.assert_in('the standard library is dead', out)
                self.assert_in('this is a debug statement', out)
            with catch_stderr() as err:
                # In debug mode the exception must propagate to the caller
                # rather than being swallowed by the logging machinery.
                try:
                    c.get('/exc')
                except ZeroDivisionError:
                    pass
                else:
                    self.assert_true(False, 'debug log ate the exception')

    def test_debug_log_override(self):
        app = flask.Flask(__name__)
        app.debug = True
        app.logger_name = 'flask_tests/test_debug_log_override'
        app.logger.level = 10
        self.assert_equal(app.logger.level, 10)

    def test_exception_logging(self):
        out = StringIO()
        app = flask.Flask(__name__)
        app.logger_name = 'flask_tests/test_exception_logging'
        app.logger.addHandler(StreamHandler(out))
        @app.route('/')
        def index():
            1 // 0
        # Outside debug mode the exception is logged and a 500 returned.
        rv = app.test_client().get('/')
        self.assert_equal(rv.status_code, 500)
        self.assert_in(b'Internal Server Error', rv.data)
        err = out.getvalue()
        self.assert_in('Exception on / [GET]', err)
        self.assert_in('Traceback (most recent call last):', err)
        self.assert_in('1 // 0', err)
        self.assert_in('ZeroDivisionError:', err)

    def test_processor_exceptions(self):
        app = flask.Flask(__name__)
        # `trigger` is a closure over the loop variable assigned below; it
        # selects which request hook raises on each iteration.
        @app.before_request
        def before_request():
            if trigger == 'before':
                1 // 0
        @app.after_request
        def after_request(response):
            if trigger == 'after':
                1 // 0
            return response
        @app.route('/')
        def index():
            return 'Foo'
        @app.errorhandler(500)
        def internal_server_error(e):
            return 'Hello Server Error', 500
        # Exceptions in either hook must reach the 500 error handler.
        for trigger in 'before', 'after':
            rv = app.test_client().get('/')
            self.assert_equal(rv.status_code, 500)
            self.assert_equal(rv.data, b'Hello Server Error')

    def test_url_for_with_anchor(self):
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return '42'
        with app.test_request_context():
            self.assert_equal(flask.url_for('index', _anchor='x y'),
                              '/#x%20y')

    def test_url_for_with_scheme(self):
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return '42'
        with app.test_request_context():
            self.assert_equal(flask.url_for('index',
                                            _external=True,
                                            _scheme='https'),
                              'https://localhost/')

    def test_url_for_with_scheme_not_external(self):
        app = flask.Flask(__name__)
        @app.route('/')
        def index():
            return '42'
        with app.test_request_context():
            # _scheme without _external=True is rejected.
            self.assert_raises(ValueError,
                               flask.url_for,
                               'index',
                               _scheme='https')

    def test_url_with_method(self):
        from flask.views import MethodView
        app = flask.Flask(__name__)
        class MyView(MethodView):
            def get(self, id=None):
                if id is None:
                    return 'List'
                return 'Get %d' % id
            def post(self):
                return 'Create'
        myview = MyView.as_view('myview')
        app.add_url_rule('/myview/', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/<int:id>', methods=['GET'],
                         view_func=myview)
        app.add_url_rule('/myview/create', methods=['POST'],
                         view_func=myview)
        with app.test_request_context():
            # _method disambiguates between rules sharing one endpoint.
            self.assert_equal(flask.url_for('myview', _method='GET'),
                              '/myview/')
            self.assert_equal(flask.url_for('myview', id=42, _method='GET'),
                              '/myview/42')
            self.assert_equal(flask.url_for('myview', _method='POST'),
                              '/myview/create')
class NoImportsTestCase(FlaskTestCase):
    """Test Flasks are created without import.

    Avoiding ``__import__`` helps create Flask instances where there are errors
    at import time. Those runtime errors will be apparent to the user soon
    enough, but tools which build Flask instances meta-programmatically benefit
    from a Flask which does not ``__import__``. Instead of importing to
    retrieve file paths or metadata on a module or package, use the pkgutil and
    imp modules in the Python standard library.
    """

    def test_name_with_import_error(self):
        # Constructing Flask('importerror') must not import that module;
        # the test fixture presumably raises NotImplementedError on import.
        try:
            flask.Flask('importerror')
        except NotImplementedError:
            self.fail('Flask(import_name) is importing import_name.')
class StreamingTestCase(FlaskTestCase):
    """Exercises flask.stream_with_context: streamed responses that still
    have access to the request context while the generator runs."""

    def test_streaming_with_context(self):
        app = flask.Flask(__name__)
        app.testing = True
        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                # Accessing the request inside the generator only works
                # because it is wrapped in stream_with_context below.
                yield flask.request.args['name']
                yield '!'
            return flask.Response(flask.stream_with_context(generate()))
        c = app.test_client()
        rv = c.get('/?name=World')
        self.assertEqual(rv.data, b'Hello World!')

    def test_streaming_with_context_as_decorator(self):
        app = flask.Flask(__name__)
        app.testing = True
        @app.route('/')
        def index():
            # stream_with_context also works as a generator decorator.
            @flask.stream_with_context
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(generate())
        c = app.test_client()
        rv = c.get('/?name=World')
        self.assertEqual(rv.data, b'Hello World!')

    def test_streaming_with_context_and_custom_close(self):
        app = flask.Flask(__name__)
        app.testing = True
        called = []
        # Wrapper records close() calls so we can assert the wrapped
        # iterator is properly closed after streaming finishes.
        class Wrapper(object):
            def __init__(self, gen):
                self._gen = gen
            def __iter__(self):
                return self
            def close(self):
                called.append(42)
            def __next__(self):
                return next(self._gen)
            next = __next__  # Python 2 iterator protocol alias
        @app.route('/')
        def index():
            def generate():
                yield 'Hello '
                yield flask.request.args['name']
                yield '!'
            return flask.Response(flask.stream_with_context(
                Wrapper(generate())))
        c = app.test_client()
        rv = c.get('/?name=World')
        self.assertEqual(rv.data, b'Hello World!')
        self.assertEqual(called, [42])
def suite():
    """Build the unittest suite for the helper tests.

    The JSON tests are included only when Flask reports JSON support.
    """
    # Named `helper_suite` to avoid shadowing the function name `suite`.
    helper_suite = unittest.TestSuite()
    if flask.json_available:
        helper_suite.addTest(unittest.makeSuite(JSONTestCase))
    helper_suite.addTest(unittest.makeSuite(SendfileTestCase))
    helper_suite.addTest(unittest.makeSuite(LoggingTestCase))
    helper_suite.addTest(unittest.makeSuite(NoImportsTestCase))
    helper_suite.addTest(unittest.makeSuite(StreamingTestCase))
    return helper_suite
| bsd-2-clause |
cesardeazevedo/ArnoldC-Speaker | bower_components/ace/static.py | 71 | 8436 | #!/usr/bin/env python
"""static - A stupidly simple WSGI way to serve static (or mixed) content.
(See the docstrings of the various functions and classes.)
Copyright (C) 2006-2009 Luke Arno - http://lukearno.com/
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to:
The Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
Luke Arno can be found at http://lukearno.com/
"""
import mimetypes
import rfc822
import time
import string
import sys
from os import path, stat, getcwd
from wsgiref import util
from wsgiref.headers import Headers
from wsgiref.simple_server import make_server
from optparse import OptionParser
try: from pkg_resources import resource_filename, Requirement
except: pass
try: import kid
except: pass
class MagicError(Exception): pass
class StatusApp:
    """Used by WSGI apps to return some HTTP status.

    Instances are tiny WSGI applications: calling one sends ``status``
    with the given headers and returns the message as the body.
    """
    def __init__(self, status, message=None):
        """Store the status line and an optional body message.

        :param status: full HTTP status line, e.g. ``'404 Not Found'``.
        :param message: response body; defaults to the status line itself.
        """
        self.status = status
        if message is None:
            self.message = status
        else:
            self.message = message
    def __call__(self, environ, start_response, headers=None):
        """Respond as a WSGI app.

        ``headers``, when supplied by the caller, is extended in place
        (Cling relies on this to surface its own headers).
        """
        # Bug fix: the previous signature used a mutable default
        # (``headers=[]``), so every default-argument call appended another
        # 'Content-type' header to the same shared list.
        if headers is None:
            headers = []
        if self.message:
            Headers(headers).add_header('Content-type', 'text/plain')
        start_response(self.status, headers)
        if environ['REQUEST_METHOD'] == 'HEAD':
            # HEAD responses carry headers only.
            return [""]
        return [self.message]
class Cling(object):
    """A stupidly simple way to serve static content via WSGI.
    Serve the file of the same path as PATH_INFO in self.datadir.
    Look up the Content-type in self.content_types by extension
    or use 'text/plain' if the extension is not found.
    Serve up the contents of the file or delegate to self.not_found.
    """
    # Size of each chunk read from disk (64 KiB).
    block_size = 16 * 4096
    # File served when a directory is requested.
    index_file = 'index.html'
    # Shared StatusApp delegates for non-200 responses.
    not_found = StatusApp('404 Not Found')
    not_modified = StatusApp('304 Not Modified', "")
    moved_permanently = StatusApp('301 Moved Permanently')
    method_not_allowed = StatusApp('405 Method Not Allowed')
    def __init__(self, root, **kw):
        """Just set the root and any other attribs passes via **kw."""
        # NOTE(review): this module is Python 2 only — dict.iteritems()
        # here, plus "<>", "except X, e" and print statements below.
        self.root = root
        for k, v in kw.iteritems():
            setattr(self, k, v)
    def __call__(self, environ, start_response):
        """Respond to a request when called in the usual WSGI way."""
        # Only GET and HEAD are supported; everything else is a 405.
        if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
            headers = [('Allow', 'GET, HEAD')]
            return self.method_not_allowed(environ, start_response, headers)
        path_info = environ.get('PATH_INFO', '')
        full_path = self._full_path(path_info)
        # Refuse paths that escape the document root (e.g. via "..").
        if not self._is_under_root(full_path):
            return self.not_found(environ, start_response)
        if path.isdir(full_path):
            # Redirect "dir" to "dir/" so relative links resolve correctly,
            # otherwise serve the directory's index file.
            if full_path[-1] <> '/' or full_path == self.root:
                location = util.request_uri(environ, include_query=False) + '/'
                if environ.get('QUERY_STRING'):
                    location += '?' + environ.get('QUERY_STRING')
                headers = [('Location', location)]
                return self.moved_permanently(environ, start_response, headers)
            else:
                full_path = self._full_path(path_info + self.index_file)
        content_type = self._guess_type(full_path)
        try:
            etag, last_modified = self._conditions(full_path, environ)
            headers = [('Date', rfc822.formatdate(time.time())),
                       ('Last-Modified', last_modified),
                       ('ETag', etag)]
            # Conditional-GET support: answer 304 for a matching
            # If-Modified-Since or If-None-Match header.
            if_modified = environ.get('HTTP_IF_MODIFIED_SINCE')
            if if_modified and (rfc822.parsedate(if_modified)
                                >= rfc822.parsedate(last_modified)):
                return self.not_modified(environ, start_response, headers)
            if_none = environ.get('HTTP_IF_NONE_MATCH')
            if if_none and (if_none == '*' or etag in if_none):
                return self.not_modified(environ, start_response, headers)
            file_like = self._file_like(full_path)
            headers.append(('Content-Type', content_type))
            start_response("200 OK", headers)
            if environ['REQUEST_METHOD'] == 'GET':
                return self._body(full_path, environ, file_like)
            else:
                # HEAD: headers only, empty body.
                return ['']
        except (IOError, OSError), e:
            # Missing/unreadable file -> 404 (error printed to stdout).
            print e
            return self.not_found(environ, start_response)
    def _full_path(self, path_info):
        """Return the full path from which to read."""
        return self.root + path_info
    def _is_under_root(self, full_path):
        """Guard against arbitrary file retrieval."""
        # Trailing separator prevents "/root-evil" matching "/root".
        if (path.abspath(full_path) + path.sep)\
            .startswith(path.abspath(self.root) + path.sep):
            return True
        else:
            return False
    def _guess_type(self, full_path):
        """Guess the mime type using the mimetypes module."""
        return mimetypes.guess_type(full_path)[0] or 'text/plain'
    def _conditions(self, full_path, environ):
        """Return a tuple of etag, last_modified by mtime from stat."""
        mtime = stat(full_path).st_mtime
        return str(mtime), rfc822.formatdate(mtime)
    def _file_like(self, full_path):
        """Return the appropriate file object."""
        return open(full_path, 'rb')
    def _body(self, full_path, environ, file_like):
        """Return an iterator over the body of the response."""
        # Prefer the server-supplied wsgi.file_wrapper (may use sendfile);
        # fall back to the chunked generator below.
        way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
        return way_to_send(file_like, self.block_size)
def iter_and_close(file_like, block_size):
    """Yield file contents by block then close the file."""
    # Fallback body iterator when the server offers no wsgi.file_wrapper.
    # NOTE(review): Python 2 syntax ("except X, e"); on Python 3.7+ the
    # internal "raise StopIteration" inside a generator would surface as a
    # RuntimeError (PEP 479) — fine here, as this file targets Python 2.
    while 1:
        try:
            block = file_like.read(block_size)
            if block: yield block
            else: raise StopIteration
        except StopIteration, si:
            # End of file: release the file handle before finishing.
            file_like.close()
            return
def cling_wrap(package_name, dir_name, **kw):
    """Return a Cling that serves from the given package and dir_name.
    This uses pkg_resources.resource_filename which is not the
    recommended way, since it extracts the files.
    I think this works fine unless you have some _very_ serious
    requirements for static content, in which case you probably
    shouldn't be serving it through a WSGI app, IMHO. YMMV.
    """
    requirement = Requirement.parse(package_name)
    # Resolve the package directory on disk (extracting if zipped).
    static_root = resource_filename(requirement, dir_name)
    return Cling(static_root, **kw)
def command():
    """Console entry point: serve DIR over HTTP on optional [HOST][:][PORT]."""
    parser = OptionParser(usage="%prog DIR [HOST][:][PORT]",
                          version="static 0.3.6")
    options, args = parser.parse_args()
    if len(args) in (1, 2):
        if len(args) == 2:
            # Second argument may be "host", "host:port" or ":port".
            parts = args[1].split(":")
            if len(parts) == 1:
                host = parts[0]
                port = None
            elif len(parts) == 2:
                host, port = parts
            else:
                sys.exit("Invalid host:port specification.")
        elif len(args) == 1:
            host, port = None, None
        # Defaults: listen on all interfaces, port 8888.
        if not host:
            host = '0.0.0.0'
        if not port:
            port = 8888
        try:
            port = int(port)
        except:
            sys.exit("Invalid host:port specification.")
        app = Cling(args[0])
        try:
            make_server(host, port, app).serve_forever()
        except KeyboardInterrupt, ki:
            # NOTE(review): "Cio" looks like a typo for "Ciao" (cf. test()).
            print "Cio, baby!"
        except:
            sys.exit("Problem initializing server.")
    else:
        # Wrong number of arguments: show usage and fail.
        parser.print_help(sys.stderr)
        sys.exit(1)
def test():
    """Serve the current working directory through a WSGI validator."""
    # The validator wrapper checks WSGI-compliance of every response.
    from wsgiref.validate import validator
    app = Cling(getcwd())
    try:
        print "Serving " + getcwd() + " to http://localhost:8888"
        make_server('0.0.0.0', 8888, validator(app)).serve_forever()
    except KeyboardInterrupt, ki:
        # Ctrl-C is the normal way to stop the demo server.
        print ""
        print "Ciao, baby!"
# Running "python static.py" directly serves the current directory.
if __name__ == '__main__':
    test()
| mit |
markuskiller/textblob-de | textblob_de/blob.py | 1 | 30630 | # -*- coding: utf-8 -*-
# Code adapted from the main `TextBlob`_ library.
#
# :repo: `https://github.com/sloria/TextBlob`_
# :source: textblob/blob.py
# :version: 2013-10-21 (a88e86a76a)
#
# :modified: 2014-09-17 <m.killer@langui.ch>
#
"""Wrappers for various units of text.
This includes the main :class:`TextBlobDE <textblob_de.blob.TextBlobDE>`,
:class:`Word <textblob_de.blob.Word>`, and :class:`WordList <textblob_de.blob.WordList>` classes.
Whenever possible, classes are inherited from the main `TextBlob`_ library, but in many
cases, the models for German have to be initialised here in :py:mod:`textblob_de.blob`, resulting
in a lot of duplicate code. The main reason are the :class:`Word <textblob_de.blob.Word>` objects.
If they are generated from an inherited class, they will use the English models
(e.g. for ``pluralize``/``singularize``) used in the main library.
Example usage: ::
>>> from textblob_de import TextBlobDE
>>> b = TextBlobDE("Einfach ist besser als kompliziert.")
>>> b.tags
[('Einfach', 'RB'), ('ist', 'VB'), ('besser', 'RB'), ('als', 'IN'), ('kompliziert', 'JJ')]
>>> b.noun_phrases
WordList([])
>>> b.words
WordList(['Einfach', 'ist', 'besser', 'als', 'kompliziert'])
.. _TextBlob: http://textblob.readthedocs.org/
"""
from __future__ import absolute_import
import json
import sys
from collections import defaultdict, namedtuple
from textblob.blob import _initialize_models
from textblob.decorators import cached_property, requires_nltk_corpus
from textblob.translate import Translator
from textblob.utils import lowerstrip
from textblob_de.base import BaseBlob as _BaseBlob
from textblob_de.compat import unicode, basestring
from textblob_de.tokenizers import NLTKPunktTokenizer
from textblob_de.tokenizers import word_tokenize, sent_tokenize
from textblob_de.taggers import PatternTagger
from textblob_de.packages import pattern_de
from textblob_de.parsers import PatternParser
from textblob_de.np_extractors import PatternParserNPExtractor
from textblob_de.lemmatizers import PatternParserLemmatizer
from textblob_de.sentiments import PatternAnalyzer
# Module-level aliases for pattern.de's German inflection helpers,
# used by Word.singularize() / Word.pluralize() below.
_singularize = pattern_de.inflect.singularize
_pluralize = pattern_de.inflect.pluralize
class Word(unicode):
    """A simple word representation.
    Includes methods for inflection, translation, and WordNet
    integration.
    Note: the WordNet-backed features (lemma, synsets, definitions,
    spellcheck, correct) are not implemented for German and raise
    NotImplementedError.
    """
    # Shared Translator instance used by translate()/detect_language().
    translator = Translator()
    def __new__(cls, string, pos_tag=None):
        """Return a new instance of the class.
        It is necessary to override this method in order to handle the
        extra pos_tag argument in the constructor.
        """
        # unicode is immutable, so construction happens in __new__; the
        # pos_tag argument is consumed by __init__ instead.
        return super(Word, cls).__new__(cls, string)
    def __init__(self, string, pos_tag=None):
        self.string = string
        self.pos_tag = pos_tag
    def __repr__(self):
        return repr(self.string)
    def __str__(self):
        return self.string
    def singularize(self):
        """Return the singular version of the word as a string."""
        # German morphology via pattern.de.
        return Word(_singularize(self.string))
    def pluralize(self):
        """Return the plural version of the word as a string."""
        return Word(_pluralize(self.string))
    def translate(self, from_lang=None, to="de"):
        """Translate the word to another language using Google's Translate API.
        .. versionadded:: 0.5.0 (``textblob``)
        """
        # Auto-detect the source language when not given.
        if from_lang is None:
            from_lang = self.translator.detect(self.string)
        return self.translator.translate(self.string,
                                         from_lang=from_lang, to_lang=to)
    def detect_language(self):
        """Detect the word's language using Google's Translate API.
        .. versionadded:: 0.5.0 (``textblob``)
        """
        return self.translator.detect(self.string)
    def spellcheck(self):
        """Return a list of (word, confidence) tuples of spelling corrections.
        Based on: Peter Norvig, "How to Write a Spelling Corrector"
        (http://norvig.com/spell-correct.html) as implemented in the pattern
        library.
        Not implemented for German.
        .. versionadded:: 0.6.0 (``textblob``)
        """
        # return suggest(self.string)
        raise NotImplementedError
    def correct(self):
        """Correct the spelling of the word. Returns the word with the highest
        confidence using the spelling corrector.
        Not implemented for German.
        .. versionadded:: 0.6.0 (``textblob``)
        """
        # return Word(self.spellcheck()[0][0])
        raise NotImplementedError
    @cached_property
    def lemma(self):
        """Return the lemma of this word using Wordnet's morphy function.
        Not implemented for German (no WordNet backend)."""
        #tag = _penn_to_wordnet(self.pos_tag) if (self.pos_tag is not None) else None
        # return self.lemmatize(pos=tag)
        raise NotImplementedError
    @requires_nltk_corpus
    def lemmatize(self, pos=None):
        """Return the lemma for a word using WordNet's morphy function.
        Not implemented for German (no WordNet backend).
        :param pos: Part of speech to filter upon. If `None`, defaults to
        ``_wordnet.NOUN``.
        .. versionadded:: 0.8.1 (``textblob``)
        """
        # if pos is None:
        #pos = _wordnet.NOUN
        #lemmatizer = nltk.stem.WordNetLemmatizer()
        # return lemmatizer.lemmatize(self.string, pos)
        raise NotImplementedError
    @cached_property
    def synsets(self):
        """The list of Synset objects for this Word.
        Not implemented for German.
        :rtype: list of Synsets
        .. versionadded:: 0.7.0 (``textblob``)
        """
        # return self.get_synsets(pos=None)
        raise NotImplementedError
    @cached_property
    def definitions(self):
        """The list of definitions for this word. Each definition corresponds
        to a synset.
        Not implemented for German.
        .. versionadded:: 0.7.0 (``textblob``)
        """
        # return self.define(pos=None)
        raise NotImplementedError
    def get_synsets(self, pos=None):
        """Return a list of Synset objects for this word.
        Not implemented for German.
        :param pos: A part-of-speech tag to filter upon. If ``None``, all
        synsets for all parts of speech will be loaded.
        :rtype: list of Synsets
        .. versionadded:: 0.7.0 (``textblob``)
        """
        # return _wordnet.synsets(self.string, pos)
        raise NotImplementedError
    def define(self, pos=None):
        """Return a list of definitions for this word. Each definition
        corresponds to a synset for this word.
        Not implemented for German.
        :param pos: A part-of-speech tag to filter upon. If ``None``, definitions
        for all parts of speech will be loaded.
        :rtype: List of strings
        .. versionadded:: 0.7.0 (``textblob``)
        """
        # return [syn.definition for syn in self.get_synsets(pos=pos)]
        raise NotImplementedError
# Cannot inherit from textblob.blob.WordList, otherwise the
# properties of Word() will use the English models
class WordList(list):
    """A list-like collection of words.

    Read access (iteration, indexing, slicing, count) goes through the
    internal ``_collection`` of :class:`Word <Word>` objects; the inherited
    ``list`` storage is kept in sync so that ``len()``, ``in`` and ``==``
    (which bypass ``__iter__``) stay correct after mutation.
    """
    def __init__(self, collection):
        """Initialize a WordList.

        Takes a collection of strings as its only argument.
        """
        self._collection = [Word(w) for w in collection]
        super(WordList, self).__init__(self._collection)
    def __str__(self):
        return str(self._collection)
    def __repr__(self):
        """Returns a string representation for debugging."""
        class_name = self.__class__.__name__
        return '{cls}({lst})'.format(
            cls=class_name, lst=repr(self._collection))
    def __getitem__(self, key):
        """Returns a string at the given index."""
        if isinstance(key, slice):
            # Slices come back as a WordList, single items as Word.
            return self.__class__(self._collection[key])
        else:
            return self._collection[key]
    def __getslice__(self, i, j):
        # This is included for Python 2.* compatibility
        return self.__class__(self._collection[i:j])
    def __iter__(self):
        return iter(self._collection)
    def count(self, strg, case_sensitive=False, *args, **kwargs):
        """Get the count of a word or phrase `s` within this WordList.

        :param strg: The string to count.
        :param case_sensitive: A boolean, whether or not the search is case-sensitive.
        """
        if not case_sensitive:
            return [word.lower() for word in self].count(strg.lower(), *args,
                                                         **kwargs)
        return self._collection.count(strg, *args, **kwargs)
    def append(self, obj):
        """Append an object to end. If the object is a string, appends a
        :class:`Word <Word>` object.
        """
        word = Word(obj) if isinstance(obj, basestring) else obj
        self._collection.append(word)
        # Bug fix: also update the inherited list storage; previously only
        # _collection grew, so len(), "in" and comparisons went stale.
        super(WordList, self).append(word)
    def extend(self, iterable):
        """Extend WordList by appending elements from ``iterable``.

        If an element is a string, appends a :class:`Word <Word>` object.
        """
        for e in iterable:
            self.append(e)
        return self
    def upper(self):
        """Return a new WordList with each word upper-cased."""
        return self.__class__([word.upper() for word in self])
    def lower(self):
        """Return a new WordList with each word lower-cased."""
        return self.__class__([word.lower() for word in self])
    def singularize(self):
        """Return the single version of each word in this WordList."""
        return self.__class__([word.singularize() for word in self])
    def pluralize(self):
        """Return the plural version of each word in this WordList."""
        return self.__class__([word.pluralize() for word in self])
    def lemmatize(self):
        """Return the lemma of each word in this WordList.

        Currently using NLTKPunktTokenizer() for all lemmatization
        tasks. This might cause slightly different tokenization results
        compared to the TextBlob.words property.
        """
        _lemmatizer = PatternParserLemmatizer(tokenizer=NLTKPunktTokenizer())
        # WordList object --> Sentence.string
        # add a period (improves parser accuracy)
        _raw = " ".join(self) + "."
        _lemmas = _lemmatizer.lemmatize(_raw)
        return self.__class__([Word(l, t) for l, t in _lemmas])
class BaseBlob(_BaseBlob):
    '''``BaseBlob`` class initialised with German default models:
    An abstract base class that all textblob classes will inherit from.
    Includes words, POS tag, NP, and word count properties. Also includes
    basic dunder and string methods for making objects like Python strings.
    :param str text: A string.
    :param tokenizer: (optional) A tokenizer instance. If ``None``, defaults to
        :class:`NLTKPunktTokenizer() <textblob_de.tokenizers.NLTKPunktTokenizer>`.
    :param np_extractor: (optional) An NPExtractor instance. If ``None``,
        defaults to :class:`PatternParserNPExtractor()
        <textblob_de.np_extractors.PatternParserNPExtractor>`.
    :param pos_tagger: (optional) A Tagger instance. If ``None``, defaults to
        :class:`PatternTagger <textblob_de.taggers.PatternTagger>`.
    :param analyzer: (optional) A sentiment analyzer. If ``None``, defaults to
        :class:`PatternAnalyzer <textblob_de.sentiments.PatternAnalyzer>`.
    :param classifier: (optional) A classifier.
    .. versionchanged:: 0.6.0
        ``clean_html`` parameter deprecated, as it was in NLTK.
    '''
    def __init__(self, text, tokenizer=None,
                 pos_tagger=None,
                 np_extractor=None,
                 analyzer=None,
                 parser=None,
                 classifier=None, clean_html=False):
        # All default models are the German variants, and every model
        # shares the same tokenizer instance for consistent tokenization.
        self.tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer()
        self.pos_tagger = pos_tagger if pos_tagger is not None else PatternTagger(
            tokenizer=self.tokenizer)
        self.np_extractor = np_extractor if np_extractor is not None \
            else PatternParserNPExtractor(tokenizer=self.tokenizer)
        self.analyzer = analyzer if analyzer is not None \
            else PatternAnalyzer(tokenizer=self.tokenizer)
        self.parser = parser if parser is not None \
            else PatternParser(tokenizer=self.tokenizer)
        self.classifier = classifier if classifier is not None else None
        if not isinstance(text, basestring):
            raise TypeError('The `text` argument passed to `__init__(text)` '
                            'must be a string, not {0}'.format(type(text)))
        if clean_html:
            raise NotImplementedError(
                "clean_html has been deprecated. "
                "To remove HTML markup, use BeautifulSoup's "
                "get_text() function")
        self.raw = self.string = text
        self.stripped = lowerstrip(self.raw, all=True)
        # Validates/normalizes the models (helper from textblob.blob).
        _initialize_models(
            self,
            self.tokenizer,
            self.pos_tagger,
            self.np_extractor,
            self.analyzer,
            self.parser,
            self.classifier)
    @cached_property
    def words(self):
        """Return a list of word tokens. This excludes punctuation characters.
        If you want to include punctuation characters, access the ``tokens``
        property.
        :returns: A :class:`WordList <WordList>` of word tokens.
        """
        return WordList(
            word_tokenize(
                self.raw,
                tokenizer=self.tokenizer,
                include_punc=False))
    @cached_property
    def tokens(self):
        '''Return a list of tokens, using this blob's tokenizer object
        (defaults to :class:`WordTokenizer <textblob.tokenizers.WordTokenizer>`).
        '''
        return WordList(self.tokenizer.tokenize(self.raw))
    def tokenize(self, tokenizer=None):
        """Return a list of tokens, using ``tokenizer``.
        :param tokenizer: (optional) A tokenizer object. If None, defaults to
        this blob's default tokenizer.
        """
        t = tokenizer if tokenizer is not None else self.tokenizer
        return WordList(t.tokenize(self.raw))
    @cached_property
    def sentiment(self):
        """Return a tuple of form (polarity, subjectivity ) where polarity
        is a float within the range [-1.0, 1.0] and subjectivity is a float
        within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
        very subjective.
        :rtype: namedtuple of the form ``Sentiment(polarity, subjectivity)``
        """
        return self.analyzer.analyze(self.raw)
    @cached_property
    def sentiment_assessments(self):
        """Return a tuple of form (polarity, subjectivity, assessments ) where
        polarity is a float within the range [-1.0, 1.0], subjectivity is a
        float within the range [0.0, 1.0] where 0.0 is very objective and 1.0
        is very subjective, and assessments is a list of polarity and
        subjectivity scores for the assessed tokens.
        Not implemented for German.
        :rtype: namedtuple of the form ``Sentiment(polarity, subjectivity, assessments)``
        """
        raise NotImplementedError
    @cached_property
    def polarity(self):
        """Return the polarity score as a float within the range [-1.0, 1.0]
        :rtype: float
        """
        return self.sentiment[0]
    @cached_property
    def subjectivity(self):
        '''Return the subjectivity score as a float within the range [0.0, 1.0]
        where 0.0 is very objective and 1.0 is very subjective.
        :rtype: float
        '''
        return self.sentiment[1]
    @cached_property
    def noun_phrases(self):
        """Returns a list of noun phrases for this blob."""
        # Single-word phrases are filtered out.
        return WordList([phrase.strip()
                         for phrase in self.np_extractor.extract(self.raw)
                         if len(phrase.split()) > 1])
    @cached_property
    def pos_tags(self):
        """Returns an list of tuples of the form (word, POS tag).
        Example:
        ::
        [('At', 'IN'), ('eight', 'CD'), ("o'clock", 'JJ'), ('on', 'IN'),
        ('Thursday', 'NNP'), ('morning', 'NN')]
        :rtype: list of tuples
        """
        return [(Word(word, pos_tag=t), unicode(t))
                for word, t in self.pos_tagger.tag(self.raw)
                # new keyword PatternTagger(include_punc=False)
                # if not PUNCTUATION_REGEX.match(unicode(t))
                ]
    tags = pos_tags
    @cached_property
    def word_counts(self):
        """Dictionary of word frequencies in this text."""
        counts = defaultdict(int)
        # Counting is case-insensitive and ignores surrounding punctuation.
        stripped_words = [lowerstrip(word) for word in self.words]
        for word in stripped_words:
            counts[word] += 1
        return counts
    @cached_property
    def np_counts(self):
        """Dictionary of noun phrase frequencies in this text."""
        counts = defaultdict(int)
        for phrase in self.noun_phrases:
            counts[phrase] += 1
        return counts
    def translate(self, from_lang=None, to="de"):
        """Translate the blob to another language."""
        # NOTE(review): relies on self.translator, which is not set in this
        # class — presumably provided by textblob_de.base.BaseBlob; confirm.
        if from_lang is None:
            from_lang = self.translator.detect(self.string)
        return self.__class__(
            self.translator.translate(
                self.raw,
                from_lang=from_lang,
                to_lang=to))
    def correct(self):
        """Attempt to correct the spelling of a blob.
        Not implemented for German.
        .. versionadded:: 0.6.0 (``textblob``)
        :rtype: :class:`BaseBlob <BaseBlob>`
        """
        # regex matches: contraction or word or punctuation or whitespace
        #tokens = nltk.tokenize.regexp_tokenize(self.raw, "\w*('\w*)+|\w+|[^\w\s]|\s")
        #corrected = (Word(w).correct() for w in tokens)
        #ret = ''.join(corrected)
        # return self.__class__(ret)
        raise NotImplementedError
    def _cmpkey(self):
        """Key used by ComparableMixin to implement all rich comparison
        operators."""
        return self.raw
    def _strkey(self):
        """Key used by StringlikeMixin to implement string methods."""
        return self.raw
    def __hash__(self):
        return hash(self._cmpkey())
    def __add__(self, other):
        """Concatenates two text objects the same way Python strings are
        concatenated.
        Arguments:
        - `other`: a string or a text object
        """
        if isinstance(other, basestring):
            return self.__class__(self.raw + other)
        elif isinstance(other, BaseBlob):
            return self.__class__(self.raw + other.raw)
        else:
            raise TypeError('Operands must be either strings or {0} objects'
                            .format(self.__class__.__name__))
    def split(self, sep=None, maxsplit=sys.maxsize):
        """Behaves like the built-in str.split() except returns a
        WordList.
        :rtype: :class:`WordList <WordList>`
        """
        return WordList(self._strkey().split(sep, maxsplit))
class Sentence(BaseBlob):
    '''A sentence within a TextBlob. Inherits from :class:`BaseBlob <BaseBlob>`.

    :param sentence: A string, the raw sentence.
    :param start_index: An int, the index where this sentence begins
                        in a TextBlob. If not given, defaults to 0.
    :param end_index: An int, the index where this sentence ends in
                        a TextBlob. If not given, defaults to the
                        length of the sentence - 1.
    '''
    def __init__(self, sentence, start_index=0, end_index=None,
                 *args, **kwargs):
        super(Sentence, self).__init__(sentence, *args, **kwargs)
        #: The start index within a TextBlob
        self.start = self.start_index = start_index
        #: The end index within a TextBlob
        # Bug fix: the previous ``end_index or len(sentence) - 1`` idiom
        # silently discarded a legitimate explicit end index of 0.
        if end_index is None:
            end_index = len(sentence) - 1
        self.end = self.end_index = end_index
    @property
    def dict(self):
        """The dict representation of this sentence."""
        return {
            'raw': self.raw,
            'start_index': self.start_index,
            'end_index': self.end_index,
            'stripped': self.stripped,
            'noun_phrases': self.noun_phrases,
            'polarity': self.polarity,
            'subjectivity': self.subjectivity,
        }
class TextBlobDE(BaseBlob):
    '''``TextBlob`` class initialised with German default models:
    :param str text: A string.
    :param tokenizer: (optional) A tokenizer instance. If ``None``, defaults to
        :class:`NLTKPunktTokenizer() <textblob_de.tokenizers.NLTKPunktTokenizer>`.
    :param np_extractor: (optional) An NPExtractor instance. If ``None``,
        defaults to :class:`PatternParserNPExtractor()
        <textblob_de.np_extractors.PatternParserNPExtractor>`.
    :param pos_tagger: (optional) A Tagger instance. If ``None``, defaults to
        :class:`PatternTagger <textblob_de.taggers.PatternTagger>`.
    :param analyzer: (optional) A sentiment analyzer. If ``None``, defaults to
        :class:`PatternAnalyzer <textblob_de.sentiments.PatternAnalyzer>`.
    :param classifier: (optional) A classifier.
    '''
    @cached_property
    def sentences(self):
        """Return list of :class:`Sentence <Sentence>` objects."""
        return self._create_sentence_objects()
    @cached_property
    def words(self):
        """Return a list of word tokens. This excludes punctuation characters.
        If you want to include punctuation characters, access the ``tokens``
        property.
        :returns: A :class:`WordList <WordList>` of word tokens.
        """
        return WordList(
            word_tokenize(self.raw, self.tokenizer, include_punc=False))
    @property
    def raw_sentences(self):
        """List of strings, the raw sentences in the blob."""
        return [sentence.raw for sentence in self.sentences]
    @cached_property
    def sentiment(self):
        """Return a tuple of form (polarity, subjectivity ) where polarity
        is a float within the range [-1.0, 1.0] and subjectivity is a float
        within the range [0.0, 1.0] where 0.0 is very objective and 1.0 is
        very subjective.
        :rtype: named tuple of the form ``Sentiment(polarity=0.0, subjectivity=0.0)``
        """
        #: Enhancement Issue #2
        #: adapted from 'textblob.en.sentiments.py'
        #: Return type declaration
        _RETURN_TYPE = namedtuple('Sentiment', ['polarity', 'subjectivity'])
        # Blob-level sentiment is the arithmetic mean of the per-sentence
        # scores; an empty blob yields (0.0, 0.0).
        _polarity = 0
        _subjectivity = 0
        for s in self.sentences:
            _polarity += s.polarity
            _subjectivity += s.subjectivity
        try:
            polarity = _polarity / len(self.sentences)
        except ZeroDivisionError:
            polarity = 0.0
        try:
            subjectivity = _subjectivity / len(self.sentences)
        except ZeroDivisionError:
            subjectivity = 0.0
        return _RETURN_TYPE(polarity, subjectivity)
    @cached_property
    def sentiment_assessments(self):
        """Return a tuple of form (polarity, subjectivity, assessments ) where
        polarity is a float within the range [-1.0, 1.0], subjectivity is a
        float within the range [0.0, 1.0] where 0.0 is very objective and 1.0
        is very subjective, and assessments is a list of polarity and
        subjectivity scores for the assessed tokens.
        Not implemented for German.
        :rtype: namedtuple of the form ``Sentiment(polarity, subjectivity, assessments)``
        """
        raise NotImplementedError
    @cached_property
    def polarity(self):
        """Return the polarity score as a float within the range [-1.0, 1.0]
        :rtype: float
        """
        return self.sentiment[0]
    @cached_property
    def subjectivity(self):
        '''Return the subjectivity score as a float within the range [0.0, 1.0]
        where 0.0 is very objective and 1.0 is very subjective.
        :rtype: float
        '''
        return self.sentiment[1]
    @property
    def serialized(self):
        """Returns a list of each sentence's dict representation."""
        return [sentence.dict for sentence in self.sentences]
    def to_json(self, *args, **kwargs):
        """Return a json representation (str) of this blob. Takes the same
        arguments as json.dumps.
        .. versionadded:: 0.5.1 (``textblob``)
        """
        return json.dumps(self.serialized, *args, **kwargs)
    @property
    def json(self):
        """The json representation of this blob.
        .. versionchanged:: 0.5.1
        Made ``json`` a property instead of a method to restore backwards
        compatibility that was broken after version 0.4.0.
        """
        return self.to_json()
    def _create_sentence_objects(self):
        """Returns a list of Sentence objects from the raw text."""
        sentence_objects = []
        sentences = sent_tokenize(self.raw, tokenizer=self.tokenizer)
        char_index = 0  # Keeps track of character index within the blob
        for sent in sentences:
            # Compute the start and end indices of the sentence
            # within the blob. This only works if the sentence splitter
            # does not perform any character replacements or changes to
            # white space.
            # Working: NLTKPunktTokenizer
            # Not working: PatternTokenizer
            try:
                start_index = self.raw.index(sent, char_index)
                char_index += len(sent)
                end_index = start_index + len(sent)
            except ValueError:
                # Tokenizer altered the text; indices cannot be recovered.
                start_index = None
                end_index = None
            # Sentences share the same models as their parent blob
            s = Sentence(
                sent,
                start_index=start_index,
                end_index=end_index,
                tokenizer=self.tokenizer,
                np_extractor=self.np_extractor,
                pos_tagger=self.pos_tagger,
                analyzer=self.analyzer,
                parser=self.parser,
                classifier=self.classifier)
            sentence_objects.append(s)
        return sentence_objects
class BlobberDE(object):
    """A factory for TextBlobs that all share the same tagger, tokenizer,
    parser, classifier, and np_extractor.
    Usage:
    >>> from textblob_de import BlobberDE
    >>> from textblob_de.taggers import PatternTagger
    >>> from textblob_de.tokenizers import PatternTokenizer
    >>> tb = BlobberDE(pos_tagger=PatternTagger(), tokenizer=PatternTokenizer())
    >>> blob1 = tb("Das ist ein Blob.")
    >>> blob2 = tb("Dieser Blob benutzt die selben Tagger und Tokenizer.")
    >>> blob1.pos_tagger is blob2.pos_tagger
    True
    :param str text: A string.
    :param tokenizer: (optional) A tokenizer instance. If ``None``, defaults to
        :class:`NLTKPunktTokenizer() <textblob_de.tokenizers.NLTKPunktTokenizer>`.
    :param np_extractor: (optional) An NPExtractor instance. If ``None``,
        defaults to :class:`PatternParserNPExtractor()
        <textblob_de.np_extractors.PatternParserNPExtractor>`.
    :param pos_tagger: (optional) A Tagger instance. If ``None``, defaults to
        :class:`PatternTagger <textblob_de.taggers.PatternTagger>`.
    :param analyzer: (optional) A sentiment analyzer. If ``None``, defaults to
        :class:`PatternAnalyzer <textblob_de.sentiments.PatternAnalyzer>`.
    :param classifier: (optional) A classifier.
    .. versionadded:: 0.4.0 (``textblob``)
    """
    def __init__(self,
                 tokenizer=None,
                 pos_tagger=None,
                 np_extractor=None,
                 analyzer=None,
                 parser=None,
                 classifier=None):
        # Same default-model wiring as BaseBlob.__init__: German models,
        # all sharing one tokenizer instance.
        self.tokenizer = tokenizer if tokenizer is not None else NLTKPunktTokenizer()
        self.pos_tagger = pos_tagger if pos_tagger is not None \
            else PatternTagger(tokenizer=self.tokenizer)
        self.np_extractor = np_extractor if np_extractor is not None \
            else PatternParserNPExtractor(tokenizer=self.tokenizer)
        self.analyzer = analyzer if analyzer is not None \
            else PatternAnalyzer(tokenizer=self.tokenizer)
        self.parser = parser if parser is not None \
            else PatternParser(tokenizer=self.tokenizer)
        self.classifier = classifier if classifier is not None else None
        _initialize_models(
            self,
            self.tokenizer,
            self.pos_tagger,
            self.np_extractor,
            self.analyzer,
            self.parser,
            self.classifier)
    def __call__(self, text):
        """Return a new TextBlob object with this Blobber's ``np_extractor``,
        ``pos_tagger``, ``tokenizer``, ``analyzer``, and ``classifier``.
        :returns: A new :class:`TextBlob <TextBlob>`.
        """
        return TextBlobDE(
            text,
            tokenizer=self.tokenizer,
            pos_tagger=self.pos_tagger,
            np_extractor=self.np_extractor,
            analyzer=self.analyzer,
            parser=self.parser,
            classifier=self.classifier)
    def __repr__(self):
        # Conditional binds after "+": yields "<Name>()" or the whole
        # expression falls through to "None" when no classifier is set.
        classifier_name = self.classifier.__class__.__name__ + \
            "()" if self.classifier else "None"
        return (
            "Blobber(tokenizer={0}(), pos_tagger={1}(), "
            "np_extractor={2}(), analyzer={3}(), parser={4}(), classifier={5})") .format(
            self.tokenizer.__class__.__name__,
            self.pos_tagger.__class__.__name__,
            self.np_extractor.__class__.__name__,
            self.analyzer.__class__.__name__,
            self.parser.__class__.__name__,
            classifier_name)
    __str__ = __repr__
| mit |
GirlsCodePy/girlscode-coursebuilder | modules/dashboard/dashboard_handler.py | 3 | 4353 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Presents a friendly base class for dashboard handlers."""
__author__ = 'Nick Retallack (nretallack@google.com)'
from common import crypto
from common import jinja_utils
from modules.dashboard import dashboard
class AbstractDashboardHandler(dashboard.DashboardHandler):
    """Contains common patterns in dashboard handlers."""

    @property
    def TEMPLATE(self):
        """Name of a partial template this handler renders most often.

        It should be located in one of your TEMPLATE_DIRS. It represents the
        main_content in view.html.

        This will be used when you call render_this. If you do not call
        render_this, you don't need to populate this value.
        """
        # Fix: the original *returned* NotImplementedError, silently handing
        # callers an exception instance; raising makes a missing override
        # fail loudly at the point of use.
        raise NotImplementedError('Subclasses must override this.')

    @property
    def TEMPLATE_DIRS(self):
        """List of places to find templates for the main_content."""
        raise NotImplementedError('Subclasses must override this.')

    @property
    def ACTION(self):
        """A unique string to identify this operation.

        This is used for permissions and XSRF tokens.
        It's also used to identify menu items.
        """
        raise NotImplementedError('Subclasses must override this.')

    @property
    def PAGE_TITLE(self):
        """This will be displayed in your browser's title bar."""
        return ''

    @property
    def IN_ACTION(self):
        """This determines which menu item should be active when someone
        visits your page."""
        return self.ACTION  # subclasses may override this

    @property
    def EXTRA_CSS_URLS(self):
        """List of CSS files to add to the document's head."""
        return []  # subclasses may override this

    @property
    def EXTRA_JS_URLS(self):
        """List of JavaScript files to add to the document's head."""
        return []  # subclasses may override this

    @property
    def EXTRA_JS_HREF_LIST(self):
        """Backward compatibility with the existing dashboard."""
        return super(AbstractDashboardHandler, self
                     ).EXTRA_JS_HREF_LIST + self.EXTRA_JS_URLS

    @property
    def EXTRA_CSS_HREF_LIST(self):
        """Backward compatibility with the existing dashboard."""
        return super(AbstractDashboardHandler, self
                     ).EXTRA_CSS_HREF_LIST + self.EXTRA_CSS_URLS

    @property
    def action(self):
        """Backward compatibility with the existing dashboard."""
        return self.ACTION

    def render_this(self, **values):
        """Render self.TEMPLATE with the given template values."""
        self.render_other(self.TEMPLATE, **values)

    def render_other(self, template, **values):
        """Render an arbitrary partial template as this page's content."""
        self.render_content(jinja_utils.render_partial_template(
            template, self.TEMPLATE_DIRS, values, handler=self))

    def render_content(self, content):
        """Wrap pre-rendered content in the standard dashboard page."""
        self.render_page({
            'page_title': self.PAGE_TITLE,
            'main_content': content,
        }, in_action=self.IN_ACTION)

    def get(self):
        # check permissions
        if not self.can_view(self.ACTION):
            self.redirect(self.app_context.get_slug(), abort=True)

    def post(self):
        # check for cross-site request forgery
        xsrf_token = self.request.headers.get('CSRF-Token', self.request.get(
            'xsrf_token_{}'.format(self.ACTION)))
        if not crypto.XsrfTokenManager.is_xsrf_token_valid(
                xsrf_token, self.ACTION):
            self.abort(403)
        # check permissions
        if not self.can_edit(self.ACTION):
            self.redirect(self.app_context.get_slug(), abort=True)

    @classmethod
    def add_to_menu(cls, group, item, title, **kwargs):
        """Register this handler as a dashboard sub-navigation entry."""
        # NOTE(review): cls.ACTION accesses the property descriptor, not its
        # value, unless subclasses override ACTION as a plain class attribute
        # -- confirm against existing subclasses.
        cls.add_sub_nav_mapping(
            group, item, title,
            action=cls.ACTION,
            href=cls.URL,
            **kwargs
        )
| gpl-3.0 |
vikingco/django-advanced-reports | tests/test_filters.py | 1 | 5496 | from django.test import TestCase
from advanced_reports.defaults import AdvancedReport
class AdvancedReportTest(TestCase):
    """Tests for AdvancedReport.get_tabbed_filter_links().

    NOTE(review): several assertions index into the list returned by
    get_tabbed_filter_links() (dict_iteritems[0] / [1]), so they depend on
    its iteration order -- confirm against the implementation before
    reordering anything here.
    """

    def setUp(self):
        # Fresh report per test; each case sets tabbed_filter_fields itself.
        self.report = AdvancedReport()

    def test_tabbed_filter_fields(self):
        """Types without an explicit image fall back to the 'default' image."""
        self.report.tabbed_filter_fields = {
            'card': {
                'images': {
                    'default': 'img/item.png',
                    'item2': 'img/item2.png'
                },
                'types': [
                    'item1', 'item2', 'item3', 'item4'
                ]
            }
        }
        self.assertEqual(['card'], [k for k, v in self.report.get_tabbed_filter_links()])
        dict_iteritems = [v for k, v in self.report.get_tabbed_filter_links()]
        self.assertEqual(
            [
                ('item1', 'img/item.png'),
                ('item2', 'img/item2.png'),
                ('item3', 'img/item.png'),
                ('item4', 'img/item.png'),
            ], sorted([(k, v) for k, v in dict_iteritems[0]]))

    def test_tabbed_filter_fields_default_only(self):
        """Only a 'default' image: every type gets that image."""
        self.report.tabbed_filter_fields = {
            'card': {
                'images': {
                    'default': 'img/item2.png'
                },
                'types': [
                    'item1', 'item2', 'item3', 'item4'
                ]
            }
        }
        self.assertEqual(['card'], [k for k, v in self.report.get_tabbed_filter_links()])
        dict_iteritems = [v for k, v in self.report.get_tabbed_filter_links()]
        self.assertEqual(
            [
                ('item1', 'img/item2.png'),
                ('item2', 'img/item2.png'),
                ('item3', 'img/item2.png'),
                ('item4', 'img/item2.png'),
            ], sorted([(k, v) for k, v in dict_iteritems[0]]))

    def test_tabbed_filter_fields_without_default_image(self):
        """No 'default' key: unmapped types get image None."""
        self.report.tabbed_filter_fields = {
            'card': {
                'images': {
                    'item2': 'img/item2.png'
                },
                'types': [
                    'item1', 'item2', 'item3', 'item4'
                ]
            }
        }
        self.assertEqual(['card'], [k for k, v in self.report.get_tabbed_filter_links()])
        dict_iteritems = [v for k, v in self.report.get_tabbed_filter_links()]
        self.assertEqual(
            [
                ('item1', None),
                ('item2', 'img/item2.png'),
                ('item3', None),
                ('item4', None),
            ], sorted([(k, v) for k, v in dict_iteritems[0]]))

    def test_tabbed_filter_fields_without_image_values(self):
        """Empty 'images' dict: every type gets image None."""
        self.report.tabbed_filter_fields = {
            'card': {
                'images': {
                },
                'types': [
                    'item1', 'item2', 'item3', 'item4'
                ]
            }
        }
        self.assertEqual(['card'], [k for k, v in self.report.get_tabbed_filter_links()])
        dict_iteritems = [v for k, v in self.report.get_tabbed_filter_links()]
        self.assertEqual(
            [
                ('item1', None),
                ('item2', None),
                ('item3', None),
                ('item4', None),
            ], sorted([(k, v) for k, v in dict_iteritems[0]]))

    def test_tabbed_filter_fields_without_images(self):
        """Missing 'images' key entirely behaves like an empty mapping."""
        self.report.tabbed_filter_fields = {
            'card': {
                'types': [
                    'item1', 'item2', 'item3', 'item4'
                ]
            }
        }
        self.assertEqual(['card'], [k for k, v in self.report.get_tabbed_filter_links()])
        dict_iteritems = [v for k, v in self.report.get_tabbed_filter_links()]
        self.assertEqual(
            [
                ('item1', None),
                ('item2', None),
                ('item3', None),
                ('item4', None),
            ], sorted([(k, v) for k, v in dict_iteritems[0]]))

    def test_tabbed_filter_fields_without_types(self):
        """Missing 'types' key is an error."""
        self.report.tabbed_filter_fields = {
            'card': {
                'images': {
                },
            }
        }
        with self.assertRaises(Exception):
            self.report.get_tabbed_filter_links()

    def test_tabbed_filter_fields_multiple(self):
        """Two filter fields each resolve their own image mapping."""
        self.report.tabbed_filter_fields = {
            'card': {
                'images': {
                    'default': 'img/item.png',
                    'item2': 'img/item2.png'
                },
                'types': [
                    'item1', 'item2', 'item3', 'item4'
                ]
            },
            'gender': {
                'images': {
                    'male': 'img/male.png',
                    'female': 'img/female.png'
                },
                'types': [
                    'male', 'female'
                ]
            }
        }
        self.assertEqual(['card', 'gender'], sorted([k for k, v in self.report.get_tabbed_filter_links()]))
        dict_iteritems = [v for k, v in self.report.get_tabbed_filter_links()]
        self.assertEqual(
            [
                ('item1', 'img/item.png'),
                ('item2', 'img/item2.png'),
                ('item3', 'img/item.png'),
                ('item4', 'img/item.png'),
            ], sorted([(k, v) for k, v in dict_iteritems[1]]))
        self.assertEqual(
            [
                ('female', 'img/female.png'),
                ('male', 'img/male.png'),
            ], sorted([(k, v) for k, v in dict_iteritems[0]]))
| bsd-3-clause |
jakirkham/volumina | volumina/multimethods.py | 3 | 4179 | ###############################################################################
# volumina: volume slicing and editing library
#
# Copyright (C) 2011-2014, the ilastik developers
# <team@ilastik.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the Lesser GNU General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# See the files LICENSE.lgpl2 and LICENSE.lgpl3 for full text of the
# GNU Lesser General Public License version 2.1 and 3 respectively.
# This information is also available on the ilastik web site at:
# http://ilastik.org/license/
###############################################################################
import sys
import inspect
# Maps function name -> the shared MultiMethod instance for that name.
registry = {}
# Maps a module's __file__ -> the __name__ under which it was first seen;
# used below to detect the same module imported under two names.
calling_modules = {}
class MultiMethod(object):
    """A callable that dispatches to an implementation chosen by the exact
    classes of its positional arguments."""

    def __init__(self, name):
        self.name = name
        # (type, ...) tuple -> registered implementation
        self.typemap = {}

    def __call__(self, *args):
        signature = tuple(arg.__class__ for arg in args)
        impl = self.typemap.get(signature)
        if impl is None:
            raise TypeError("no match: %s" % str(signature))
        return impl(*args)

    def register(self, types, function):
        """Register *function* for the exact type tuple *types*."""
        if types in self.typemap:
            raise TypeError("duplicate registration: %s" % str(types))
        self.typemap[types] = function
# decorator
def multimethod(*types):
    """Decorator registering the wrapped function for the given type tuple.

    Returns the shared MultiMethod for the function's name (so the name in
    the calling module becomes the dispatcher, not the plain function).
    """
    def register(function):
        # Create-or-fetch the MultiMethod for this function name, then add
        # this (types -> function) mapping to it.
        name = function.__name__
        mm = registry.get(name)
        if mm is None:
            mm = registry[name] = MultiMethod(name)
        mm.register(types, function)
        return mm

    def noop(function):
        # Used when the module was already processed under another name:
        # skip re-registration and just return the existing MultiMethod.
        assert(function.__name__ in registry)  # noop is only returned,
                                               # when we have seen the
                                               # function at least once
        return registry[function.__name__]

    # a single module can appear more than once under different names
    # in sys.modules; for exampe 'volumina.pixelpipeline.multimethods'
    # and 'pixelpipeline.multimethods'
    # we encountered this case, when a module is imported with its relative
    # name in a __init__.py and simultaneously in a submodule with is full
    # qualified name (this only works, when the python path points to the root
    # dir of the full qualified name)
    # As a result, the same decorator was called more than once and we got
    # multi registration errors
    # volumina.pixelpipeline.submodule:
    #   uses multimethod decorator
    #   imports another module with fqp volumina.pixelpipeline.submodule2
    #   executes __init__.py in fq namespace
    # volumina.__init__:
    #   import pixelpipeline.submodule # executes decorators
    # volumina.module:
    #   impot pixelpipeline.submodule # executes decorators
    #
    # The following code handles this case.

    # Inspect the caller's frame to find the module this decorator was
    # invoked from.
    caller = sys.modules.get(inspect.currentframe().f_back.f_globals['__name__'])
    # there is a caller and it is a non built-in in module (not a function etc.)
    if caller and inspect.ismodule(caller) and hasattr(caller, '__file__'):
        module_file = caller.__file__
        module_name = caller.__name__
        # Have we encountered the module before?
        if module_file in calling_modules:
            # we are still during the first encounter of that module
            # NOTE(review): 'is' relies on __name__ being the identical str
            # object on repeated calls from the same module; '==' would be
            # the safer comparison -- confirm before changing.
            if calling_modules[module_file] is module_name:
                return register
            # we previously encountered the module in a different namespace
            # -> do not register a new multimethod, but return the old one
            else:
                return noop
        # module encountered the first time
        else:
            calling_modules[module_file] = module_name
            return register
    else:
        raise Exception("multimethod() was not called as a decorator")
| lgpl-3.0 |
veltzer/demos-python | scripts/syntax_check.py | 1 | 1314 | #!/usr/bin/env python
"""
This script checks the syntax of other python scripts.
It returns a bad error code to the parent if something goes wrong.
It's basically a more sophisticated version of something like this:
python2 -m py_compile $< || python3 -m py_compile $<
"""
import sys
import subprocess
if len(sys.argv) != 2:
print('usage: {0} [filename]'.format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
filename = sys.argv[1]
# read the first line of the file
check_with = None
with open(filename, 'r') as f:
for line in f:
line = line.rstrip()
if line == '#!/usr/bin/python2':
check_with = 'python2'
break
elif line == '#!/usr/bin/python3':
check_with = 'python3'
break
if line.startswith('# CHECK_WITH'):
check_with = line.split()[2]
break
# check with python3 if in doubt
if check_with is None:
check_with = 'python3'
if check_with is None:
print('{0}: could not find how to check file [{1}]'.format(sys.argv[0], filename), file=sys.stderr)
sys.exit(1)
# check the syntax
out=subprocess.check_output([
check_with,
'-m',
'py_compile',
filename,
]).decode()
# check that there is no output
if out!="":
print("out is [{}]".format(out))
sys.exit(1)
| gpl-3.0 |
idigbio-api-hackathon/LifemapperQgis | lifemapperTools/LmShared/LmClient/openTree.py | 1 | 2926 | """
@summary: Module containing client functions for interacting with OpenTree web
services
@author: CJ Grady / Jeff Cavner
@version: 3.2.0
@status: release
@license: Copyright (C) 2015, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
from LmClient.localconstants import OTL_HINT_URL, OTL_TREE_WEB_URL
# .............................................................................
class OTLClient(object):
    """
    @summary: Lifemapper interface to Open Tree of Life web services
    """
    # .........................................
    def __init__(self, cl):
        """
        @summary: Constructor
        @param cl: Lifemapper client for connection to web services
        """
        self.cl = cl

    # .........................................
    def getOTLHint(self, taxaName):
        """
        @summary: Calls the Open Tree of Life hint service with a taxa name
                     and returns matching OTL tree ids
        @param taxaName: The name of the taxa to search for
        """
        import json  # local import keeps module-level dependencies unchanged
        url = OTL_HINT_URL
        # Fix: build the body with json.dumps so quotes/backslashes in
        # taxaName are escaped instead of producing malformed JSON (the old
        # '%'-interpolation broke on such names).
        jsonBody = json.dumps({"name": taxaName, "context_name": "All life"})
        res = self.cl.makeRequest(url,
                                  method="POST",
                                  body=jsonBody,
                                  headers={"Content-Type": "application/json"})
        return res

    # .........................................
    def getOTLTreeWeb(self, otlTID):
        """
        @summary: Calls the Open Tree of Life tree service with an OTL tree
                     id and returns a tree in Newick format.
        @param otlTID: Open Tree of Life tree idopen tree tree id
        """
        import json  # local import keeps module-level dependencies unchanged
        url = OTL_TREE_WEB_URL
        # Same escaping fix as getOTLHint; the id is still sent as a string
        # to match the original request body.
        jsonBody = json.dumps({"ott_id": str(otlTID)})
        res = self.cl.makeRequest(url,
                                  method="POST",
                                  body=jsonBody,
                                  headers={"Content-Type": "application/json"})
        return res
| gpl-2.0 |
bgyori/indra | indra/tests/test_pybel_api.py | 3 | 30089 | # -*- coding: utf-8 -*-
"""Tests for the PyBEL processor."""
import os
from urllib import request
from pybel import BELGraph
from pybel.dsl import *
from pybel.language import Entity
from pybel.io import from_nodelink_file
from pybel.examples import egf_graph
from indra.statements import *
from indra.sources import bel
from indra.sources.bel import processor as pb
from indra.sources.bel.api import process_cbn_jgif_file, process_pybel_graph, \
small_corpus_url
from indra.databases import hgnc_client
from indra.statements.validate import assert_valid_statement
# HGNC and UniProt identifiers for MAP2K1, shared by several get_agent tests.
mek_hgnc_id = hgnc_client.get_hgnc_id('MAP2K1')
mek_up_id = hgnc_client.get_uniprot_id(mek_hgnc_id)
def test_pybel_neighborhood_query():
    """Neighborhood query on TP63 yields valid statements with full
    BioContext on every piece of evidence."""
    bp = bel.process_pybel_neighborhood(['TP63'],
                                        network_type='graph_jsongz_url',
                                        network_file=small_corpus_url)
    assert bp.statements
    for stmt in bp.statements:
        assert_valid_statement(stmt)
    assert all([s.evidence[0].context is not None
                for s in bp.statements])
    assert all([s.evidence[0].context.cell_line.name == 'MCF 10A'
                for s in bp.statements])
    # Locate statement about epidermis development
    stmt = [st for st in bp.statements if st.agent_list()[1].name ==
            'epidermis development'][0]
    assert repr(stmt.evidence[0].context) == str(stmt.evidence[0].context)
    assert stmt.evidence[0].context == BioContext(
        location=RefContext(name="Cytoplasm",
                            db_refs={'MESH': 'D003593'}),
        cell_line=RefContext(name="MCF 10A",
                             db_refs={'EFO': '0001200'}),
        cell_type=RefContext(name="keratinocyte",
                             db_refs={'CL': 'CL:0000312'}),
        organ=RefContext(name="colon",
                         db_refs={'UBERON': 'UBERON:0001155'}),
        disease=RefContext(name="cancer",
                           db_refs={'DOID': 'DOID:162'}),
        species=RefContext(name="Rattus norvegicus",
                           db_refs={'TAXONOMY': '10116'})), \
        stmt.evidence[0].context
    # Test annotation manager
    assert bp.annot_manager.get_mapping('Species', '9606') == \
        'Homo sapiens'


def test_pybel_readme_example():
    """The README's two-gene neighborhood example produces statements."""
    bel_processor = bel.process_pybel_neighborhood(['KRAS', 'BRAF'])
    assert bel_processor.statements


def test_process_pybel():
    """Processing the bundled EGF example graph produces statements."""
    pbp = bel.process_pybel_graph(egf_graph)
    assert pbp.statements
def test_process_jgif():
    """Download a CBN JGIF network, process it, and check the statements."""
    test_file_url = 'https://s3.amazonaws.com/bigmech/travis/Hox-2.0-Hs.jgf'
    test_file = 'Hox-2.0-Hs.jgf'
    if not os.path.exists(test_file):
        request.urlretrieve(url=test_file_url, filename=test_file)
    try:
        pbp = process_cbn_jgif_file(test_file)
    finally:
        # Clean up even if processing raises so reruns start from a fresh
        # download (the original leaked the file on error).
        os.remove(test_file)
    assert len(pbp.statements) == 26, len(pbp.statements)
    assert isinstance(pbp.statements[0], Statement)
    assert all(s.evidence[0].source_api == 'bel' for s in pbp.statements)
def test_nodelink_json():
    """Download a node-link JSON network, process it, and check statements."""
    test_file_url = \
        'https://s3.amazonaws.com/bigmech/travis/Hox-2.0-Hs_nljson.json'
    test_file = 'Hox-2.0-Hs_nljson.json'
    if not os.path.exists(test_file):
        request.urlretrieve(url=test_file_url, filename=test_file)
    try:
        pbp = process_pybel_graph(from_nodelink_file(test_file))
    finally:
        # Clean up even if processing raises so reruns start from a fresh
        # download (the original leaked the file on error).
        os.remove(test_file)
    # Changed to 24, not really sure how to debug this one
    assert len(pbp.statements) == 24, (len(pbp.statements), pbp.statements)
    assert isinstance(pbp.statements[0], Statement)
    assert all(s.evidence[0].source_api == 'bel' for s in pbp.statements)
def test_get_agent_hgnc():
    """HGNC protein nodes ground to HGNC + UP, by name or by identifier."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'MAP2K1', agent
    assert agent.db_refs.get('HGNC') == mek_hgnc_id
    assert agent.db_refs.get('UP') == mek_up_id

    # Now create an agent with an identifier
    mek = Protein(name='Foo', namespace='HGNC', identifier='6840')
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'MAP2K1', agent
    assert agent.db_refs.get('HGNC') == mek_hgnc_id
    assert agent.db_refs.get('UP') == mek_up_id


def test_get_agent_up():
    """UP identifier nodes are standardized back to the HGNC name."""
    mek = Protein(namespace='UP', identifier='Q02750')
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'MAP2K1'
    assert agent.db_refs.get('HGNC') == mek_hgnc_id
    assert agent.db_refs.get('UP') == mek_up_id


def test_get_agent_egid():
    """Entrez gene id nodes pick up EGID, HGNC and UP groundings."""
    node_data = Protein(name='5008', namespace='EGID')
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'OSM'
    assert len(agent.db_refs) == 3
    assert agent.db_refs['EGID'] == '5008'
    assert agent.db_refs['HGNC'] == '8506'
    assert agent.db_refs['UP'] == 'P13725'


def test_get_agent_mgi():
    """Mouse (MGI) symbols ground to UniProt."""
    node = Protein(namespace='MGI', name='Nr1h3')
    agent = pb.get_agent(node, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'Nr1h3'
    assert len(agent.db_refs) == 1
    assert agent.db_refs.get('UP') == 'Q9Z0Y9', agent.db_refs


def test_get_agent_rgd():
    """Rat (RGD) symbols ground to UniProt."""
    node = Protein(namespace='RGD', name='Tp53')
    agent = pb.get_agent(node, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'Tp53'
    assert len(agent.db_refs) == 1
    assert agent.db_refs.get('UP') == 'P10361', agent.db_refs


def test_get_agent_sfam():
    """Selventa family (SFAM) nodes map onto FamPlex entries."""
    node_data = Protein(
        namespace='SFAM',
        name='PRKC Family',
    )
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert len(agent.db_refs) == 2
    assert agent.db_refs['SFAM'] == 'PRKC Family'
    assert agent.db_refs['FPLX'] == 'PKC'
    assert agent.name == 'PKC'


def test_get_agent_sdis():
    """Selventa disease (SDIS) pathology nodes keep their SDIS grounding."""
    node_data = Pathology(namespace='SDIS', name='metastasis')
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'metastasis'
    assert len(agent.db_refs) == 1
    assert agent.db_refs['SDIS'] == 'metastasis'


def test_get_agent_chebi():
    """ChEBI abundance nodes are grounded by name lookup to a CHEBI id."""
    node_data = Abundance(namespace='CHEBI', name='nitric oxide')
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'nitric oxide'
    assert len(agent.db_refs) == 1
    assert agent.db_refs['CHEBI'] == 'CHEBI:16480'


def test_get_agent_schem():
    """Selventa chemical (SCHEM) nodes keep their SCHEM grounding."""
    node_data = Abundance(namespace='SCHEM', name='Promegestone')
    agent = pb.get_agent(node_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'Promegestone'
    assert len(agent.db_refs) == 1
    assert agent.db_refs['SCHEM'] == 'Promegestone'


def test_get_agent_mirna():
    """MicroRNA nodes resolve to MIRBASE + HGNC via name, id, or MIRBASE."""
    m = MicroRna(namespace='HGNC', name='MIRLET7A1')
    agent = pb.get_agent(m, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'MIRLET7A1'
    assert agent.db_refs.get('MIRBASE') == 'MI0000060'
    assert agent.db_refs.get('HGNC') == '31476'

    m = MicroRna(namespace='HGNC', name='MIRLET7A1', identifier='31476')
    agent = pb.get_agent(m, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'MIRLET7A1'
    assert agent.db_refs.get('MIRBASE') == 'MI0000060'
    assert agent.db_refs.get('HGNC') == '31476'

    m = MicroRna(namespace='MIRBASE', name='hsa-let-7a-1')
    agent = pb.get_agent(m, {})
    assert isinstance(agent, Agent)
    assert agent.name == 'MIRLET7A1'
    assert agent.db_refs.get('MIRBASE') == 'MI0000060'
    assert agent.db_refs.get('HGNC') == '31476'


def test_get_agent_fusion():
    """Fusion proteins are not handled: get_agent returns None."""
    node_data = ProteinFusion(
        partner_5p=Protein(namespace='HGNC', name='BCR'),
        partner_3p=Protein(namespace='HGNC', name='ABL1'),
    )
    agent = pb.get_agent(node_data)
    assert agent is None


def test_get_agent_up_no_id():
    """A UP node without an identifier cannot be grounded: returns None."""
    mek = Protein(name='MAP2K1', namespace='UP')
    agent = pb.get_agent(mek, {})
    assert agent is None


def test_get_agent_meshpp():
    """MESH process (MESHPP) bioprocess nodes get a MESH grounding."""
    apoptosis = bioprocess(name='Apoptosis', namespace='MESHPP')
    agent = pb.get_agent(apoptosis)
    assert isinstance(agent, Agent)
    assert agent.name == 'Apoptosis'
    assert 'MESH' in agent.db_refs


def test_get_agent_meshd():
    """MESH disease (MESHD) bioprocess nodes get a MESH grounding."""
    hyperoxia = bioprocess(name='Hyperoxia', namespace='MESHD')
    agent = pb.get_agent(hyperoxia)
    assert isinstance(agent, Agent)
    assert agent.name == 'Hyperoxia'
    assert 'MESH' in agent.db_refs
def test_get_agent_with_mods():
    """pmod variants become ModConditions with optional residue/position."""
    mek = Protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert not mod.residue
    assert not mod.position

    mek = Protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph', code='Ser')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert mod.residue == 'S'
    assert not mod.position

    mek = Protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph', position=218)])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert not mod.residue
    assert mod.position == '218'

    mek = Protein(name='MAP2K1', namespace='HGNC',
                  variants=[pmod('Ph', position=218, code='Ser')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mods) == 1
    mod = agent.mods[0]
    assert mod.mod_type == 'phosphorylation'
    assert mod.residue == 'S'
    assert mod.position == '218'


def test_get_agent_with_muts():
    """hgvs variants become MutConditions."""
    mek = Protein(name='MAP2K1', namespace='HGNC',
                  variants=[hgvs('p.Val600Glu')])
    agent = pb.get_agent(mek, {})
    assert isinstance(agent, Agent)
    assert len(agent.mutations) == 1
    mut = agent.mutations[0]
    assert mut.position == '600'
    assert mut.residue_from == 'V'
    assert mut.residue_to == 'E'


def test_get_agent_with_activity():
    """An activity() edge modifier becomes an ActivityCondition."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    agent = pb.get_agent(mek, activity('act'))
    assert isinstance(agent, Agent)
    assert isinstance(agent.activity, ActivityCondition)
    assert agent.activity.activity_type == 'activity'
    assert agent.activity.is_active


def test_get_agent_complex():
    """Complex abundances yield an Agent with BoundConditions."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC',
                  variants=[pmod('Ph', position=185, code='Thr')])
    cplx = complex_abundance([mek, erk])
    agent = pb.get_agent(cplx)
    assert isinstance(agent, Agent)
    assert agent.name == 'MAP2K1'
    assert len(agent.bound_conditions) == 1
    bc = agent.bound_conditions[0]
    assert isinstance(bc, BoundCondition)
    assert bc.is_bound is True
    bc_agent = bc.agent
    assert bc_agent.name == 'MAPK1'
    assert len(bc_agent.mods) == 1
    assert bc_agent.mods[0].mod_type == 'phosphorylation'
    assert bc_agent.mods[0].residue == 'T'
    assert bc_agent.mods[0].position == '185'


def test_get_agent_complex_none_agent():
    """If one of the agents in the complex can't be obtained (e.g., an
    unhandled namespace), then the complex itself should be None."""
    # Prime agent is None
    mek = Protein(name='MAP2K1', namespace='FOO')
    erk = Protein(name='MAPK1', namespace='HGNC',
                  variants=[pmod('Ph', position=185, code='Thr')])
    cplx = complex_abundance([mek, erk])
    agent = pb.get_agent(cplx)
    assert agent is None

    # Bound agent is None
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='FOO',
                  variants=[pmod('Ph', position=185, code='Thr')])
    cplx = complex_abundance([mek, erk])
    agent = pb.get_agent(cplx)
    assert agent is None


def test_get_agent_named_complex_go():
    # TODO: Handle named complexes and map to FamPlex where possible
    node_data = NamedComplexAbundance(namespace='GOCCID', name='0043509')
    agent = pb.get_agent(node_data)
    assert agent is None


def test_get_agent_with_translocation():
    """A translocation edge modifier sets the Agent's destination location."""
    node_data = Protein(name='MAPK1', namespace='HGNC')
    # Some example edge data
    edge_data = translocation(
        from_loc=Entity(namespace='GOCC', name='intracellular'),
        to_loc=Entity(namespace='GOCC', name='extracellular space'),
    )
    agent = pb.get_agent(node_data, edge_data)
    assert isinstance(agent, Agent)
    assert agent.name == 'MAPK1'
    assert agent.location == 'extracellular space'
def test_phosphorylation_one_site_with_evidence():
    """directlyIncreases to a single-pmod target yields one Phosphorylation
    with evidence metadata (source id, pmid, text, annotations) attached."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC',
                  variants=[pmod('Ph', position=185, code='Thr')])
    g = BELGraph()
    g.annotation_list['TextLocation'] = {'Abstract'}
    ev_text = 'Some evidence.'
    ev_pmid = '123456'
    edge_hash = g.add_directly_increases(
        mek, erk, evidence=ev_text,
        citation=ev_pmid,
        annotations={"TextLocation": 'Abstract'},
    )
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 1
    assert isinstance(pbp.statements[0], Phosphorylation)
    assert pbp.statements[0].residue == 'T'
    assert pbp.statements[0].position == '185'
    enz = pbp.statements[0].enz
    sub = pbp.statements[0].sub
    assert enz.name == 'MAP2K1'
    assert enz.mods == []
    assert sub.name == 'MAPK1'
    assert sub.mods == []
    # Check evidence
    assert len(pbp.statements[0].evidence) == 1
    ev = pbp.statements[0].evidence[0]
    assert ev.source_api == 'bel'
    assert ev.source_id == edge_hash
    assert ev.pmid == ev_pmid, (ev.pmid, ev_pmid)
    assert ev.text == ev_text
    assert ev.annotations == {
        'bel': 'p(HGNC:MAP2K1) directlyIncreases '
               'p(HGNC:MAPK1, pmod(go:0006468 ! "protein phosphorylation", Thr, 185))'
    }
    assert ev.epistemics == {'direct': True, 'section_type': 'abstract'}


def test_doi_evidence():
    """Test processing edges with DOI citations."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC')
    g = BELGraph()
    g.annotation_list['TextLocation'] = {'Abstract'}
    ev_doi = '123456'
    g.add_directly_increases(
        mek, erk, evidence='Some evidence.',
        citation=('doi', ev_doi),
        annotations={"TextLocation": 'Abstract'},
    )
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 1
    assert len(pbp.statements[0].evidence) == 1
    ev = pbp.statements[0].evidence[0]
    assert ev.pmid is None
    assert 'DOI' in ev.text_refs
    assert ev.text_refs['DOI'] == ev_doi


def test_phosphorylation_two_sites():
    """A target with two pmods yields one Phosphorylation per site."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC',
                  variants=[pmod('Ph', position=185, code='Thr'),
                            pmod('Ph', position=187, code='Tyr')])
    g = BELGraph()
    g.add_directly_increases(mek, erk, evidence="Some evidence.",
                             citation='123456')
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 2
    stmt1 = pbp.statements[0]
    stmt2 = pbp.statements[1]
    assert stmt1.residue == 'T'
    assert stmt1.position == '185'
    assert stmt2.residue == 'Y'
    assert stmt2.position == '187'
    assert stmt1.sub.mods == []
    assert stmt2.sub.mods == []
    assert len(pbp.statements[0].evidence) == 1


def test_regulate_amount1_prot_obj():
    """increases between plain proteins becomes IncreaseAmount."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC')
    g = BELGraph()
    g.add_increases(mek, erk, evidence="Some evidence.", citation='123456')
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 1
    assert isinstance(pbp.statements[0], IncreaseAmount)
    assert len(pbp.statements[0].evidence) == 1


def test_regulate_amount2_rna_obj():
    # FIXME: Create a transcription-specific statement for p->rna
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = rna(name='MAPK1', namespace='HGNC')
    g = BELGraph()
    g.add_increases(mek, erk, evidence="Some evidence.", citation='123456')
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 1
    assert isinstance(pbp.statements[0], IncreaseAmount)
    assert len(pbp.statements[0].evidence) == 1


def test_regulate_amount3_deg():
    # FIXME: Create a stability-specific statement for p->deg(p(Foo))
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC')
    g = BELGraph()
    g.add_increases(mek, erk, object_modifier=degradation(),
                    evidence="Some evidence.", citation='123456')
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 1
    assert isinstance(pbp.statements[0], DecreaseAmount), pbp.statements[0]
    assert len(pbp.statements[0].evidence) == 1
def test_regulate_amount4_subj_act():
mek = Protein(name='MAP2K1', namespace='HGNC')
erk = Protein(name='MAPK1', namespace='HGNC')
g = BELGraph()
g.add_increases(mek, erk, source_modifier=activity(name='tscript'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], IncreaseAmount)
subj = pbp.statements[0].subj
assert subj.name == 'MAP2K1'
assert subj.activity is not None
assert isinstance(subj.activity, ActivityCondition), subj.activity.__class__
assert subj.activity.activity_type == 'transcription'
assert subj.activity.is_active
assert len(pbp.statements[0].evidence) == 1
g = BELGraph()
g.add_increases(mek, erk, source_modifier=activity(),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], IncreaseAmount)
subj = pbp.statements[0].subj
assert subj.name == 'MAP2K1'
assert isinstance(subj.activity, ActivityCondition)
assert subj.activity.activity_type == 'activity'
assert subj.activity.is_active
assert len(pbp.statements[0].evidence) == 1
def test_regulate_activity():
mek = Protein(name='MAP2K1', namespace='HGNC')
erk = Protein(name='MAPK1', namespace='HGNC')
g = BELGraph()
g.add_increases(mek, erk, source_modifier=activity(name='kin'),
target_modifier=activity(name='kin'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
assert isinstance(pbp.statements[0], Activation), pbp.statements[0].__class__
subj = pbp.statements[0].subj
assert subj.name == 'MAP2K1'
assert isinstance(subj.activity, ActivityCondition)
assert subj.activity.activity_type == 'kinase'
assert subj.activity.is_active
obj = pbp.statements[0].obj
assert obj.name == 'MAPK1'
assert obj.activity is None
assert pbp.statements[0].obj_activity == 'kinase'
assert len(pbp.statements[0].evidence) == 1
def test_active_form():
p53_pmod = Protein(name='TP53', namespace='HGNC',
variants=[pmod('Ph', position=33, code='Ser')])
p53_obj = Protein(name='TP53', namespace='HGNC')
g = BELGraph()
g.add_increases(p53_pmod, p53_obj,
target_modifier=activity(name='tscript'),
evidence="Some evidence.", citation='123456')
pbp = bel.process_pybel_graph(g)
assert pbp.statements
assert len(pbp.statements) == 1
stmt = pbp.statements[0]
assert isinstance(stmt, ActiveForm)
assert stmt.activity == 'transcription'
assert stmt.is_active is True
ag = stmt.agent
assert ag.name == 'TP53'
assert len(ag.mods) == 1
mc = ag.mods[0]
assert mc.mod_type == 'phosphorylation'
assert mc.residue == 'S'
assert mc.position == '33'
assert len(pbp.statements[0].evidence) == 1
def test_gef():
    """Active SOS1 directly increasing KRAS gtp activity maps to Gef."""
    sos = Protein(name='SOS1', namespace='HGNC')
    kras = Protein(name='KRAS', namespace='HGNC')
    graph = BELGraph()
    graph.add_directly_increases(sos, kras,
                                 source_modifier=activity(),
                                 target_modifier=activity(name='gtp'),
                                 evidence="Some evidence.", citation='123456')
    bp = bel.process_pybel_graph(graph)
    assert bp.statements
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, Gef)
    gef_agent, ras_agent = stmt.gef, stmt.ras
    assert gef_agent.name == 'SOS1'
    assert ras_agent.name == 'KRAS'
    assert gef_agent.activity.activity_type == 'activity'
    assert gef_agent.activity.is_active is True
    assert ras_agent.activity is None
    assert len(stmt.evidence) == 1
def test_indirect_gef_is_activation():
    """An indirect (non-direct) GEF-like edge maps to a plain Activation."""
    sos = Protein(name='SOS1', namespace='HGNC')
    kras = Protein(name='KRAS', namespace='HGNC')
    graph = BELGraph()
    graph.add_increases(sos, kras,
                        source_modifier=activity(),
                        target_modifier=activity(name='gtp'),
                        evidence="Some evidence.", citation='123456')
    bp = bel.process_pybel_graph(graph)
    assert bp.statements
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, Activation)
    src, tgt = stmt.subj, stmt.obj
    assert src.name == 'SOS1'
    assert tgt.name == 'KRAS'
    assert src.activity.activity_type == 'activity'
    assert src.activity.is_active is True
    assert tgt.activity is None
    assert stmt.obj_activity == 'gtpbound'
    assert len(stmt.evidence) == 1
def test_gap():
    """Active RASA1 directly decreasing KRAS gtp activity maps to Gap."""
    rasa = Protein(name='RASA1', namespace='HGNC')
    kras = Protein(name='KRAS', namespace='HGNC')
    graph = BELGraph()
    graph.add_directly_decreases(rasa, kras,
                                 source_modifier=activity(),
                                 target_modifier=activity(name='gtp'),
                                 evidence="Some evidence.", citation='123456')
    bp = bel.process_pybel_graph(graph)
    assert bp.statements
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, Gap)
    gap_agent, ras_agent = stmt.gap, stmt.ras
    assert gap_agent.name == 'RASA1'
    assert ras_agent.name == 'KRAS'
    assert gap_agent.activity.activity_type == 'activity'
    assert gap_agent.activity.is_active is True
    assert ras_agent.activity is None
    assert len(stmt.evidence) == 1
def test_activation_bioprocess():
    """Increase of a biological process node maps to an Activation of it."""
    bax = Protein(name='BAX', namespace='HGNC')
    apoptosis = bioprocess(name='apoptotic process', namespace='GOBP')
    graph = BELGraph()
    graph.add_increases(bax, apoptosis, evidence="Some evidence.",
                        citation='123456')
    bp = bel.process_pybel_graph(graph)
    assert bp.statements
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, Activation)
    assert stmt.subj.name == 'BAX'
    process_agent = stmt.obj
    assert process_agent.name == 'apoptotic process'
    assert 'GO' in process_agent.db_refs
    assert len(stmt.evidence) == 1
def test_gtpactivation():
    """GTP-bound KRAS directly increasing BRAF kinase maps to GtpActivation."""
    kras = Protein(name='KRAS', namespace='HGNC')
    braf = Protein(name='BRAF', namespace='HGNC')
    graph = BELGraph()
    graph.add_directly_increases(kras, braf,
                                 source_modifier=activity(name='gtp'),
                                 target_modifier=activity(name='kin'),
                                 evidence="Some evidence.", citation='123456')
    bp = bel.process_pybel_graph(graph)
    assert bp.statements
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, GtpActivation), stmt
    src, tgt = stmt.subj, stmt.obj
    assert src.name == 'KRAS'
    assert src.activity.activity_type == 'gtpbound'
    assert src.activity.is_active is True
    assert tgt.name == 'BRAF'
    assert tgt.activity is None
    assert stmt.obj_activity == 'kinase'
    assert len(stmt.evidence) == 1
def test_conversion():
    """A catalyzed reaction node maps to a Conversion statement.

    PLCG1 (with generic activity) directly increasing a reaction with one
    reactant and two products should yield a single Conversion whose
    obj_from/obj_to mirror the reactants/products.
    """
    enz = Protein(name='PLCG1', namespace='HGNC')
    react_1 = abundance('SCHEM',
                        '1-Phosphatidyl-D-myo-inositol 4,5-bisphosphate')
    p1 = abundance('SCHEM', 'Diacylglycerol')
    p2 = abundance('SCHEM', 'Inositol 1,4,5-trisphosphate')
    rxn = reaction(
        reactants=react_1,
        products=[p1, p2],
    )
    g = BELGraph()
    g.add_directly_increases(enz, rxn,
                             source_modifier=activity(),
                             evidence="Some evidence.", citation='123456')
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    assert len(pbp.statements) == 1
    stmt = pbp.statements[0]
    assert isinstance(stmt, Conversion)
    # The catalyst becomes the subject with a generic activity condition.
    assert stmt.subj.name == 'PLCG1'
    assert stmt.subj.activity is not None
    assert stmt.subj.activity.activity_type is not None
    assert stmt.subj.activity.activity_type == 'activity', f'Got: {stmt.subj.activity.activity_type}'
    assert stmt.subj.activity.is_active is True
    assert len(stmt.obj_from) == 1
    assert isinstance(stmt.obj_from[0], Agent)
    assert stmt.obj_from[0].name == '1-Phosphatidyl-D-myo-inositol ' \
                                    '4,5-bisphosphate'
    assert len(stmt.obj_to) == 2
    # why do these not appear in alphabetical order?
    # PyBEL sorts the nodes based on their BEL, and
    # Inositol 1,4,5-trisphosphate gets quoted.
    assert stmt.obj_to[0].name == 'Inositol 1,4,5-trisphosphate'
    assert stmt.obj_to[1].name == 'Diacylglycerol'
    assert len(stmt.evidence) == 1
def test_controlled_transloc_loc_cond():
    """Controlled translocations are currently not handled."""
    src = Protein(name='MAP2K1', namespace='HGNC')
    tgt = Protein(name='MAPK1', namespace='HGNC')
    tloc = translocation(
        from_loc=Entity(namespace='GOCC', name='intracellular'),
        to_loc=Entity(namespace='GOCC', name='extracellular space'),
    )
    graph = BELGraph()
    graph.add_increases(src, tgt, target_modifier=tloc,
                        evidence="Some evidence.", citation='123456')
    bp = bel.process_pybel_graph(graph)
    # No statement should be extracted from this edge.
    assert not bp.statements, bp.statements
def test_subject_transloc_loc_cond():
    """A translocation on the subject becomes a location condition on the
    subject agent, using the translocation's to_loc."""
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC')
    tloc = translocation(
        from_loc=Entity(namespace='GOCC', name='intracellular'),
        to_loc=Entity(namespace='GOCC', name='extracellular space'),
    )
    graph = BELGraph()
    graph.add_increases(mek, erk, source_modifier=tloc,
                        evidence="Some evidence.", citation='123456')
    bp = bel.process_pybel_graph(graph)
    assert bp.statements
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, IncreaseAmount)
    src = stmt.subj
    assert src.name == 'MAP2K1'
    assert src.location is not None
    assert src.location == 'extracellular space'
    assert stmt.obj.name == 'MAPK1'
def test_subject_transloc_active_form():
    """ActiveForms with a translocated subject should draw on the
    to-location of the subject."""
    moved = Protein(name='MAP2K1', namespace='HGNC')
    same = Protein(name='MAP2K1', namespace='HGNC')
    tloc = translocation(
        from_loc=Entity(namespace='GOCC', name='intracellular'),
        to_loc=Entity(namespace='GOCC', name='extracellular space'),
    )
    graph = BELGraph()
    graph.add_increases(moved, same, source_modifier=tloc,
                        target_modifier=activity(name='kin'),
                        evidence="Some evidence.", citation='123456')
    bp = bel.process_pybel_graph(graph)
    assert bp.statements
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, ActiveForm)
    agent = stmt.agent
    assert agent.name == 'MAP2K1'
    assert agent.location == 'extracellular space'
    assert agent.activity is None
    assert stmt.activity == 'kinase'
    assert stmt.is_active is True
def test_complex_stmt_with_activation():
    """A complex-abundance subject yields both a Complex statement and an
    Activation whose subject carries the other member as a bound condition."""
    raf = Protein(name='BRAF', namespace='HGNC')
    mek = Protein(name='MAP2K1', namespace='HGNC')
    erk = Protein(name='MAPK1', namespace='HGNC')
    cplx = complex_abundance([raf, mek])
    g = BELGraph()
    g.add_directly_increases(cplx, erk,
                             target_modifier=activity(name='kin'),
                             evidence="Some evidence.", citation='123456')
    pbp = bel.process_pybel_graph(g)
    assert pbp.statements
    # One statement for the complex itself, one for the activation edge.
    assert len(pbp.statements) == 2
    stmt1 = pbp.statements[0]
    assert isinstance(stmt1, Complex)
    assert len(stmt1.agent_list()) == 2
    assert sorted([ag.name for ag in stmt1.agent_list()]) == ['BRAF', 'MAP2K1']
    assert stmt1.evidence
    stmt2 = pbp.statements[1]
    assert isinstance(stmt2, Activation)
    # The first complex member becomes the subject; the rest become bound
    # conditions on it.
    assert stmt2.subj.name == 'BRAF'
    assert stmt2.subj.bound_conditions[0].agent.name == 'MAP2K1'
    assert stmt2.obj.name == 'MAPK1'
    assert stmt2.obj.activity is None
    assert stmt2.obj_activity == 'kinase'
def test_process_bel_stmts():
    """BEL statement strings are parsed directly into INDRA Statements."""
    bp = bel.process_bel_stmt('p(HGNC:MDM2) directlyDecreases '
                              'tscript(p(HGNC:TP53))')
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, Inhibition), bp.statements
    assert stmt.subj.name == 'MDM2', bp.statements
    assert stmt.obj.name == 'TP53', bp.statements

    bp = bel.process_bel_stmt('a(CHEBI:lipoprotein) increases '
                              'bp(GOBP:"inflammatory response")')
    assert len(bp.statements) == 1
    stmt = bp.statements[0]
    assert isinstance(stmt, Activation), bp.statements
    assert stmt.subj.name == 'lipoprotein', bp.statements
    assert stmt.obj.name == 'inflammatory response', bp.statements
| bsd-2-clause |
JerzySpendel/python-social-auth | social/strategies/webpy_strategy.py | 77 | 1932 | import web
from social.strategies.base import BaseStrategy, BaseTemplateStrategy
class WebpyTemplateStrategy(BaseTemplateStrategy):
    """Template rendering backed by web.py's template engine."""

    def render_template(self, tpl, context):
        """Render the template file *tpl* with *context* as keywords."""
        renderer = web.template.render(tpl)
        return renderer(**context)

    def render_string(self, html, context):
        """Render the in-memory template string *html* with *context*."""
        template = web.template.Template(html)
        return template(**context)
class WebpyStrategy(BaseStrategy):
    """python-social-auth strategy implementation for the web.py framework."""

    DEFAULT_TEMPLATE_STRATEGY = WebpyTemplateStrategy

    def get_setting(self, name):
        """Read a configuration value from web.config."""
        return getattr(web.config, name)

    def request_data(self, merge=True):
        """Return the request parameters.

        With merge=True (the default), GET and POST data are combined;
        otherwise only the data matching the request method is returned.
        """
        if merge:
            method = 'both'
        elif web.ctx.method == 'POST':
            method = 'post'
        else:
            method = 'get'
        return web.input(_method=method)

    def request_host(self):
        """Return the host of the current request."""
        return web.ctx.host

    def redirect(self, url):
        """Issue an HTTP redirect to *url*."""
        return web.seeother(url)

    def html(self, content):
        """Return *content* as an HTML response."""
        web.header('Content-Type', 'text/html;charset=UTF-8')
        return content

    def render_html(self, tpl=None, html=None, context=None):
        """Render either a template file (*tpl*) or an inline string (*html*)."""
        if not tpl and not html:
            raise ValueError('Missing template or html parameters')
        if tpl:
            template = web.template.frender(tpl)
        else:
            template = web.template.Template(html)
        return template(**(context or {}))

    def session_get(self, name, default=None):
        """Fetch *name* from the session, falling back to *default*."""
        return web.web_session.get(name, default)

    def session_set(self, name, value):
        """Store *value* in the session under *name*."""
        web.web_session[name] = value

    def session_pop(self, name):
        """Remove and return *name* from the session (None if absent)."""
        return web.web_session.pop(name, None)

    def session_setdefault(self, name, value):
        """Set *name* to *value* only if unset; return the stored value."""
        return web.web_session.setdefault(name, value)

    def build_absolute_uri(self, path=None):
        """Return an absolute URI for *path* on the current host.

        Already-absolute http(s) URLs are returned unchanged.
        """
        path = path or ''
        if path.startswith(('http://', 'https://')):
            return path
        return '{0}://{1}{2}'.format(web.ctx.protocol, web.ctx.host, path)
| bsd-3-clause |
jjhuff/fcc-comments | lib/nltk/corpus/europarl_raw.py | 17 | 1561 | # Natural Language Toolkit: Europarl Corpus Readers
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Nitin Madnani <nmadnani@umiacs.umd.edu>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import re
from util import LazyCorpusLoader
from reader import *
# Create a new corpus reader instance for each European language.
# Each LazyCorpusLoader defers locating and reading the
# 'europarl_raw/<language>' corpus until the module attribute is first
# accessed; fileids are matched with the 'ep-*.<lang-code>' regex and
# decoded as UTF-8.
danish = LazyCorpusLoader(
    'europarl_raw/danish', EuroparlCorpusReader, r'ep-.*\.da', encoding='utf-8')

dutch = LazyCorpusLoader(
    'europarl_raw/dutch', EuroparlCorpusReader, r'ep-.*\.nl', encoding='utf-8')

english = LazyCorpusLoader(
    'europarl_raw/english', EuroparlCorpusReader, r'ep-.*\.en', encoding='utf-8')

finnish = LazyCorpusLoader(
    'europarl_raw/finnish', EuroparlCorpusReader, r'ep-.*\.fi', encoding='utf-8')

french = LazyCorpusLoader(
    'europarl_raw/french', EuroparlCorpusReader, r'ep-.*\.fr', encoding='utf-8')

german = LazyCorpusLoader(
    'europarl_raw/german', EuroparlCorpusReader, r'ep-.*\.de', encoding='utf-8')

greek = LazyCorpusLoader(
    'europarl_raw/greek', EuroparlCorpusReader, r'ep-.*\.el', encoding='utf-8')

italian = LazyCorpusLoader(
    'europarl_raw/italian', EuroparlCorpusReader, r'ep-.*\.it', encoding='utf-8')

portuguese = LazyCorpusLoader(
    'europarl_raw/portuguese', EuroparlCorpusReader, r'ep-.*\.pt', encoding='utf-8')

spanish = LazyCorpusLoader(
    'europarl_raw/spanish', EuroparlCorpusReader, r'ep-.*\.es', encoding='utf-8')

swedish = LazyCorpusLoader(
    'europarl_raw/swedish', EuroparlCorpusReader, r'ep-.*\.sv', encoding='utf-8')
| apache-2.0 |
ric2b/Vivaldi-browser | chromium/tools/grit/grit/tool/buildinfo.py | 11 | 2565 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Output the list of files to be generated by GRIT from an input.
"""
from __future__ import print_function
import getopt
import os
import sys
from grit import grd_reader
from grit.node import structure
from grit.tool import interface
class DetermineBuildInfo(interface.Tool):
  """Determine what files will be read and output by GRIT.

  Outputs the list of generated files and inputs used to stdout.

  Usage: grit buildinfo [-o DIR]

  The output directory is used for display only.
  """

  def __init__(self):
    pass

  def ShortDescription(self):
    """Describes this tool for the usage message."""
    return ('Determine what files will be needed and\n'
            'output by GRIT with a given input.')

  def Run(self, opts, args):
    """Main method for the buildinfo tool.

    Parses the '-o' flag (display-only output directory), reads the .grd
    input, and prints one 'type|path' line per generated file and one
    'input|path' line per input file.
    """
    self.output_directory = '.'
    (own_opts, args) = getopt.getopt(args, 'o:', ('help',))
    for (key, val) in own_opts:
      if key == '-o':
        self.output_directory = val
      elif key == '--help':
        self.ShowUsage()
        sys.exit(0)
    if len(args) > 0:
      print('This tool takes exactly one argument: the output directory via -o')
      return 2
    self.SetOptions(opts)

    res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose)

    # Map each output language to the directory its outputs are written to.
    langs = {}
    for output in res_tree.GetOutputFiles():
      if output.attrs['lang']:
        langs[output.attrs['lang']] = os.path.dirname(output.GetFilename())

    # For each language, walk the active structure nodes and print their
    # per-language generated files (tagged 'rc_all').
    for lang, dirname in langs.items():
      old_output_language = res_tree.output_language
      res_tree.SetOutputLanguage(lang)
      for node in res_tree.ActiveDescendants():
        with node:
          if (isinstance(node, structure.StructureNode) and
              node.HasFileForLanguage()):
            path = node.FileForLanguage(lang, dirname, create_file=False,
                                        return_if_not_generated=False)
            if path:
              path = os.path.join(self.output_directory, path)
              path = os.path.normpath(path)
              print('%s|%s' % ('rc_all', path))
      # Restore the tree's language so each iteration starts from the
      # previous state.
      res_tree.SetOutputLanguage(old_output_language)

    for output in res_tree.GetOutputFiles():
      path = os.path.join(self.output_directory, output.GetFilename())
      path = os.path.normpath(path)
      print('%s|%s' % (output.GetType(), path))

    for infile in res_tree.GetInputFiles():
      print('input|%s' % os.path.normpath(infile))
| bsd-3-clause |
fujicoin/fujicoin | test/functional/wallet_createwallet.py | 1 | 9841 | #!/usr/bin/env python3
# Copyright (c) 2018-2020 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test createwallet arguments.
"""
from test_framework.address import key_to_p2wpkh
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet_util import bytes_to_wif, generate_wif_key
class CreateWalletTest(FujicoinTestFramework):
    """Functional test exercising the createwallet RPC argument matrix:
    disable_private_keys, blank, passphrase and avoid_reuse."""

    def set_test_params(self):
        self.setup_clean_chain = False
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        node = self.nodes[0]
        node.generate(1)  # Leave IBD for sethdseed

        # Baseline wallet with private keys, used as a pubkey source below.
        self.nodes[0].createwallet(wallet_name='w0')
        w0 = node.get_wallet_rpc('w0')
        address1 = w0.getnewaddress()

        self.log.info("Test disableprivatekeys creation.")
        self.nodes[0].createwallet(wallet_name='w1', disable_private_keys=True)
        w1 = node.get_wallet_rpc('w1')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w1.getrawchangeaddress)
        w1.importpubkey(w0.getaddressinfo(address1)['pubkey'])

        self.log.info('Test that private keys cannot be imported')
        eckey = ECKey()
        eckey.generate()
        privkey = bytes_to_wif(eckey.get_bytes())
        assert_raises_rpc_error(-4, 'Cannot import private keys to a wallet with private keys disabled', w1.importprivkey, privkey)
        if self.options.descriptors:
            result = w1.importdescriptors([{'desc': descsum_create('wpkh(' + privkey + ')'), 'timestamp': 'now'}])
        else:
            result = w1.importmulti([{'scriptPubKey': {'address': key_to_p2wpkh(eckey.get_pubkey().get_bytes())}, 'timestamp': 'now', 'keys': [privkey]}])
        assert not result[0]['success']
        assert 'warning' not in result[0]
        assert_equal(result[0]['error']['code'], -4)
        assert_equal(result[0]['error']['message'], 'Cannot import private keys to a wallet with private keys disabled')

        self.log.info("Test blank creation with private keys disabled.")
        self.nodes[0].createwallet(wallet_name='w2', disable_private_keys=True, blank=True)
        w2 = node.get_wallet_rpc('w2')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w2.getrawchangeaddress)
        w2.importpubkey(w0.getaddressinfo(address1)['pubkey'])

        self.log.info("Test blank creation with private keys enabled.")
        self.nodes[0].createwallet(wallet_name='w3', disable_private_keys=False, blank=True)
        w3 = node.get_wallet_rpc('w3')
        assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getrawchangeaddress)
        # Import private key
        w3.importprivkey(generate_wif_key())
        # Imported private keys are currently ignored by the keypool
        assert_equal(w3.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w3.getnewaddress)
        # Set the seed
        if self.options.descriptors:
            w3.importdescriptors([{
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
                'timestamp': 'now',
                'active': True
            },
            {
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
                'timestamp': 'now',
                'active': True,
                'internal': True
            }])
        else:
            w3.sethdseed()
        assert_equal(w3.getwalletinfo()['keypoolsize'], 1)
        w3.getnewaddress()
        w3.getrawchangeaddress()

        self.log.info("Test blank creation with privkeys enabled and then encryption")
        self.nodes[0].createwallet(wallet_name='w4', disable_private_keys=False, blank=True)
        w4 = node.get_wallet_rpc('w4')
        assert_equal(w4.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
        # Encrypt the wallet. Nothing should change about the keypool
        w4.encryptwallet('pass')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w4.getrawchangeaddress)
        # Now set a seed and it should work. Wallet should also be encrypted
        w4.walletpassphrase('pass', 60)
        if self.options.descriptors:
            w4.importdescriptors([{
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/0h/*)'),
                'timestamp': 'now',
                'active': True
            },
            {
                'desc': descsum_create('wpkh(tprv8ZgxMBicQKsPcwuZGKp8TeWppSuLMiLe2d9PupB14QpPeQsqoj3LneJLhGHH13xESfvASyd4EFLJvLrG8b7DrLxEuV7hpF9uUc6XruKA1Wq/1h/*)'),
                'timestamp': 'now',
                'active': True,
                'internal': True
            }])
        else:
            w4.sethdseed()
        w4.getnewaddress()
        w4.getrawchangeaddress()

        self.log.info("Test blank creation with privkeys disabled and then encryption")
        self.nodes[0].createwallet(wallet_name='w5', disable_private_keys=True, blank=True)
        w5 = node.get_wallet_rpc('w5')
        assert_equal(w5.getwalletinfo()['keypoolsize'], 0)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)
        # Encrypt the wallet
        assert_raises_rpc_error(-16, "Error: wallet does not contain private keys, nothing to encrypt.", w5.encryptwallet, 'pass')
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", w5.getrawchangeaddress)

        self.log.info('New blank and encrypted wallets can be created')
        self.nodes[0].createwallet(wallet_name='wblank', disable_private_keys=False, blank=True, passphrase='thisisapassphrase')
        wblank = node.get_wallet_rpc('wblank')
        assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", wblank.signmessage, "needanargument", "test")
        wblank.walletpassphrase('thisisapassphrase', 60)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getnewaddress)
        assert_raises_rpc_error(-4, "Error: This wallet has no available keys", wblank.getrawchangeaddress)

        self.log.info('Test creating a new encrypted wallet.')
        # Born encrypted wallet is created (has keys)
        self.nodes[0].createwallet(wallet_name='w6', disable_private_keys=False, blank=False, passphrase='thisisapassphrase')
        w6 = node.get_wallet_rpc('w6')
        assert_raises_rpc_error(-13, "Error: Please enter the wallet passphrase with walletpassphrase first.", w6.signmessage, "needanargument", "test")
        w6.walletpassphrase('thisisapassphrase', 60)
        w6.signmessage(w6.getnewaddress('', 'legacy'), "test")
        w6.keypoolrefill(1)
        # There should only be 1 key for legacy, 3 for descriptors
        walletinfo = w6.getwalletinfo()
        keys = 3 if self.options.descriptors else 1
        assert_equal(walletinfo['keypoolsize'], keys)
        assert_equal(walletinfo['keypoolsize_hd_internal'], keys)
        # Allow empty passphrase, but there should be a warning
        resp = self.nodes[0].createwallet(wallet_name='w7', disable_private_keys=False, blank=False, passphrase='')
        assert 'Empty string given as passphrase, wallet will not be encrypted.' in resp['warning']
        w7 = node.get_wallet_rpc('w7')
        assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)

        self.log.info('Test making a wallet with avoid reuse flag')
        self.nodes[0].createwallet('w8', False, False, '', True) # Use positional arguments to check for bug where avoid_reuse could not be set for wallets without needing them to be encrypted
        w8 = node.get_wallet_rpc('w8')
        # NOTE(review): this re-checks w7 rather than w8 -- looks like a
        # copy/paste leftover from the previous section; confirm intent.
        assert_raises_rpc_error(-15, 'Error: running with an unencrypted wallet, but walletpassphrase was called.', w7.walletpassphrase, '', 60)
        assert_equal(w8.getwalletinfo()["avoid_reuse"], True)

        self.log.info('Using a passphrase with private keys disabled returns error')
        assert_raises_rpc_error(-4, 'Passphrase provided but private keys are disabled. A passphrase is only used to encrypt private keys, so cannot be used for wallets with private keys disabled.', self.nodes[0].createwallet, wallet_name='w9', disable_private_keys=True, passphrase='thisisapassphrase')
# Script entry point: run this functional test directly.
if __name__ == '__main__':
    CreateWalletTest().main()
| mit |
m8ttyB/pontoon | pontoon/base/migrations/0014_auto_20150806_0948.py | 7 | 1056 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Adds the ChangedEntityLocale through-model and exposes it as the
    # Entity.changed_locales many-to-many, used to track which locales have
    # translation changes pending since the last sync.

    dependencies = [
        ('base', '0013_add_en_US'),
    ]

    operations = [
        migrations.CreateModel(
            name='ChangedEntityLocale',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entity', models.ForeignKey(to='base.Entity')),
                ('locale', models.ForeignKey(to='base.Locale')),
            ],
        ),
        migrations.AddField(
            model_name='entity',
            name='changed_locales',
            field=models.ManyToManyField(help_text=b'List of locales in which translations for this entity have changed since the last sync.', to='base.Locale', through='base.ChangedEntityLocale'),
        ),
        # Each (entity, locale) pair may be recorded as changed only once.
        migrations.AlterUniqueTogether(
            name='changedentitylocale',
            unique_together=set([('entity', 'locale')]),
        ),
    ]
| bsd-3-clause |
adamchainz/ansible | lib/ansible/modules/remote_management/foreman/katello.py | 30 | 16959 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Eric D Helms <ericdhelms@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
description:
- Allows the management of Katello resources inside your Foreman server
version_added: "2.3"
author: "Eric D Helms (@ehelms)"
requirements:
- "nailgun >= 0.28.0"
- "python >= 2.6"
- datetime
options:
server_url:
description:
- URL of Foreman server
required: true
username:
description:
- Username on Foreman server
required: true
password:
description:
- Password for user accessing Foreman server
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host)
required: true
params:
description:
- Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description)
required: true
'''
EXAMPLES = '''
---
# Simple Example:
- name: "Create Product"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "product"
params:
name: "Centos 7"
# Abstraction Example:
# katello.yml
---
- name: "{{ name }}"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "{{ entity }}"
params: "{{ params }}"
# tasks.yml
---
- include: katello.yml
vars:
name: "Create Dev Environment"
entity: "lifecycle_environment"
params:
name: "Dev"
prior: "Library"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create Centos Product"
entity: "product"
params:
name: "Centos 7"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create 7.2 Repository"
entity: "repository"
params:
name: "Centos 7.2"
product: "Centos 7"
organization: "Default Organization"
content_type: "yum"
url: "http://mirror.centos.org/centos/7/os/x86_64/"
- include: katello.yml
vars:
name: "Create Centos 7 View"
entity: "content_view"
params:
name: "Centos 7 View"
organization: "Default Organization"
repositories:
- name: "Centos 7.2"
product: "Centos 7"
- include: katello.yml
vars:
name: "Enable RHEL Product"
entity: "repository_set"
params:
name: "Red Hat Enterprise Linux 7 Server (RPMs)"
product: "Red Hat Enterprise Linux Server"
organization: "Default Organization"
basearch: "x86_64"
releasever: "7"
'''
RETURN = '''# '''
import datetime
# nailgun is an optional dependency: record its availability instead of
# failing at import time, so the module can report a friendly error later.
try:
    from nailgun import entities, entity_fields, entity_mixins
    from nailgun.config import ServerConfig
    HAS_NAILGUN_PACKAGE = True
except ImportError:
    # Only a missing/broken import should be swallowed here; the original
    # bare 'except:' also hid SystemExit and KeyboardInterrupt.
    HAS_NAILGUN_PACKAGE = False
class NailGun(object):
    def __init__(self, server, entities, module):
        """Hold the nailgun ServerConfig, the nailgun entities module and
        the AnsibleModule so helper methods can reach all three."""
        self._server = server
        self._entities = entities
        self._module = module
        # Long-running Katello tasks (e.g. repository syncs) need a much
        # larger timeout than nailgun's default.
        entity_mixins.TASK_TIMEOUT = 1000
def find_organization(self, name, **params):
org = self._entities.Organization(self._server, name=name, **params)
response = org.search(set(), {'search': 'name={}'.format(name)})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No organization found for %s" % name)
def find_lifecycle_environment(self, name, organization):
org = self.find_organization(organization)
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
response = lifecycle_env.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
def find_product(self, name, organization):
org = self.find_organization(organization)
product = self._entities.Product(self._server, name=name, organization=org)
response = product.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Product found for %s" % name)
    def find_repository(self, name, product, organization):
        """Look up a Repository by name within a product and organization;
        fail the module when exactly one match is not found."""
        product = self.find_product(product, organization)
        repository = self._entities.Repository(self._server, name=name, product=product)
        # nailgun's Repository entity has no organization field; inject one
        # so the search can be scoped to the product's organization.
        repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
        repository.organization = product.organization
        response = repository.search()
        if len(response) == 1:
            return response[0]
        else:
            self._module.fail_json(msg="No Repository found for %s" % name)
def find_content_view(self, name, organization):
org = self.find_organization(organization)
content_view = self._entities.ContentView(self._server, name=name, organization=org)
response = content_view.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View found for %s" % name)
    def organization(self, params):
        """Create or update an Organization; always returns True.

        NOTE(review): find_organization() calls fail_json (which exits the
        module) when the organization does not exist, so the create()
        branch below looks unreachable for genuinely new organizations --
        confirm intended behavior.
        """
        name = params['name']
        del params['name']
        org = self.find_organization(name, **params)

        if org:
            # Adopt the existing record's id and push the remaining fields.
            org = self._entities.Organization(self._server, name=name, id=org.id, **params)
            org.update()
        else:
            org = self._entities.Organization(self._server, name=name, **params)
            org.create()

        return True
def manifest(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
try:
file = open(os.getcwd() + params['content'], 'r')
content = file.read()
finally:
file.close()
manifest = self._entities.Subscription(self._server)
try:
manifest.upload(
data={'organization_id': org.id},
files={'content': content}
)
return True
except Exception:
e = get_exception()
if "Import is the same as existing data" in e.message:
return False
else:
self._module.fail_json(msg="Manifest import failed with %s" % e)
def product(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
product = self._entities.Product(self._server, **params)
response = product.search()
if len(response) == 1:
product.id = response[0].id
product.update()
else:
product.create()
return True
def sync_product(self, params):
org = self.find_organization(params['organization'])
product = self.find_product(params['name'], org.name)
return product.sync()
    def repository(self, params):
        """Create or update a Repository inside its Product; returns True."""
        product = self.find_product(params['product'], params['organization'])
        params['product'] = product.id
        del params['organization']

        repository = self._entities.Repository(self._server, **params)
        # nailgun's Repository entity has no organization field; inject one
        # so the search below is scoped to the product's organization.
        repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
        repository.organization = product.organization
        response = repository.search()

        if len(response) == 1:
            # Adopt the existing record's id and push field changes.
            repository.id = response[0].id
            repository.update()
        else:
            repository.create()

        return True
def sync_repository(self, params):
org = self.find_organization(params['organization'])
repository = self.find_repository(params['name'], params['product'], org.name)
return repository.sync()
    def repository_set(self, params):
        """Enable a (Red Hat) repository set for a basearch/releasever.

        Returns True from the main path; the enable() call is only made
        when the derived repository does not exist yet.
        NOTE(review): find_product() fails the module when the product is
        missing, so the 'if not product' branch appears unreachable.
        """
        product = self.find_product(params['product'], params['organization'])
        del params['product']
        del params['organization']

        if not product:
            return False
        else:
            reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
            reposet = reposet.search()[0]

            # The enabled repository's name is the set name with parentheses
            # stripped, plus basearch and, when given, releasever.
            formatted_name = [params['name'].replace('(', '').replace(')', '')]
            formatted_name.append(params['basearch'])

            if 'releasever' in params:
                formatted_name.append(params['releasever'])

            formatted_name = ' '.join(formatted_name)

            repository = self._entities.Repository(self._server, product=product, name=formatted_name)
            # Inject an organization field (Repository lacks one) to scope
            # the existence check.
            repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
            repository.organization = product.organization
            repository = repository.search()

            # Only enable the set when the repository is not present yet.
            if len(repository) == 0:
                if 'releasever' in params:
                    reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
                else:
                    reposet.enable(data={'basearch': params['basearch']})

            return True
def sync_plan(self, params):
    """Create or update a sync plan and attach the listed products.

    Mutates *params* in place (organization -> id, sync_date -> datetime,
    products removed).  Always returns True.
    """
    org = self.find_organization(params['organization'])
    params['organization'] = org.id
    # The module receives only a HH:MM time; the API expects a datetime.
    params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
    products = params['products']
    del params['products']
    sync_plan = self._entities.SyncPlan(
        self._server,
        name=params['name'],
        organization=org
    )
    response = sync_plan.search()
    sync_plan.sync_date = params['sync_date']
    sync_plan.interval = params['interval']
    if len(response) == 1:
        sync_plan.id = response[0].id
        sync_plan.update()
    else:
        # nailgun's create() returns the freshly created entity itself,
        # not a result list; indexing it with [0] raised TypeError.
        response = sync_plan.create()
        sync_plan.id = response.id
    if products:
        # Resolve product names to ids before attaching them to the plan.
        ids = [self.find_product(name, org.name).id for name in products]
        sync_plan.add_products(data={'product_ids': ids})
    return True
def content_view(self, params):
    """Create or update a content view, optionally attaching repositories.

    Returns True so the caller can report the entity as changed — this
    matches every other CRUD helper in this class; previously the method
    fell off the end and returned None, which main() then passed to
    exit_json(changed=...).
    """
    org = self.find_organization(params['organization'])
    content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
    response = content_view.search()
    if len(response) == 1:
        content_view.id = response[0].id
        content_view.update()
    else:
        content_view = content_view.create()
    if params['repositories']:
        # Resolve each {'name', 'product'} pair to a Repository entity.
        repos = [self.find_repository(repo['name'], repo['product'], org.name)
                 for repo in params['repositories']]
        content_view.repository = repos
        content_view.update(['repository'])
    return True
def find_content_view_version(self, name, organization, environment):
    """Return the content-view version currently in *environment*.

    Calls fail_json (which aborts the module) when no unique match exists.
    """
    env = self.find_lifecycle_environment(environment, organization)
    view = self.find_content_view(name, organization)
    version = self._entities.ContentViewVersion(self._server, content_view=view)
    matches = version.search(['content_view'], {'environment_id': env.id})
    if len(matches) == 1:
        return matches[0]
    self._module.fail_json(msg="No Content View version found for %s" % matches)
def publish(self, params):
    """Publish a new version of the named content view."""
    view = self.find_content_view(params['name'], params['organization'])
    return view.publish()
def promote(self, params):
    """Promote a content-view version from one lifecycle environment to another."""
    target = self.find_lifecycle_environment(params['to_environment'], params['organization'])
    version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
    return version.promote(data={'environment_id': target.id})
def lifecycle_environment(self, params):
    """Create or update a lifecycle environment chained after 'prior'.

    Always returns True.
    """
    org = self.find_organization(params['organization'])
    prior = self.find_lifecycle_environment(params['prior'], params['organization'])
    env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior)
    matches = env.search()
    if len(matches) != 1:
        env.create()
    else:
        env.id = matches[0].id
        env.update()
    return True
def activation_key(self, params):
    """Create or update an activation key; optionally bind it to a
    content view and lifecycle environment.  Always returns True.
    """
    org = self.find_organization(params['organization'])
    key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
    matches = key.search()
    if len(matches) != 1:
        key.create()
    else:
        key.id = matches[0].id
        key.update()
    if params['content_view']:
        view = self.find_content_view(params['content_view'], params['organization'])
        environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
        key.content_view = view
        key.environment = environment
        key.update()
    return True
def main():
    """Ansible module entry point: parse arguments, connect, dispatch.

    Dispatches on 'entity' (and, where relevant, 'action') to the matching
    NailGun helper and reports its boolean result as 'changed'.
    """
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(required=True),
            username=dict(required=True, no_log=True),
            password=dict(required=True, no_log=True),
            entity=dict(required=True, no_log=False),
            action=dict(required=False, no_log=False),
            verify_ssl=dict(required=False, type='bool', default=False),
            params=dict(required=True, no_log=True, type='dict'),
        ),
        supports_check_mode=True
    )

    if not HAS_NAILGUN_PACKAGE:
        # Fixed: the closing parenthesis was missing from this hint.
        module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun)")

    server_url = module.params['server_url']
    username = module.params['username']
    password = module.params['password']
    entity = module.params['entity']
    action = module.params['action']
    params = module.params['params']
    verify_ssl = module.params['verify_ssl']

    server = ServerConfig(
        url=server_url,
        auth=(username, password),
        verify=verify_ssl
    )
    ng = NailGun(server, entities, module)

    # Smoke-test the connection and the credentials before doing any work.
    try:
        org = entities.Organization(server)
        org.search()
    except Exception as e:
        module.fail_json(msg="Failed to connect to Foreman server: %s " % e)

    result = False

    if entity == 'product':
        if action == 'sync':
            result = ng.sync_product(params)
        else:
            result = ng.product(params)
    elif entity == 'repository':
        if action == 'sync':
            result = ng.sync_repository(params)
        else:
            result = ng.repository(params)
    elif entity == 'manifest':
        result = ng.manifest(params)
    elif entity == 'repository_set':
        result = ng.repository_set(params)
    elif entity == 'sync_plan':
        result = ng.sync_plan(params)
    elif entity == 'content_view':
        if action == 'publish':
            result = ng.publish(params)
        elif action == 'promote':
            result = ng.promote(params)
        else:
            result = ng.content_view(params)
    elif entity == 'lifecycle_environment':
        result = ng.lifecycle_environment(params)
    elif entity == 'activation_key':
        result = ng.activation_key(params)
    else:
        module.fail_json(changed=False, result="Unsupported entity supplied")

    module.exit_json(changed=result, result="%s updated" % entity)

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
| gpl-3.0 |
Tao-Ma/gpdb | gpMgmt/bin/ext/yaml/__init__.py | 118 | 9337 |
from error import *
from tokens import *
from events import *
from nodes import *
from loader import *
from dumper import *
try:
from cyaml import *
except ImportError:
pass
def scan(stream, Loader=Loader):
    """Yield the scanner tokens found in a YAML stream."""
    token_source = Loader(stream)
    while token_source.check_token():
        yield token_source.get_token()
def parse(stream, Loader=Loader):
    """Yield the parser events found in a YAML stream."""
    event_source = Loader(stream)
    while event_source.check_event():
        yield event_source.get_event()
def compose(stream, Loader=Loader):
    """Return the representation tree of the first YAML document in a
    stream, or None when the stream contains no document.
    """
    tree_source = Loader(stream)
    if tree_source.check_node():
        return tree_source.get_node()
def compose_all(stream, Loader=Loader):
    """Yield one representation tree per YAML document in a stream."""
    tree_source = Loader(stream)
    while tree_source.check_node():
        yield tree_source.get_node()
def load_all(stream, Loader=Loader):
    """Yield one constructed Python object per YAML document in a stream."""
    constructor = Loader(stream)
    while constructor.check_data():
        yield constructor.get_data()
def load(stream, Loader=Loader):
    """Return the Python object for the first YAML document in a stream,
    or None when the stream contains no document.
    """
    constructor = Loader(stream)
    if constructor.check_data():
        return constructor.get_data()
def safe_load_all(stream):
    """
    Parse all YAML documents in a stream
    and produce corresponding Python objects.
    Resolve only basic YAML tags.
    """
    # SafeLoader refuses arbitrary Python-object tags, which makes this
    # variant suitable for untrusted input.
    return load_all(stream, SafeLoader)
def safe_load(stream):
    """
    Parse the first YAML document in a stream
    and produce the corresponding Python object.
    Resolve only basic YAML tags.
    """
    # SafeLoader refuses arbitrary Python-object tags, which makes this
    # variant suitable for untrusted input.
    return load(stream, SafeLoader)
def emit(events, stream=None, Dumper=Dumper,
         canonical=None, indent=None, width=None,
         allow_unicode=None, line_break=None):
    """
    Emit YAML parsing events into a stream.
    If stream is None, return the produced string instead.
    """
    return_string = stream is None
    if return_string:
        # Fall back to a StringIO buffer so the text can be returned.
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        stream = StringIO()
    emitter = Dumper(stream, canonical=canonical, indent=indent, width=width,
                     allow_unicode=allow_unicode, line_break=line_break)
    for event in events:
        emitter.emit(event)
    if return_string:
        return stream.getvalue()
def serialize_all(nodes, stream=None, Dumper=Dumper,
                  canonical=None, indent=None, width=None,
                  allow_unicode=None, line_break=None,
                  encoding='utf-8', explicit_start=None, explicit_end=None,
                  version=None, tags=None):
    """
    Serialize a sequence of representation trees into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return_string = stream is None
    if return_string:
        # Fall back to a StringIO buffer so the text can be returned.
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        stream = StringIO()
    serializer = Dumper(stream, canonical=canonical, indent=indent, width=width,
                        allow_unicode=allow_unicode, line_break=line_break,
                        encoding=encoding, version=version, tags=tags,
                        explicit_start=explicit_start, explicit_end=explicit_end)
    serializer.open()
    for node in nodes:
        serializer.serialize(node)
    serializer.close()
    if return_string:
        return stream.getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a representation tree into a YAML stream.
    If stream is None, return the produced string instead.
    """
    # Single-node convenience wrapper around serialize_all().
    return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(documents, stream=None, Dumper=Dumper,
             default_style=None, default_flow_style=None,
             canonical=None, indent=None, width=None,
             allow_unicode=None, line_break=None,
             encoding='utf-8', explicit_start=None, explicit_end=None,
             version=None, tags=None):
    """
    Serialize a sequence of Python objects into a YAML stream.
    If stream is None, return the produced string instead.
    """
    return_string = stream is None
    if return_string:
        # Fall back to a StringIO buffer so the text can be returned.
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        stream = StringIO()
    writer = Dumper(stream, default_style=default_style,
                    default_flow_style=default_flow_style,
                    canonical=canonical, indent=indent, width=width,
                    allow_unicode=allow_unicode, line_break=line_break,
                    encoding=encoding, version=version, tags=tags,
                    explicit_start=explicit_start, explicit_end=explicit_end)
    writer.open()
    for document in documents:
        writer.represent(document)
    writer.close()
    if return_string:
        return stream.getvalue()
def dump(data, stream=None, Dumper=Dumper, **kwds):
    """
    Serialize a Python object into a YAML stream.
    If stream is None, return the produced string instead.
    """
    # Single-document convenience wrapper around dump_all().
    return dump_all([data], stream, Dumper=Dumper, **kwds)
def safe_dump_all(documents, stream=None, **kwds):
    """
    Serialize a sequence of Python objects into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    # SafeDumper restricts the output to standard YAML tags only.
    return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
    """
    Serialize a Python object into a YAML stream.
    Produce only basic YAML tags.
    If stream is None, return the produced string instead.
    """
    # SafeDumper restricts the output to standard YAML tags only.
    return dump_all([data], stream, Dumper=SafeDumper, **kwds)
def add_implicit_resolver(tag, regexp, first=None,
                          Loader=Loader, Dumper=Dumper):
    """
    Add an implicit scalar detector.
    If an implicit scalar value matches the given regexp,
    the corresponding tag is assigned to the scalar.
    first is a sequence of possible initial characters or None.
    """
    # Register on both classes so loading and dumping stay symmetric.
    Loader.add_implicit_resolver(tag, regexp, first)
    Dumper.add_implicit_resolver(tag, regexp, first)
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
    """
    Add a path based resolver for the given tag.
    A path is a list of keys that forms a path
    to a node in the representation tree.
    Keys can be string values, integers, or None.
    """
    # Register on both classes so loading and dumping stay symmetric.
    Loader.add_path_resolver(tag, path, kind)
    Dumper.add_path_resolver(tag, path, kind)
def add_constructor(tag, constructor, Loader=Loader):
    """
    Add a constructor for the given tag.
    Constructor is a function that accepts a Loader instance
    and a node object and produces the corresponding Python object.
    """
    Loader.add_constructor(tag, constructor)
def add_multi_constructor(tag_prefix, multi_constructor, Loader=Loader):
    """
    Add a multi-constructor for the given tag prefix.
    Multi-constructor is called for a node if its tag starts with tag_prefix.
    Multi-constructor accepts a Loader instance, a tag suffix,
    and a node object and produces the corresponding Python object.
    """
    Loader.add_multi_constructor(tag_prefix, multi_constructor)
def add_representer(data_type, representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Representer is a function accepting a Dumper instance
    and an instance of the given data type
    and producing the corresponding representation node.
    """
    Dumper.add_representer(data_type, representer)
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
    """
    Add a representer for the given type.
    Multi-representer is a function accepting a Dumper instance
    and an instance of the given data type or subtype
    and producing the corresponding representation node.
    """
    Dumper.add_multi_representer(data_type, multi_representer)
class YAMLObjectMetaclass(type):
    """
    The metaclass for YAMLObject.

    Automatically registers each concrete subclass (one that defines a
    non-None yaml_tag in its class body) with its loader and dumper.
    """
    def __init__(cls, name, bases, kwds):
        super(YAMLObjectMetaclass, cls).__init__(name, bases, kwds)
        # Only register classes that define yaml_tag themselves; abstract
        # intermediates inheriting yaml_tag = None are skipped.
        if 'yaml_tag' in kwds and kwds['yaml_tag'] is not None:
            cls.yaml_loader.add_constructor(cls.yaml_tag, cls.from_yaml)
            cls.yaml_dumper.add_representer(cls, cls.to_yaml)
class YAMLObject(object):
    """
    An object that can dump itself to a YAML stream
    and load itself from a YAML stream.

    Subclasses set yaml_tag (and optionally yaml_loader/yaml_dumper) and
    are then registered automatically by YAMLObjectMetaclass.
    """
    # Python-2 style metaclass declaration.
    __metaclass__ = YAMLObjectMetaclass
    __slots__ = ()  # no direct instantiation, so allow immutable subclasses

    # Hooks that subclasses may override to customize (de)serialization.
    yaml_loader = Loader
    yaml_dumper = Dumper
    yaml_tag = None
    yaml_flow_style = None

    def from_yaml(cls, loader, node):
        """
        Convert a representation node to a Python object.
        """
        return loader.construct_yaml_object(node, cls)
    # Python-2 compatible alternative to the @classmethod decorator.
    from_yaml = classmethod(from_yaml)

    def to_yaml(cls, dumper, data):
        """
        Convert a Python object to a representation node.
        """
        return dumper.represent_yaml_object(cls.yaml_tag, data, cls,
                                            flow_style=cls.yaml_flow_style)
    to_yaml = classmethod(to_yaml)
| apache-2.0 |
ZelphirKaltstahl/QuestionsAndAnswers | src/InputObservers/StatisticsInputEventObserver.py | 1 | 6547 | import os
import json
from datetime import datetime
from InputObservers.InputObserver import InputObserver
from decorators.overrides import overrides
class StatisticsInputEventObserver(InputObserver):
    """Input-event observer that records per-day answering statistics.

    Statistics are kept as a nested dict:
        stats[date][question_set]['count'|'deactivated'|'correct'|'incorrect']
    loaded from data/stats.json on construction and written back on exit.
    """

    def __init__(self):
        super(InputObserver, self).__init__()
        self.log_tag = '[StatisticsInputEventObserver]'
        self.stats = None  # populated by load_statistics() below
        self.current_q_and_a_identifier = None  # set on load_training_state
        self.incorrect_counts = {}
        self.load_statistics()

    def _record_event(self, category, question_number):
        """Increment the per-day counter for *category*.

        *category* is one of 'deactivated', 'correct', 'incorrect'.
        Shared implementation for the three update_on_* event handlers,
        which previously duplicated this logic verbatim.
        """
        today = datetime.now().strftime('%Y-%m-%d')
        question_number = str(question_number)
        if today not in self.stats:
            self.create_new_date_entry(today)
        if self.current_q_and_a_identifier not in self.stats[today]:
            self.create_new_q_and_a_entry(today, self.current_q_and_a_identifier)
        entry = self.stats[today][self.current_q_and_a_identifier]
        if question_number not in entry[category]:
            entry[category][question_number] = 0
        entry['count'] += 1
        entry[category][question_number] += 1

    @overrides(InputObserver)
    def update_on_show_all(self):
        pass

    @overrides(InputObserver)
    def update_on_deactivate(self, question_number):
        self._record_event('deactivated', question_number)

    @overrides(InputObserver)
    def update_on_correct(self, question_number):
        self._record_event('correct', question_number)

    @overrides(InputObserver)
    def update_on_incorrect(self, question_number):
        self._record_event('incorrect', question_number)

    @overrides(InputObserver)
    def update_on_load_training_state(self, file_path, question_set_identifier):
        # Remember which question set is active and make sure today's
        # entry exists for it.
        today = datetime.now().strftime('%Y-%m-%d')
        self.current_q_and_a_identifier = question_set_identifier
        if today not in self.stats:
            self.create_new_date_entry(today)
        if self.current_q_and_a_identifier not in self.stats[today]:
            self.create_new_q_and_a_entry(today, self.current_q_and_a_identifier)

    @overrides(InputObserver)
    def update_on_save_training_state(self, file_path, question_set_identifier):
        pass

    @overrides(InputObserver)
    def update_on_show_help(self):
        pass

    @overrides(InputObserver)
    def update_on_show_stats(self):
        pass

    @overrides(InputObserver)
    def update_on_exit(self):
        print(self.log_tag, 'exit called')
        self.save_statistics()

    @overrides(InputObserver)
    def update(self):
        """General update method for any kind of event."""
        pass

    def load_statistics(self):
        """Load the stats dict from data/stats.json; re-raise on failure."""
        file_path = 'data' + os.path.sep + 'stats.json'
        try:
            with open(file_path, 'r') as input_file:
                self.stats = json.load(input_file)
        except Exception as e:
            print(self.log_tag, 'Error while reading stats. Is the file not readable?')
            raise e

    def save_statistics(self):
        """Write the stats dict back to data/stats.json; re-raise on failure."""
        file_path = 'data' + os.path.sep + 'stats.json'
        try:
            with open(file_path, 'w') as output_file:
                json.dump(self.stats, output_file, ensure_ascii=False, indent='\t', sort_keys=True)
        except Exception as e:
            print(self.log_tag, 'Error while saving stats. Is the file not writable?')
            raise e

    def create_new_date_entry(self, date):
        self.stats[date] = {}

    def create_new_q_and_a_entry(self, date, q_and_a_identifier):
        self.stats[date][q_and_a_identifier] = {
            'count': 0,
            'deactivated': {},
            'correct': {},
            'incorrect': {},
        }

    def create_new_incorrect_entry(self, date, q_and_a_identifier, question_number):
        self.stats[date][q_and_a_identifier]['incorrect'][question_number] = 0

    def create_new_correct_entry(self, date, q_and_a_identifier, question_number):
        self.stats[date][q_and_a_identifier]['correct'][question_number] = 0

    def create_new_deactivated_entry(self, date, q_and_a_identifier, question_number):
        self.stats[date][q_and_a_identifier]['deactivated'][question_number] = 0
# def update_most_difficult_questions(self):
# """This method updates the keys of the most difficult questions in the stats dictionary,
# by looking up, which questions have the highest incorrectly answered counts."""
# maximum_questions = 3
# # recalculate the most difficult questions
# sorted_counts = list(self.incorrect_counts.values())
# sorted_counts.sort()
# highest_counts = sorted_counts[-maximum_questions:]
# found = 0
# most_difficult_questions_keys = []
# for key,value in self.incorrect_counts.items():
# if found == maximum_questions:
# break
# if value == highest_counts[0]:
# most_difficult_questions_keys.append(key)
# found += 1
# today = datetime.now().strftime('%Y-%m-%d')
# if today not in self.stats:
# self.create_new_date_entry(today)
# if self.current_q_and_a_identifier not in self.stats[today]:
# self.create_new_q_and_a_entry(today, self.current_q_and_a_identifier)
# self.stats[today][self.current_q_and_a_identifier]['most_difficult'] = most_difficult_questions_keys | mit |
Orav/kbengine | kbe/src/lib/python/Tools/scripts/patchcheck.py | 1 | 6012 | #!/usr/bin/env python3
import re
import sys
import shutil
import os.path
import subprocess
import sysconfig
import reindent
import untabify
SRCDIR = sysconfig.get_config_var('srcdir')
def n_files_str(count):
    """Return 'N file(s)' with the proper plurality on 'file'."""
    suffix = "" if count == 1 else "s"
    return "{} file{}".format(count, suffix)
def status(message, modal=False, info=None):
    """Decorator factory that reports progress on stdout.

    Prints *message* before running the wrapped function, then either
    'done' (default), the value of info(result), or a yes/NO verdict
    when modal is true.  The wrapped function's result is passed through.
    """
    def decorated_fxn(fxn):
        def call_fxn(*args, **kwargs):
            sys.stdout.write(message + ' ... ')
            sys.stdout.flush()
            result = fxn(*args, **kwargs)
            if info:
                print(info(result))
            elif modal:
                print("yes" if result else "NO")
            else:
                print("done")
            return result
        return call_fxn
    return decorated_fxn
def mq_patches_applied():
    """Check if there are any applied MQ patches."""
    cmd = 'hg qapplied'
    with subprocess.Popen(cmd.split(),
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE) as st:
        bstdout, _ = st.communicate()
        # Truthy only when 'hg qapplied' succeeded AND printed at least
        # one applied patch name.
        return st.returncode == 0 and bstdout
@status("Getting the list of files that have been added/changed",
info=lambda x: n_files_str(len(x)))
def changed_files():
"""Get the list of changed or added files from Mercurial."""
if not os.path.isdir(os.path.join(SRCDIR, '.hg')):
sys.exit('need a checkout to get modified files')
cmd = 'hg status --added --modified --no-status'
if mq_patches_applied():
cmd += ' --rev qparent'
with subprocess.Popen(cmd.split(), stdout=subprocess.PIPE) as st:
return [x.decode().rstrip() for x in st.stdout]
def report_modified_files(file_paths):
    """Render the list of changed files as a human-readable summary."""
    count = len(file_paths)
    if not file_paths:
        return n_files_str(count)
    lines = ["{}:".format(n_files_str(count))]
    lines.extend("  {}".format(path) for path in file_paths)
    return "\n".join(lines)
@status("Fixing whitespace", info=report_modified_files)
def normalize_whitespace(file_paths):
"""Make sure that the whitespace for .py files have been normalized."""
reindent.makebackup = False # No need to create backups.
fixed = [path for path in file_paths if path.endswith('.py') and
reindent.check(os.path.join(SRCDIR, path))]
return fixed
@status("Fixing C file whitespace", info=report_modified_files)
def normalize_c_whitespace(file_paths):
"""Report if any C files """
fixed = []
for path in file_paths:
abspath = os.path.join(SRCDIR, path)
with open(abspath, 'r') as f:
if '\t' not in f.read():
continue
untabify.process(abspath, 8, verbose=False)
fixed.append(path)
return fixed
# Trailing whitespace immediately before a (possibly CRLF) line ending.
ws_re = re.compile(br'\s+(\r?\n)$')

@status("Fixing docs whitespace", info=report_modified_files)
def normalize_docs_whitespace(file_paths):
    """Strip trailing whitespace from doc files, keeping a .bak backup."""
    fixed = []
    for path in file_paths:
        abspath = os.path.join(SRCDIR, path)
        try:
            with open(abspath, 'rb') as f:
                lines = f.readlines()
            new_lines = [ws_re.sub(br'\1', line) for line in lines]
            if new_lines != lines:
                # Keep a backup copy before rewriting the file in place.
                shutil.copyfile(abspath, abspath + '.bak')
                with open(abspath, 'wb') as f:
                    f.writelines(new_lines)
                fixed.append(path)
        except Exception as err:
            print('Cannot fix %s: %s' % (path, err))
    return fixed
@status("Docs modified", modal=True)
def docs_modified(file_paths):
"""Report if any file in the Doc directory has been changed."""
return bool(file_paths)
@status("Misc/ACKS updated", modal=True)
def credit_given(file_paths):
"""Check if Misc/ACKS has been changed."""
return os.path.join('Misc', 'ACKS') in file_paths
@status("Misc/NEWS updated", modal=True)
def reported_news(file_paths):
"""Check if Misc/NEWS has been changed."""
return os.path.join('Misc', 'NEWS') in file_paths
@status("configure regenerated", modal=True, info=str)
def regenerated_configure(file_paths):
"""Check if configure has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'configure' in file_paths else "no"
else:
return "not needed"
@status("pyconfig.h.in regenerated", modal=True, info=str)
def regenerated_pyconfig_h_in(file_paths):
"""Check if pyconfig.h.in has been regenerated."""
if 'configure.ac' in file_paths:
return "yes" if 'pyconfig.h.in' in file_paths else "no"
else:
return "not needed"
def main():
    """Run every fix-up and reminder check against the modified files."""
    file_paths = changed_files()
    python_files = [fn for fn in file_paths if fn.endswith('.py')]
    c_files = [fn for fn in file_paths if fn.endswith(('.c', '.h'))]
    doc_files = [fn for fn in file_paths if fn.startswith('Doc')]
    misc_files = {os.path.join('Misc', 'ACKS'), os.path.join('Misc', 'NEWS')}\
                 & set(file_paths)
    # PEP 8 whitespace rules enforcement.
    normalize_whitespace(python_files)
    # C rules enforcement.
    normalize_c_whitespace(c_files)
    # Doc whitespace enforcement.
    normalize_docs_whitespace(doc_files)
    # Docs updated.
    docs_modified(doc_files)
    # Misc/ACKS changed.
    credit_given(misc_files)
    # Misc/NEWS changed.
    reported_news(misc_files)
    # Regenerated configure, if necessary.
    regenerated_configure(file_paths)
    # Regenerated pyconfig.h.in, if necessary.
    regenerated_pyconfig_h_in(file_paths)
    # Test suite run and passed.
    if python_files or c_files:
        end = " and check for refleaks?" if c_files else "?"
        print()
        print("Did you run the test suite" + end)


if __name__ == '__main__':
    main()
| lgpl-3.0 |
JosmanPS/scikit-learn | sklearn/externals/joblib/my_exceptions.py | 289 | 3257 | """
Exceptions
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
import sys
class JoblibException(Exception):
    """A simple exception with an error message that you can get to."""

    def __init__(self, *args):
        # Store the constructor arguments on self.args so that
        # __reduce__ can round-trip them through pickling.
        self.args = args

    def __reduce__(self):
        # For pickling
        return self.__class__, self.args, {}

    def __repr__(self):
        # NOTE(review): assumes at least one constructor argument was
        # given -- self.args[0] raises IndexError otherwise.
        if hasattr(self, 'args'):
            message = self.args[0]
        else:
            # Python 2 compat: instances of JoblibException can be created
            # without calling JoblibException __init__ in case of
            # multi-inheritance: in that case the message is stored as an
            # explicit attribute under Python 2 (only)
            message = self.message
        name = self.__class__.__name__
        return '%s\n%s\n%s\n%s' % (name, 75 * '_', message, 75 * '_')

    __str__ = __repr__
class TransportableException(JoblibException):
    """An exception containing all the info to wrap an original
    exception and recreate it.
    """

    def __init__(self, message, etype):
        # Keep the formatted message and the original exception type so
        # the error can be reconstructed after crossing process boundaries.
        self.message = message
        self.etype = etype

    def __reduce__(self):
        # For pickling
        return self.__class__, (self.message, self.etype), {}
# Cache of already-generated Joblib* exception classes, keyed by class
# name, so repeated calls return the same type object.
_exception_mapping = dict()


def _mk_exception(exception, name=None):
    """Return a (class, name) pair for a Joblib-flavoured subclass of
    *exception* that also inherits from JoblibException."""
    # Create an exception inheriting from both JoblibException
    # and that exception
    if name is None:
        name = exception.__name__
    this_name = 'Joblib%s' % name
    if this_name in _exception_mapping:
        # Avoid creating twice the same exception
        this_exception = _exception_mapping[this_name]
    else:
        if exception is Exception:
            # We cannot create a subclass: we are already a trivial
            # subclass
            return JoblibException, this_name
        this_exception = type(this_name, (exception, JoblibException),
                              dict(__repr__=JoblibException.__repr__,
                                   __str__=JoblibException.__str__),
                              )
        _exception_mapping[this_name] = this_exception
    return this_exception, this_name
def _mk_common_exceptions():
    """Build Joblib* variants of the interpreter's builtin exceptions
    and return them as a {name: class} namespace dict."""
    namespace = dict()
    if sys.version_info[0] == 3:
        import builtins as _builtin_exceptions
        # On Python 3 only names ending in 'Error' are wrapped.
        common_exceptions = filter(
            lambda x: x.endswith('Error'),
            dir(_builtin_exceptions))
    else:
        import exceptions as _builtin_exceptions
        common_exceptions = dir(_builtin_exceptions)

    for name in common_exceptions:
        obj = getattr(_builtin_exceptions, name)
        if isinstance(obj, type) and issubclass(obj, BaseException):
            try:
                this_obj, this_name = _mk_exception(obj, name=name)
                namespace[this_name] = this_obj
            except TypeError:
                # Cannot create a consistent method resolution order:
                # a class that we can't subclass properly, probably
                # BaseException
                pass
    return namespace


# Updating module locals so that the exceptions pickle right. AFAIK this
# works only at module-creation time
locals().update(_mk_common_exceptions())
| bsd-3-clause |
gavin-feng/odoo | addons/website_event/controllers/main.py | 209 | 11805 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import babel.dates
import time
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
import werkzeug.urls
from werkzeug.exceptions import NotFound
from openerp import http
from openerp import tools
from openerp.http import request
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
class website_event(http.Controller):
@http.route(['/event', '/event/page/<int:page>'], type='http', auth="public", website=True)
def events(self, page=1, **searches):
cr, uid, context = request.cr, request.uid, request.context
event_obj = request.registry['event.event']
type_obj = request.registry['event.type']
country_obj = request.registry['res.country']
searches.setdefault('date', 'all')
searches.setdefault('type', 'all')
searches.setdefault('country', 'all')
domain_search = {}
def sdn(date):
return date.strftime('%Y-%m-%d 23:59:59')
def sd(date):
return date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT)
today = datetime.today()
dates = [
['all', _('Next Events'), [("date_end", ">", sd(today))], 0],
['today', _('Today'), [
("date_end", ">", sd(today)),
("date_begin", "<", sdn(today))],
0],
['week', _('This Week'), [
("date_end", ">=", sd(today + relativedelta(days=-today.weekday()))),
("date_begin", "<", sdn(today + relativedelta(days=6-today.weekday())))],
0],
['nextweek', _('Next Week'), [
("date_end", ">=", sd(today + relativedelta(days=7-today.weekday()))),
("date_begin", "<", sdn(today + relativedelta(days=13-today.weekday())))],
0],
['month', _('This month'), [
("date_end", ">=", sd(today.replace(day=1))),
("date_begin", "<", (today.replace(day=1) + relativedelta(months=1)).strftime('%Y-%m-%d 00:00:00'))],
0],
['nextmonth', _('Next month'), [
("date_end", ">=", sd(today.replace(day=1) + relativedelta(months=1))),
("date_begin", "<", (today.replace(day=1) + relativedelta(months=2)).strftime('%Y-%m-%d 00:00:00'))],
0],
['old', _('Old Events'), [
("date_end", "<", today.strftime('%Y-%m-%d 00:00:00'))],
0],
]
# search domains
current_date = None
current_type = None
current_country = None
for date in dates:
if searches["date"] == date[0]:
domain_search["date"] = date[2]
if date[0] != 'all':
current_date = date[1]
if searches["type"] != 'all':
current_type = type_obj.browse(cr, uid, int(searches['type']), context=context)
domain_search["type"] = [("type", "=", int(searches["type"]))]
if searches["country"] != 'all' and searches["country"] != 'online':
current_country = country_obj.browse(cr, uid, int(searches['country']), context=context)
domain_search["country"] = ['|', ("country_id", "=", int(searches["country"])), ("country_id", "=", False)]
elif searches["country"] == 'online':
domain_search["country"] = [("country_id", "=", False)]
def dom_without(without):
domain = [('state', "in", ['draft','confirm','done'])]
for key, search in domain_search.items():
if key != without:
domain += search
return domain
# count by domains without self search
for date in dates:
if date[0] <> 'old':
date[3] = event_obj.search(
request.cr, request.uid, dom_without('date') + date[2],
count=True, context=request.context)
domain = dom_without('type')
types = event_obj.read_group(
request.cr, request.uid, domain, ["id", "type"], groupby="type",
orderby="type", context=request.context)
type_count = event_obj.search(request.cr, request.uid, domain,
count=True, context=request.context)
types.insert(0, {
'type_count': type_count,
'type': ("all", _("All Categories"))
})
domain = dom_without('country')
countries = event_obj.read_group(
request.cr, request.uid, domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
country_id_count = event_obj.search(request.cr, request.uid, domain,
count=True, context=request.context)
countries.insert(0, {
'country_id_count': country_id_count,
'country_id': ("all", _("All Countries"))
})
step = 10 # Number of events per page
event_count = event_obj.search(
request.cr, request.uid, dom_without("none"), count=True,
context=request.context)
pager = request.website.pager(
url="/event",
url_args={'date': searches.get('date'), 'type': searches.get('type'), 'country': searches.get('country')},
total=event_count,
page=page,
step=step,
scope=5)
order = 'website_published desc, date_begin'
if searches.get('date','all') == 'old':
order = 'website_published desc, date_begin desc'
obj_ids = event_obj.search(
request.cr, request.uid, dom_without("none"), limit=step,
offset=pager['offset'], order=order, context=request.context)
events_ids = event_obj.browse(request.cr, request.uid, obj_ids,
context=request.context)
values = {
'current_date': current_date,
'current_country': current_country,
'current_type': current_type,
'event_ids': events_ids,
'dates': dates,
'types': types,
'countries': countries,
'pager': pager,
'searches': searches,
'search_path': "?%s" % werkzeug.url_encode(searches),
}
return request.website.render("website_event.index", values)
@http.route(['/event/<model("event.event"):event>/page/<path:page>'], type='http', auth="public", website=True)
def event_page(self, event, page, **post):
    """Render a static sub-page attached to an event.

    Bare template names (no dot) are qualified with the
    ``website_event`` module; a template that cannot be loaded
    yields a 404 instead of a traceback.
    """
    values = {
        'event': event,
        'main_object': event
    }
    if '.' not in page:
        # Qualify unprefixed template names with this module.
        page = 'website_event.%s' % page
    try:
        request.website.get_template(page)
    except ValueError:
        # Fixed: `except ValueError, e` is Python-2-only syntax and the
        # bound exception was never used.  Template not found -> 404.
        raise NotFound
    return request.website.render(page, values)
@http.route(['/event/<model("event.event"):event>'], type='http', auth="public", website=True)
def event(self, event, **post):
    """Redirect the bare event URL to its first menu child page when a
    menu exists, otherwise to the event's registration page, keeping
    the website editor flag when it was requested."""
    menu = event.menu_id
    if menu and menu.child_id:
        destination = menu.child_id[0].url
    else:
        destination = '/event/%s/register' % str(event.id)
    if post.get('enable_editor') == '1':
        destination = destination + '?enable_editor=1'
    return request.redirect(destination)
@http.route(['/event/<model("event.event"):event>/register'], type='http', auth="public", website=True)
def event_register(self, event, **post):
    """Render the full event description / registration page."""
    render_values = {
        'event': event,
        'main_object': event,
        # Exposed so the QWeb template can call Python's range().
        'range': range,
    }
    return request.website.render(
        "website_event.event_description_full", render_values)
@http.route('/event/add_event', type='http', auth="user", methods=['POST'], website=True)
def add_event(self, event_name="New Event", **kwargs):
    """HTTP entry point: create a draft event and open its editor."""
    context = request.context
    return self._add_event(event_name, context, **kwargs)
def _add_event(self, event_name=None, context=None, **kwargs):
    """Create a two-day draft event starting in two weeks and redirect
    to its registration page with the website editor enabled.

    :param event_name: display name; falls back to a translated
        "New Event" when falsy.
    :param context: optional Odoo context dict.  Fixed: the default was
        the mutable ``{}``, which is shared across calls; ``None`` plus
        an in-body fallback avoids that pitfall.
    """
    if context is None:
        context = {}
    if not event_name:
        event_name = _("New Event")
    Event = request.registry.get('event.event')
    date_begin = datetime.today() + timedelta(days=14)
    vals = {
        'name': event_name,
        'date_begin': date_begin.strftime('%Y-%m-%d'),
        'date_end': (date_begin + timedelta(days=1)).strftime('%Y-%m-%d'),
    }
    event_id = Event.create(request.cr, request.uid, vals, context=context)
    event = Event.browse(request.cr, request.uid, event_id, context=context)
    return request.redirect("/event/%s/register?enable_editor=1" % slug(event))
def get_formated_date(self, event):
    """Return a short localized date range for *event*, e.g. "Jan 12-14"
    (the day suffix is omitted when the event starts and ends on the
    same day)."""
    lang = request.context.get('lang', 'en_US')
    fmt = tools.DEFAULT_SERVER_DATETIME_FORMAT
    begin = datetime.strptime(event.date_begin, fmt).date()
    end = datetime.strptime(event.date_end, fmt).date()
    month_names = babel.dates.get_month_names('abbreviated', locale=lang)
    day_suffix = ("-" + end.strftime("%e")) if end != begin else ""
    return _('%(month)s %(start_day)s%(end_day)s') % {
        'month': month_names[begin.month],
        'start_day': begin.strftime("%e"),
        'end_day': day_suffix,
    }
@http.route('/event/get_country_event_list', type='http', auth='public', website=True)
def get_country_events(self, **post):
    """Render up to six upcoming confirmed events, preferring events in
    the visitor's GeoIP country (or with no address) when a country can
    be resolved, and falling back to all upcoming confirmed events.
    """
    cr, uid, context = request.cr, request.uid, request.context
    country_obj = request.registry['res.country']
    event_obj = request.registry['event.event']
    # Fixed: the session may contain no 'geoip' entry at all (e.g. GeoIP
    # lookup disabled), so use .get() instead of indexing to avoid a
    # KeyError.
    country_code = request.session.get('geoip', {}).get('country_code')
    result = {'events': [], 'country': False}
    today = time.strftime('%Y-%m-%d 00:00:00')
    country_ids = []
    event_ids = []
    if country_code:
        country_ids = country_obj.search(
            cr, uid, [('code', '=', country_code)], context=context)
        event_ids = event_obj.search(
            cr, uid,
            ['|', ('address_id', '=', None), ('country_id.code', '=', country_code),
             ('date_begin', '>=', today), ('state', '=', 'confirm')],
            order="date_begin", context=context)
    if not event_ids:
        event_ids = event_obj.search(
            cr, uid, [('date_begin', '>=', today), ('state', '=', 'confirm')],
            order="date_begin", context=context)
    for event in event_obj.browse(cr, uid, event_ids, context=context)[:6]:
        if country_code and event.country_id.code == country_code:
            result['country'] = country_obj.browse(
                cr, uid, country_ids[0], context=context)
        result['events'].append({
            "date": self.get_formated_date(event),
            "event": event,
            "url": event.website_url})
    return request.website.render("website_event.country_events_list", result)
| agpl-3.0 |
eliksir/mailmojo-python-sdk | mailmojo_sdk/models/newsletter_detail.py | 1 | 18918 | # coding: utf-8
"""
MailMojo API
v1 of the MailMojo API # noqa: E501
OpenAPI spec version: 1.1.0
Contact: hjelp@mailmojo.no
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class NewsletterDetail(object):
    """Model of a newsletter as returned by the MailMojo API.

    Originally produced by swagger-codegen.  Refactored: the 24
    hand-written, structurally identical getter/setter property pairs
    (~450 lines of duplication) are now generated from ``swagger_types``
    right after the class body, and ``to_dict`` no longer needs ``six``.
    The external interface — constructor signature, one get/set property
    per attribute, ``to_dict``/``to_str``/``__repr__``/``__eq__``/
    ``__ne__`` — is unchanged.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in definition.
    """

    swagger_types = {
        'completed': 'datetime',
        'data': 'list[Schema]',
        'editor_html': 'str',
        'html': 'str',
        'id': 'int',
        'is_aborted': 'bool',
        'is_draft': 'bool',
        'is_in_campaign': 'bool',
        'is_scheduled': 'bool',
        'is_sending': 'bool',
        'is_sent': 'bool',
        'list': 'List',
        'meta': 'PageMeta',
        'num_recipients': 'int',
        'plain': 'str',
        'saved': 'datetime',
        'screenshot_url': 'object',
        'segments': 'list[MinimalSegment]',
        'started': 'datetime',
        'statistics': 'Statistics',
        'subject': 'str',
        'template_id': 'int',
        'utm_campaign': 'str',
        'view_url': 'object'
    }

    attribute_map = {
        'completed': 'completed',
        'data': 'data',
        'editor_html': 'editor_html',
        'html': 'html',
        'id': 'id',
        'is_aborted': 'is_aborted',
        'is_draft': 'is_draft',
        'is_in_campaign': 'is_in_campaign',
        'is_scheduled': 'is_scheduled',
        'is_sending': 'is_sending',
        'is_sent': 'is_sent',
        'list': 'list',
        'meta': 'meta',
        'num_recipients': 'num_recipients',
        'plain': 'plain',
        'saved': 'saved',
        'screenshot_url': 'screenshot_url',
        'segments': 'segments',
        'started': 'started',
        'statistics': 'statistics',
        'subject': 'subject',
        'template_id': 'template_id',
        'utm_campaign': 'utm_campaign',
        'view_url': 'view_url'
    }

    def __init__(self, completed=None, data=None, editor_html=None, html=None, id=None, is_aborted=None, is_draft=None, is_in_campaign=None, is_scheduled=None, is_sending=None, is_sent=None, list=None, meta=None, num_recipients=None, plain=None, saved=None, screenshot_url=None, segments=None, started=None, statistics=None, subject=None, template_id=None, utm_campaign=None, view_url=None):  # noqa: E501
        """NewsletterDetail - a model defined in Swagger"""  # noqa: E501
        # Snapshot the constructor arguments before any other local is
        # created, so `locals()` contains exactly the parameters.
        provided = {name: value for name, value in locals().items()
                    if name != 'self'}
        # Every declared attribute starts as None in its private slot,
        # exactly as the generated code did.
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # Route non-None arguments through the public property setters.
        for name, value in provided.items():
            if value is not None:
                setattr(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: item.to_dict() if hasattr(item, "to_dict") else item
                                for key, item in value.items()}
            else:
                result[attr] = value
        if issubclass(NewsletterDetail, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, NewsletterDetail):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _newsletter_property(name):
    """Build a plain get/set property proxying the ``_<name>`` slot."""
    private = '_' + name

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        setattr(self, private, value)

    return property(
        _get, _set,
        doc="Gets or sets the %s of this NewsletterDetail." % name)


# Attach one identical get/set property per declared attribute, replacing
# the repetitive hand-written property pairs of the generated code.
for _attr in NewsletterDetail.swagger_types:
    setattr(NewsletterDetail, _attr, _newsletter_property(_attr))
del _attr
| apache-2.0 |
pizzapanther/HoverMom | hovermom/django/contrib/formtools/tests/wizard/namedwizardtests/urls.py | 320 | 1039 | from django.conf.urls import patterns, url
from django.contrib.formtools.tests.wizard.namedwizardtests.forms import (
SessionContactWizard, CookieContactWizard, Page1, Page2, Page3, Page4)
def get_named_session_wizard():
    """Build the session-backed named contact wizard view used below."""
    steps = [('form1', Page1), ('form2', Page2),
             ('form3', Page3), ('form4', Page4)]
    return SessionContactWizard.as_view(
        steps,
        url_name='nwiz_session',
        done_step_name='nwiz_session_done',
    )
def get_named_cookie_wizard():
    """Build the cookie-backed named contact wizard view used below."""
    steps = [('form1', Page1), ('form2', Page2),
             ('form3', Page3), ('form4', Page4)]
    return CookieContactWizard.as_view(
        steps,
        url_name='nwiz_cookie',
        done_step_name='nwiz_cookie_done',
    )
# Each wizard is exposed under two URL patterns: one that addresses a
# specific step and a bare entry URL without a step component.
urlpatterns = patterns('',
    url(r'^nwiz_session/(?P<step>.+)/$', get_named_session_wizard(), name='nwiz_session'),
    url(r'^nwiz_session/$', get_named_session_wizard(), name='nwiz_session_start'),
    url(r'^nwiz_cookie/(?P<step>.+)/$', get_named_cookie_wizard(), name='nwiz_cookie'),
    url(r'^nwiz_cookie/$', get_named_cookie_wizard(), name='nwiz_cookie_start'),
)
| mit |
atheed/servo | tests/wpt/css-tests/tools/pytest/testing/test_pdb.py | 170 | 9594 | import sys
import _pytest._code
def runpdb_and_get_report(testdir, source):
    """Run *source* under ``pytest --pdb`` in-process and return the
    report of the call phase (three reports are emitted in total:
    setup, call and teardown)."""
    testfile = testdir.makepyfile(source)
    run_result = testdir.runpytest_inprocess("--pdb", testfile)
    reports = run_result.reprec.getreports("pytest_runtest_logreport")
    assert len(reports) == 3, reports  # setup/call/teardown
    return reports[1]
class TestPDB:
    """Integration tests for pytest's ``--pdb`` and ``set_trace`` support.

    Two styles are used:

    * in-process runs via ``runpdb_and_get_report`` with ``post_mortem``
      monkeypatched away (the ``pdblist`` funcarg), and
    * real child pytest processes driven through the interactive
      ``(Pdb)`` prompt via ``testdir.spawn_pytest`` (pexpect-style:
      ``expect``/``send``/``sendeof``).
    """

    def pytest_funcarg__pdblist(self, request):
        """Funcarg: record the args of every ``post_mortem`` call.

        Monkeypatches the pdb plugin so no interactive debugger is
        actually started; returns the list collecting the call args.
        """
        monkeypatch = request.getfuncargvalue("monkeypatch")
        pdblist = []
        def mypdb(*args):
            pdblist.append(args)
        plugin = request.config.pluginmanager.getplugin('pdb')
        monkeypatch.setattr(plugin, 'post_mortem', mypdb)
        return pdblist

    def test_pdb_on_fail(self, testdir, pdblist):
        """A failing test triggers exactly one post-mortem call."""
        rep = runpdb_and_get_report(testdir, """
            def test_func():
                assert 0
        """)
        assert rep.failed
        assert len(pdblist) == 1
        # The post-mortem traceback must end in the failing test function.
        tb = _pytest._code.Traceback(pdblist[0][0])
        assert tb[-1].name == "test_func"

    def test_pdb_on_xfail(self, testdir, pdblist):
        """An expected failure must not enter the debugger."""
        rep = runpdb_and_get_report(testdir, """
            import pytest
            @pytest.mark.xfail
            def test_func():
                assert 0
        """)
        assert "xfail" in rep.keywords
        assert not pdblist

    def test_pdb_on_skip(self, testdir, pdblist):
        """A skipped test must not enter the debugger."""
        rep = runpdb_and_get_report(testdir, """
            import pytest
            def test_func():
                pytest.skip("hello")
        """)
        assert rep.skipped
        assert len(pdblist) == 0

    def test_pdb_on_BdbQuit(self, testdir, pdblist):
        """Quitting the debugger (BdbQuit) fails the test without
        re-entering post-mortem."""
        rep = runpdb_and_get_report(testdir, """
            import bdb
            def test_func():
                raise bdb.BdbQuit
        """)
        assert rep.failed
        assert len(pdblist) == 0

    def test_pdb_interaction(self, testdir):
        """--pdb drops into an interactive prompt showing the failing
        frame; EOF ends the session and the run reports the failure."""
        p1 = testdir.makepyfile("""
            def test_1():
                i = 0
                assert i == 1
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*i = 0")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        # The source snippet must not be printed a second time after
        # the debugger session ended.
        assert "def test_1" not in rest
        if child.isalive():
            child.wait()

    def test_pdb_interaction_capture(self, testdir):
        """Captured stdout is shown when entering pdb but not repeated
        in the final report."""
        p1 = testdir.makepyfile("""
            def test_1():
                print("getrekt")
                assert False
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect("getrekt")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "getrekt" not in rest
        if child.isalive():
            child.wait()

    def test_pdb_interaction_exception(self, testdir):
        """Names from the test module are resolvable at the prompt."""
        p1 = testdir.makepyfile("""
            import pytest
            def globalfunc():
                pass
            def test_1():
                pytest.raises(ValueError, globalfunc)
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        child.expect(".*def test_1")
        child.expect(".*pytest.raises.*globalfunc")
        child.expect("(Pdb)")
        child.sendline("globalfunc")
        child.expect(".*function")
        child.sendeof()
        child.expect("1 failed")
        if child.isalive():
            child.wait()

    def test_pdb_interaction_on_collection_issue181(self, testdir):
        """A collection error (NameError at import) also enters pdb."""
        p1 = testdir.makepyfile("""
            import pytest
            xxx
        """)
        child = testdir.spawn_pytest("--pdb %s" % p1)
        #child.expect(".*import pytest.*")
        child.expect("(Pdb)")
        child.sendeof()
        child.expect("1 error")
        if child.isalive():
            child.wait()

    def test_pdb_interaction_on_internal_error(self, testdir):
        """An INTERNALERROR (crash in a hook) also enters pdb."""
        testdir.makeconftest("""
            def pytest_runtest_protocol():
                0/0
        """)
        p1 = testdir.makepyfile("def test_func(): pass")
        child = testdir.spawn_pytest("--pdb %s" % p1)
        #child.expect(".*import pytest.*")
        child.expect("(Pdb)")
        child.sendeof()
        if child.isalive():
            child.wait()

    def test_pdb_interaction_capturing_simple(self, testdir):
        """pytest.set_trace() suspends capturing; earlier output is
        still captured in the final report."""
        p1 = testdir.makepyfile("""
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf-8")
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest  # out is captured
        if child.isalive():
            child.wait()

    def test_pdb_set_trace_interception(self, testdir):
        """Plain pdb.set_trace() is intercepted so it works even while
        pytest is capturing stdin/stdout."""
        p1 = testdir.makepyfile("""
            import pdb
            def test_1():
                pdb.set_trace()
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "reading from stdin while output" not in rest
        if child.isalive():
            child.wait()

    def test_pdb_and_capsys(self, testdir):
        """The capsys fixture remains usable from inside the prompt."""
        p1 = testdir.makepyfile("""
            import pytest
            def test_1(capsys):
                print ("hello1")
                pytest.set_trace()
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.send("capsys.readouterr()\n")
        child.expect("hello1")
        child.sendeof()
        child.read()
        if child.isalive():
            child.wait()

    def test_set_trace_capturing_afterwards(self, testdir):
        """Capturing is restored for tests that run after a set_trace()
        session was continued with 'c'."""
        p1 = testdir.makepyfile("""
            import pdb
            def test_1():
                pdb.set_trace()
            def test_2():
                print ("hello")
                assert 0
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.send("c\n")
        child.expect("test_2")
        child.expect("Captured")
        child.expect("hello")
        child.sendeof()
        child.read()
        if child.isalive():
            child.wait()

    def test_pdb_interaction_doctest(self, testdir):
        """--pdb also fires for failing doctests, with doctest locals
        available at the prompt."""
        p1 = testdir.makepyfile("""
            import pytest
            def function_1():
                '''
                >>> i = 0
                >>> assert i == 1
                '''
        """)
        child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
        child.expect("(Pdb)")
        child.sendline('i')
        child.expect("0")
        child.expect("(Pdb)")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        if child.isalive():
            child.wait()

    def test_pdb_interaction_capturing_twice(self, testdir):
        """Two consecutive set_trace() calls both interact, and output
        printed before each is captured."""
        p1 = testdir.makepyfile("""
            import pytest
            def test_1():
                i = 0
                print ("hello17")
                pytest.set_trace()
                x = 3
                print ("hello18")
                pytest.set_trace()
                x = 4
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("test_1")
        child.expect("x = 3")
        child.expect("(Pdb)")
        child.sendline('c')
        child.expect("x = 4")
        child.sendeof()
        rest = child.read().decode("utf8")
        assert "1 failed" in rest
        assert "def test_1" in rest
        assert "hello17" in rest  # out is captured
        assert "hello18" in rest  # out is captured
        if child.isalive():
            child.wait()

    def test_pdb_used_outside_test(self, testdir):
        """pytest.set_trace() also works in a plain python script
        (run with the interpreter directly, not via pytest)."""
        p1 = testdir.makepyfile("""
            import pytest
            pytest.set_trace()
            x = 5
        """)
        child = testdir.spawn("%s %s" %(sys.executable, p1))
        child.expect("x = 5")
        child.sendeof()
        child.wait()

    def test_pdb_used_in_generate_tests(self, testdir):
        """set_trace() works during collection (pytest_generate_tests)."""
        p1 = testdir.makepyfile("""
            import pytest
            def pytest_generate_tests(metafunc):
                pytest.set_trace()
                x = 5
            def test_foo(a):
                pass
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("x = 5")
        child.sendeof()
        child.wait()

    def test_pdb_collection_failure_is_shown(self, testdir):
        """The collection error itself appears in the output."""
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest_subprocess("--pdb", p1)
        result.stdout.fnmatch_lines([
            "*NameError*xxx*",
            "*1 error*",
        ])

    def test_enter_pdb_hook_is_called(self, testdir):
        """The pytest_enter_pdb hook fires (with the config) before the
        interactive session starts."""
        testdir.makeconftest("""
            def pytest_enter_pdb(config):
                assert config.testing_verification == 'configured'
                print 'enter_pdb_hook'
            def pytest_configure(config):
                config.testing_verification = 'configured'
        """)
        p1 = testdir.makepyfile("""
            import pytest
            def test_foo():
                pytest.set_trace()
        """)
        child = testdir.spawn_pytest(str(p1))
        child.expect("enter_pdb_hook")
        child.send('c\n')
        child.sendeof()
        if child.isalive():
            child.wait()
| mpl-2.0 |
fiuba08/robotframework | doc/userguide/ug2html.py | 4 | 8744 | #!/usr/bin/env python
"""ug2html.py -- Creates HTML version of Robot Framework User Guide
Usage: ug2html.py [ cr(eate) | dist | zip ]
create .. Creates the user guide so that it has relative links to images,
library docs, etc. This version is stored in the version control
and distributed with the source distribution.
dist .... Creates the user guide under 'robotframework-userguide-<version>'
directory and also copies all needed images and other link targets
there. The created output directory can thus be distributed
independently.
zip ..... Uses 'dist' to create a stand-alone distribution and then packages
it into 'robotframework-userguide-<version>.zip'
The version number to use is obtained automatically from the
'src/robot/version.py' file created by 'package.py'.
"""
import os
import sys
import shutil
# First part of this file is Pygments configuration and actual
# documentation generation follows it.
#
#
# Pygments configuration
# ----------------------
#
# This code is from 'external/rst-directive.py' file included in Pygments 0.9
# distribution. For more details see http://pygments.org/docs/rstdirective/
#
"""
The Pygments MoinMoin Parser
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This fragment is a Docutils_ 0.4 directive that renders source code
(to HTML only, currently) via Pygments.
To use it, adjust the options below and copy the code into a module
that you import on initialization. The code then automatically
registers a ``sourcecode`` directive that you can use instead of
normal code blocks like this::
.. sourcecode:: python
My code goes here.
If you want to have different code styles, e.g. one with line numbers
and one without, add formatters with their names in the VARIANTS dict
below. You can invoke them instead of the DEFAULT one by using a
directive option::
.. sourcecode:: python
:linenos:
My code goes here.
Look at the `directive documentation`_ to get all the gory details.
.. _Docutils: http://docutils.sf.net/
.. _directive documentation:
http://docutils.sourceforge.net/docs/howto/rst-directives.html
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
# Options
# ~~~~~~~
# Set to True if you want inline CSS styles instead of classes
INLINESTYLES = False
from pygments.formatters import HtmlFormatter
# The default formatter used when the directive has no options.
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
# Add name -> formatter pairs for every variant you want to use;
# a variant is selected by passing its name as a directive option.
VARIANTS = {
    # 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Docutils directive rendering a source-code block with Pygments.

    ``arguments[0]`` names the lexer; unknown names fall back to the
    plain-text lexer.  When the directive content is a single non-blank
    line naming an existing file, that file's contents are highlighted
    instead of the inline content.  Returns a raw HTML docutils node.
    """
    try:
        lexer = get_lexer_by_name(arguments[0])
    except ValueError:
        # no lexer found - use the text one instead of an exception
        lexer = TextLexer()
    # take an arbitrary option if more than one is given
    formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
    # possibility to read the content from an external file
    filtered = [line for line in content if line.strip()]
    if len(filtered) == 1:
        path = filtered[0].replace('/', os.sep)
        if os.path.isfile(path):
            # Fixed: `open(path).read()` leaked the file handle; the
            # context manager guarantees it is closed.
            with open(path) as source_file:
                content = source_file.read().splitlines()
    parsed = highlight(u'\n'.join(content), lexer, formatter)
    return [nodes.raw('', parsed, format='html')]
# Directive metadata consumed by docutils: one required argument (the
# lexer name), no optional arguments, and the final argument may contain
# whitespace; the directive accepts body content.
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
# Make the directive available in reST sources as `.. sourcecode::`.
directives.register_directive('sourcecode', pygments_directive)
#
# Create the user guide using docutils
#
# This code is based on rst2html.py distributed with docutils
#
# Best effort: switch to the user's default locale (affects e.g. date
# formatting in docutils output); silently continue if it cannot be set.
try:
    import locale
    locale.setlocale(locale.LC_ALL, '')
except:
    pass
def create_userguide():
    """Generate RobotFrameworkUserGuide.html from the rst sources.

    Returns (absolute_output_path, version_string). Side effects: inserts
    the robot source directory into sys.path, writes a temporary
    src/version.rst substitution file (removed afterwards) and changes the
    working directory to this script's directory.
    """
    from docutils.core import publish_cmdline
    print 'Creating user guide ...'
    ugdir = os.path.dirname(os.path.abspath(__file__))
    # Make "from version import get_version" resolve to the robot sources.
    sys.path.insert(0, os.path.join(ugdir, '..', '..', 'src', 'robot'))
    from version import get_version
    print 'Version:', get_version()
    # Provide the |version| substitution used by the rst sources.
    vfile = open(os.path.join(ugdir, 'src', 'version.rst'), 'w')
    vfile.write('.. |version| replace:: %s\n' % get_version())
    vfile.close()
    description = 'HTML generator for Robot Framework User Guide.'
    arguments = ['--time',
                 '--stylesheet-path', ['src/userguide.css'],
                 'src/RobotFrameworkUserGuide.rst',
                 'RobotFrameworkUserGuide.html']
    os.chdir(ugdir)
    publish_cmdline(writer_name='html', description=description, argv=arguments)
    # The generated version.rst is only needed during the build.
    os.unlink(vfile.name)
    ugpath = os.path.abspath(arguments[-1])
    print ugpath
    return ugpath, get_version(sep='-')
#
# Create user guide distribution directory
#
def create_distribution():
    """Build the stand-alone user guide distribution directory.

    Generates the user guide, copies every locally linked resource
    (tools, templates, libraries, images) next to it, and rewrites the
    HTML links to point at the copies. Returns the output directory name.
    """
    import re
    from urlparse import urlparse
    ugpath, version = create_userguide()  # we are in doc/userguide after this
    outdir = 'robotframework-userguide-%s' % version
    tools = os.path.join(outdir, 'tools')
    templates = os.path.join(outdir, 'templates')
    libraries = os.path.join(outdir, 'libraries')
    images = os.path.join(outdir, 'images')
    print 'Creating distribution directory ...'
    if os.path.exists(outdir):
        print 'Removing previous user guide distribution'
        shutil.rmtree(outdir)
    for dirname in [outdir, tools, templates, libraries, images]:
        print "Creating output directory '%s'" % dirname
        os.mkdir(dirname)
    def replace_links(res):
        # Only rewrite matches that actually captured an href/src URL.
        if not res.group(5):
            return res.group(0)
        scheme, _, path, _, _, fragment = urlparse(res.group(5))
        # External URLs and pure #fragment anchors are left untouched.
        if scheme or (fragment and not path):
            return res.group(0)
        # %%s is a placeholder for the target subdirectory, filled below.
        replaced_link = '%s %s="%%s/%s"' % (res.group(1), res.group(4),
                                            os.path.basename(path))
        if path.startswith('../../tools'):
            copy(path, tools)
            copy_tool_images(path)
            replaced_link = replaced_link % 'tools'
        elif path.startswith('../../templates'):
            copy(path, templates)
            replaced_link = replaced_link % 'templates'
        elif path.startswith('../libraries'):
            copy(path, libraries)
            replaced_link = replaced_link % 'libraries'
        elif path.startswith('src/'):
            copy(path, images)
            replaced_link = replaced_link % 'images'
        else:
            raise ValueError('Invalid link target: %s (context: %s)'
                             % (path, res.group(0)))
        print "Modified link '%s' -> '%s'" % (res.group(0), replaced_link)
        return replaced_link
    def copy(source, dest):
        # Logging wrapper around shutil.copy.
        print "Copying '%s' -> '%s'" % (source, dest)
        shutil.copy(source, dest)
    def copy_tool_images(path):
        # Tool docs come from .txt sources; copy the figures they embed.
        indir = os.path.dirname(path)
        for line in open(os.path.splitext(path)[0]+'.txt').readlines():
            if line.startswith('.. figure::'):
                copy(os.path.join(indir, line.strip().split()[-1]), tools)
    # Matches <a ...> and <img ...> tags, capturing an optional href/src.
    link_regexp = re.compile('''
    (<(a|img)\s+.*?)
    (\s+(href|src)="(.*?)"|>)
    ''', re.VERBOSE | re.DOTALL | re.IGNORECASE)
    content = open(ugpath).read()
    content = link_regexp.sub(replace_links, content)
    outfile = open(os.path.join(outdir, os.path.basename(ugpath)), 'wb')
    outfile.write(content)
    outfile.close()
    print os.path.abspath(outfile.name)
    return outdir
#
# Create a zip distribution package
#
def create_zip():
    """Build the user guide distribution directory and zip it up."""
    distribution_dir = create_distribution()
    zip_distribution(distribution_dir)
def zip_distribution(dirpath):
    """Generic zipper. Used also by qs2html.py

    Recursively zips ``dirpath`` into ``<dirpath>.zip`` using deflate
    compression, then deletes the original directory tree.
    """
    from zipfile import ZipFile, ZIP_DEFLATED
    print 'Creating zip package ...'
    zippath = os.path.normpath(dirpath) + '.zip'
    zipfile = ZipFile(zippath, 'w', compression=ZIP_DEFLATED)
    for root, _, files in os.walk(dirpath):
        for name in files:
            path = os.path.join(root, name)
            print "Adding '%s'" % path
            zipfile.write(path)
    zipfile.close()
    print 'Removing distribution directory', dirpath
    shutil.rmtree(dirpath)
    print os.path.abspath(zippath)
if __name__ == '__main__':
    # Dispatch on the first CLI argument; remaining args are passed through.
    actions = { 'create': create_userguide, 'cr': create_userguide,
                'dist': create_distribution, 'zip': create_zip }
    try:
        actions[sys.argv[1]](*sys.argv[2:])
    except (KeyError, IndexError, TypeError):
        # Unknown action, missing argument or wrong arity: show usage.
        print __doc__
| apache-2.0 |
bschuon/django-oscar | tests/unit/wishlist_tests.py | 69 | 1388 | from django.test import TestCase
from oscar.apps.wishlists.models import WishList
from oscar.core.compat import get_user_model
User = get_user_model()
class TestAWishlist(TestCase):
    """Tests for WishList behaviour that needs no specific visibility."""

    def test_can_generate_a_random_key(self):
        key = WishList.random_key(6)
        # assertEqual reports the actual length on failure, unlike the
        # former assertTrue(len(key) == 6) which only said "False is not true".
        self.assertEqual(len(key), 6)
class TestAPublicWishList(TestCase):
    """Behaviour of a wish list whose visibility is PUBLIC."""

    def setUp(self):
        self.wishlist = WishList(visibility=WishList.PUBLIC)

    def test_is_visible_to_anyone(self):
        random_visitor = User()
        self.assertTrue(self.wishlist.is_allowed_to_see(random_visitor))
class TestASharedWishList(TestCase):
    """Behaviour of a wish list whose visibility is SHARED."""

    def setUp(self):
        self.wishlist = WishList(visibility=WishList.SHARED)

    def test_is_visible_to_anyone(self):
        random_visitor = User()
        self.assertTrue(self.wishlist.is_allowed_to_see(random_visitor))
class TestAPrivateWishList(TestCase):
    """Behaviour of a wish list with the default (private) visibility."""

    def setUp(self):
        self.owner = User(id=1)
        self.stranger = User(id=2)
        self.wishlist = WishList(owner=self.owner)

    def test_is_visible_only_to_its_owner(self):
        self.assertTrue(self.wishlist.is_allowed_to_see(self.owner))
        self.assertFalse(self.wishlist.is_allowed_to_see(self.stranger))

    def test_can_only_be_edited_by_its_owner(self):
        self.assertTrue(self.wishlist.is_allowed_to_edit(self.owner))
        self.assertFalse(self.wishlist.is_allowed_to_edit(self.stranger))
| bsd-3-clause |
ottoszika/smartrcs | smartrcs/logic/cube_orientation.py | 1 | 3476 | class CubeOrientation(object):
"""
The :class:`CubeOrientation <CubeOrientation>` class.
Rotations applied on the cube sometimes needs reorientation,
so this class keeps tracking these orientation changes.
"""
def __init__(self, faces=None):
"""
Initialize a plain cube ore a reoriented one
:param dict faces: Initial orientation
"""
if faces is None:
# Default orientation
self.__faces = {
'U': 'U',
'L': 'L',
'F': 'F',
'R': 'R',
'B': 'B',
'D': 'D'
}
else:
self.__faces = faces
def rotate_ox_cw(self):
"""
Rotate clockwise on OX axe
:return: The new cube orientation
:rtype: dict
"""
self.__faces = {
'U': self.__faces['F'],
'L': self.__faces['L'],
'F': self.__faces['D'],
'R': self.__faces['R'],
'B': self.__faces['U'],
'D': self.__faces['B']
}
return self.__faces
def rotate_ox_ccw(self):
"""
Rotate counterclockwise on OX axe
:return: The new cube orientation
:rtype: dict
"""
self.__faces = {
'U': self.__faces['B'],
'L': self.__faces['L'],
'F': self.__faces['U'],
'R': self.__faces['R'],
'B': self.__faces['D'],
'D': self.__faces['F']
}
return self.__faces
def rotate_oy_cw(self):
"""
Rotate clockwise on OY axe
:return: The new cube orientation
:rtype: dict
"""
self.__faces = {
'U': self.__faces['U'],
'L': self.__faces['F'],
'F': self.__faces['R'],
'R': self.__faces['B'],
'B': self.__faces['L'],
'D': self.__faces['D']
}
return self.__faces
def rotate_oy_ccw(self):
"""
Rotate counterclockwise on OY axe
:return: The new cube orientation
:rtype: dict
"""
self.__faces = {
'U': self.__faces['U'],
'L': self.__faces['B'],
'F': self.__faces['L'],
'R': self.__faces['F'],
'B': self.__faces['R'],
'D': self.__faces['D']
}
return self.__faces
def rotate_oz_cw(self):
"""
Rotate clockwise on OZ axe
:return: The new cube orientation
:rtype: dict
"""
self.__faces = {
'U': self.__faces['L'],
'L': self.__faces['D'],
'F': self.__faces['F'],
'R': self.__faces['U'],
'B': self.__faces['B'],
'D': self.__faces['R']
}
return self.__faces
def rotate_oz_ccw(self):
"""
Rotate counterclockwise on OZ axe
:return: The new cube orientation
:rtype: dict
"""
self.__faces = {
'U': self.__faces['R'],
'L': self.__faces['U'],
'F': self.__faces['F'],
'R': self.__faces['D'],
'B': self.__faces['B'],
'D': self.__faces['L']
}
return self.__faces
    @property
    def faces(self):
        """
        Get faces orientation

        :return: The cube faces orientation
        :rtype: dict
        """
        return self.__faces
| mit |
jr0d/mercury | src/tests/common/unit/helpers/test_cli.py | 1 | 6700 | # Copyright 2017 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import pytest
import six
import subprocess
from mercury.common.helpers import cli
from tests.common.unit.base import MercuryCommonUnitTest
class CLIResultUnitTest(MercuryCommonUnitTest):
    """Unit tests for mercury.common.helpers.cli.CLIResult"""
    def test_setter(self):
        """Test setters for stdout and stderr."""
        result = cli.CLIResult('', '', 0)
        result.stdout = 'out'
        result.stderr = 'err'
        # repr() of a CLIResult is its stdout.
        assert repr(result) == 'out'
        assert result.stderr == 'err'
    def test_decode(self):
        """Test arguments are decoded for new CLIResult instances."""
        output = 'out'
        # NOTE(review): with the str literal above only the else-branch runs;
        # the bytes case is still covered via output.encode() below.
        if isinstance(output, bytes):
            output_str = output.decode(cli.CLIResult.ENCODING)
            output_bytes = output
        else:
            output_str = output
            output_bytes = output.encode(cli.CLIResult.ENCODING)
        result_str = cli.CLIResult(output_str, 'err', 0)
        result_bytes = cli.CLIResult(output_bytes, 'err', 0)
        # Both constructions must expose stdout as text, not bytes.
        assert isinstance(result_str.stdout, six.string_types)
        assert isinstance(result_bytes.stdout, six.string_types)
class CliRunUnitTest(MercuryCommonUnitTest):
    """Unit tests for mercury.common.helpers.cli.run()"""
    @mock.patch('mercury.common.helpers.cli.os.environ.copy')
    @mock.patch('subprocess.Popen')
    def test_run(self, mock_subprocess_popen, mock_os_environ):
        """Test run()"""
        mock_subprocess_popen.return_value.returncode = 0
        mock_subprocess_popen.return_value.communicate.return_value = ('out',
                                                                       'err')
        mock_os_environ.return_value = {'key': 'val'}
        cli_result = cli.run('ls')
        # NOTE(review): bufsize=1048567 mirrors the constant used inside
        # cli.run (looks like a typo for 1048576); keep the two in sync.
        mock_subprocess_popen.assert_called_once_with(['ls'],
                                                      stdin=None,
                                                      stdout=subprocess.PIPE,
                                                      stderr=subprocess.PIPE,
                                                      bufsize=1048567,
                                                      env={'key': 'val'})
        mock_subprocess_popen.return_value.communicate.assert_called_once_with(
            input=None)
        assert cli_result.stdout == 'out'
        assert cli_result.stderr == 'err'
        assert cli_result.returncode == 0
    @mock.patch('mercury.common.helpers.cli.os.environ.copy')
    @mock.patch('subprocess.Popen')
    def test_run_with_input(self, mock_subprocess_popen, mock_os_environ):
        """Test run() with data passed into stdin."""
        mock_subprocess_popen.return_value.returncode = 0
        mock_subprocess_popen.return_value.communicate.return_value = ('foo',
                                                                       '')
        mock_os_environ.return_value = {'key': 'val'}
        cli_result = cli.run('python', _input='print "foo"')
        # Passing _input switches stdin from None to a PIPE.
        mock_subprocess_popen.assert_called_once_with(['python'],
                                                      stdin=subprocess.PIPE,
                                                      stdout=subprocess.PIPE,
                                                      stderr=subprocess.PIPE,
                                                      bufsize=1048567,
                                                      env={'key': 'val'})
        mock_subprocess_popen.return_value.communicate.assert_called_once_with(
            input='print "foo"')
        assert cli_result.stdout == 'foo'
        assert cli_result.stderr == ''
        assert cli_result.returncode == 0
    @mock.patch('subprocess.Popen')
    def test_run_dry_run(self, mock_subprocess_popen):
        """Test run() in dry_run mode."""
        cli_result = cli.run('ls', dry_run=True)
        # Dry-run must not spawn any process.
        mock_subprocess_popen.assert_not_called()
        mock_subprocess_popen.return_value.communicate.assert_not_called()
        assert cli_result.stdout == ''
        assert cli_result.stderr == ''
        assert cli_result.returncode == 0
    @mock.patch('subprocess.Popen')
    def test_run_popen_exception(self, mock_subprocess_popen):
        """Test run() with subprocess.Popen failing."""
        # First call raises OSError, second raises ValueError.
        mock_subprocess_popen.side_effect = [OSError, ValueError]
        with pytest.raises(cli.CLIException):
            cli.run('ls', raise_exception=True)
        cli_result = cli.run('ls', raise_exception=False)
        assert cli_result.stdout == ''
        assert cli_result.stderr == "Failed while executing 'ls': "
        assert cli_result.returncode == 1
        assert mock_subprocess_popen.call_count == 2
        mock_subprocess_popen.return_value.communicate.assert_not_called()
    @mock.patch('subprocess.Popen')
    def test_run_error(self, mock_subprocess_popen):
        """Test run() when the command returns an error."""
        mock_subprocess_popen.return_value.returncode = 1
        mock_subprocess_popen.return_value.communicate.return_value = ('',
                                                                       'err')
        with pytest.raises(cli.CLIException):
            cli.run('ls', raise_exception=True)
        cli_result = cli.run('ls', raise_exception=False)
        assert cli_result.stdout == ''
        assert cli_result.stderr == 'err'
        assert cli_result.returncode == 1
        assert mock_subprocess_popen.call_count == 2
        assert mock_subprocess_popen.return_value.communicate.call_count == 2
@mock.patch('mercury.common.helpers.cli.os.path.exists')
def test_find_in_path(mock_os_path_exists):
    """Test find_in_path()."""
    # Test with absolute path.
    mock_os_path_exists.return_value = True
    path = cli.find_in_path('/does/not/exist')
    assert path == '/does/not/exist'
    # Test with relative path.
    with mock.patch.dict('os.environ', {'PATH': '/:/tmp'}):
        path = cli.find_in_path('foo')
        assert path == '/foo'
    # Test with non-existing file.
    mock_os_path_exists.return_value = False
    path = cli.find_in_path('/tmp/foo')
    assert path is None
| apache-2.0 |
davidsiefert/ansible-modules-extras | packaging/os/pacman.py | 31 | 10291 | #!/usr/bin/python -tt
# -*- coding: utf-8 -*-
# (c) 2012, Afterburn <http://github.com/afterburn>
# (c) 2013, Aaron Bull Schaefer <aaron@elasticdog.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pacman
short_description: Manage packages with I(pacman)
description:
- Manage packages with the I(pacman) package manager, which is used by
Arch Linux and its variants.
version_added: "1.0"
author:
- "'Aaron Bull Schaefer (@elasticdog)' <aaron@elasticdog.com>"
- "Afterburn"
notes: []
requirements: []
options:
name:
description:
- Name of the package to install, upgrade, or remove.
required: false
default: null
state:
description:
- Desired state of the package.
required: false
default: "present"
choices: ["present", "absent", "latest"]
recurse:
description:
- When removing a package, also remove its dependencies, provided
that they are not required by other packages and were not
explicitly installed by a user.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "1.3"
force:
description:
- Force remove package, without any checks.
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
update_cache:
description:
- Whether or not to refresh the master package lists. This can be
run as part of a package installation or as a separate step.
required: false
default: "no"
choices: ["yes", "no"]
upgrade:
description:
- Whether or not to upgrade whole system
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
'''
EXAMPLES = '''
# Install package foo
- pacman: name=foo state=present
# Upgrade package foo
- pacman: name=foo state=latest update_cache=yes
# Remove packages foo and bar
- pacman: name=foo,bar state=absent
# Recursively remove package baz
- pacman: name=baz state=absent recurse=yes
# Run the equivalent of "pacman -Sy" as a separate step
- pacman: update_cache=yes
# Run the equivalent of "pacman -Su" as a separate step
- pacman: upgrade=yes
# Run the equivalent of "pacman -Rdd", force remove package baz
- pacman: name=baz state=absent force=yes
'''
import json
import shlex
import os
import re
import sys
def get_version(pacman_output):
    """Take pacman -Qi or pacman -Si output and get the Version"""
    for info_line in pacman_output.split('\n'):
        if 'Version' not in info_line:
            continue
        # "Version : 1.2.3-1" -> "1.2.3-1"
        return info_line.split(':')[1].strip()
    return None
def query_package(module, pacman_path, name, state="present"):
    """Query the package status in both the local system and the repository. Returns a boolean to indicate if the package is installed, and a second boolean to indicate if the package is up-to-date."""
    if state == "present":
        lcmd = "%s -Qi %s" % (pacman_path, name)
        lrc, lstdout, lstderr = module.run_command(lcmd, check_rc=False)
        if lrc != 0:
            # package is not installed locally
            return False, False
        # get the version installed locally (if any)
        lversion = get_version(lstdout)
        rcmd = "%s -Si %s" % (pacman_path, name)
        rrc, rstdout, rstderr = module.run_command(rcmd, check_rc=False)
        # get the version in the repository
        rversion = get_version(rstdout)
        if rrc == 0:
            # Return True to indicate that the package is installed locally, and the result of the version number comparison
            # to determine if the package is up-to-date.
            return True, (lversion == rversion)
        # NOTE(review): when the repo lookup fails (rrc != 0) an installed
        # package is reported as NOT installed (e.g. one installed from a
        # local file) -- confirm this is intentional.
        return False, False
def update_package_db(module, pacman_path):
    """Refresh the master package database via ``pacman -Sy``."""
    refresh_cmd = "%s -Sy" % (pacman_path)
    rc, stdout, stderr = module.run_command(refresh_cmd, check_rc=False)
    if rc == 0:
        return True
    else:
        module.fail_json(msg="could not update package db")
def upgrade(module, pacman_path):
    """Upgrade the whole system (``pacman -Su``) when anything is outdated.

    Always terminates the module via exit_json/fail_json.
    """
    check_cmd = "%s -Qqu" % (pacman_path)
    upgrade_cmd = "%s -Suq --noconfirm" % (pacman_path)
    # ``pacman -Qqu`` exits non-zero when no upgrades are pending.
    rc, stdout, stderr = module.run_command(check_cmd, check_rc=False)
    if rc != 0:
        module.exit_json(changed=False, msg='Nothing to upgrade')
    rc, stdout, stderr = module.run_command(upgrade_cmd, check_rc=False)
    if rc != 0:
        module.fail_json(msg="could not upgrade")
    module.exit_json(changed=True, msg='System upgraded')
def remove_packages(module, pacman_path, packages):
    """Remove each named package with the appropriate ``pacman -R`` variant.

    Honours both module options: ``recurse`` (-Rs: also remove no-longer
    needed dependencies) and ``force`` (-Rdd: skip dependency checks); the
    combination uses -Rdds. Terminates the module via exit_json/fail_json.

    BUG FIX: this function used to be defined twice; the second definition
    shadowed the first, so the ``recurse`` option was silently ignored.
    """
    if module.params["recurse"] and module.params["force"]:
        args = "Rdds"
    elif module.params["recurse"]:
        args = "Rs"
    elif module.params["force"]:
        args = "Rdd"
    else:
        args = "R"

    remove_c = 0
    # Using a for loop in case of error, we can report the package that failed
    for package in packages:
        # Query the package first, to see if we even need to remove
        installed, updated = query_package(module, pacman_path, package)
        if not installed:
            continue

        cmd = "%s -%s %s --noconfirm" % (pacman_path, args, package)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)

        if rc != 0:
            module.fail_json(msg="failed to remove %s" % (package))

        remove_c += 1

    if remove_c > 0:
        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)

    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, pacman_path, state, packages, package_files):
    """Install (or upgrade) each named package, then exit the module.

    ``package_files[i]`` is the local *.pkg.tar.xz path for packages given
    as files (installed with -U); otherwise the repo package is used (-S).
    """
    install_c = 0
    for i, package in enumerate(packages):
        # if the package is installed and state == present or state == latest and is up-to-date then skip
        installed, updated = query_package(module, pacman_path, package)
        if installed and (state == 'present' or (state == 'latest' and updated)):
            continue
        if package_files[i]:
            params = '-U %s' % package_files[i]
        else:
            params = '-S %s' % package
        cmd = "%s %s --noconfirm" % (pacman_path, params)
        rc, stdout, stderr = module.run_command(cmd, check_rc=False)
        if rc != 0:
            module.fail_json(msg="failed to install %s" % (package))
        install_c += 1
    if install_c > 0:
        module.exit_json(changed=True, msg="installed %s package(s)" % (install_c))
    module.exit_json(changed=False, msg="package(s) already installed")
def check_packages(module, pacman_path, packages, state):
    """Check-mode helper: report which packages would change for ``state``.

    Always terminates the module via exit_json.
    """
    would_be_changed = []
    for package in packages:
        installed, updated = query_package(module, pacman_path, package)
        if ((state in ["present", "latest"] and not installed) or
                (state == "absent" and installed) or
                (state == "latest" and not updated)):
            would_be_changed.append(package)
    if would_be_changed:
        if state == "absent":
            state = "removed"
        module.exit_json(changed=True, msg="%s package(s) would be %s" % (
            len(would_be_changed), state))
    else:
        # BUG FIX: the keyword was previously misspelled as "change=False",
        # so check mode never reported changed=False correctly here.
        module.exit_json(changed=False, msg="package(s) already %s" % state)
def main():
    """Module entry point: parse parameters and dispatch to the helpers."""
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(aliases=['pkg']),
            state = dict(default='present', choices=['present', 'installed', "latest", 'absent', 'removed']),
            recurse = dict(default='no', choices=BOOLEANS, type='bool'),
            force = dict(default='no', choices=BOOLEANS, type='bool'),
            upgrade = dict(default='no', choices=BOOLEANS, type='bool'),
            update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')),
        required_one_of = [['name', 'update_cache', 'upgrade']],
        supports_check_mode = True)
    pacman_path = module.get_bin_path('pacman', True)
    if not os.path.exists(pacman_path):
        module.fail_json(msg="cannot find pacman, in path %s" % (pacman_path))
    p = module.params
    # normalize the state parameter
    if p['state'] in ['present', 'installed']:
        p['state'] = 'present'
    elif p['state'] in ['absent', 'removed']:
        p['state'] = 'absent'
    if p["update_cache"] and not module.check_mode:
        update_package_db(module, pacman_path)
        # Cache-only invocation: report success and stop here.
        if not p['name']:
            module.exit_json(changed=True, msg='updated the package master lists')
    if p['update_cache'] and module.check_mode and not p['name']:
        module.exit_json(changed=True, msg='Would have updated the package cache')
    if p['upgrade']:
        upgrade(module, pacman_path)
    if p['name']:
        pkgs = p['name'].split(',')
        # Track which entries were given as local package files (-U install).
        pkg_files = []
        for i, pkg in enumerate(pkgs):
            if pkg.endswith('.pkg.tar.xz'):
                # The package given is a filename, extract the raw pkg name from
                # it and store the filename
                pkg_files.append(pkg)
                pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1])
            else:
                pkg_files.append(None)
        if module.check_mode:
            check_packages(module, pacman_path, pkgs, p['state'])
        if p['state'] in ['present', 'latest']:
            install_packages(module, pacman_path, p['state'], pkgs, pkg_files)
        elif p['state'] == 'absent':
            remove_packages(module, pacman_path, pkgs)
# import module snippets
from ansible.module_utils.basic import *
# Entry point when executed by Ansible.
if __name__ == "__main__":
    main()
| gpl-3.0 |
darjus-amzn/boto | boto/ec2/launchspecification.py | 170 | 3829 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a launch specification for Spot instances.
"""
from boto.ec2.ec2object import EC2Object
from boto.resultset import ResultSet
from boto.ec2.blockdevicemapping import BlockDeviceMapping
from boto.ec2.group import Group
from boto.ec2.instance import SubParse
class GroupList(list):
    """A list that accumulates <groupId> element values while SAX-parsing
    an EC2 response; all other elements are ignored."""

    def startElement(self, name, attrs, connection):
        # No nested structures to delegate to.
        return None

    def endElement(self, name, value, connection):
        if name != 'groupId':
            return
        self.append(value)
class LaunchSpecification(EC2Object):
    """Spot-instance launch specification, populated from the EC2 XML
    response through the SAX-style startElement/endElement callbacks."""

    def __init__(self, connection=None):
        super(LaunchSpecification, self).__init__(connection)
        self.key_name = None
        self.instance_type = None
        self.image_id = None
        self.groups = []
        self.placement = None
        self.kernel = None
        self.ramdisk = None
        self.monitored = False
        self.subnet_id = None
        # True while parsing inside <monitoring>; lets endElement tell the
        # monitoring <state> apart from other <state> elements.
        self._in_monitoring_element = False
        self.block_device_mapping = None
        self.instance_profile = None
        self.ebs_optimized = False

    def __repr__(self):
        return 'LaunchSpecification(%s)' % self.image_id

    def startElement(self, name, attrs, connection):
        # Return a child handler for nested structures, or None to keep
        # handling elements on this object.
        if name == 'groupSet':
            self.groups = ResultSet([('item', Group)])
            return self.groups
        elif name == 'monitoring':
            self._in_monitoring_element = True
        elif name == 'blockDeviceMapping':
            self.block_device_mapping = BlockDeviceMapping()
            return self.block_device_mapping
        elif name == 'iamInstanceProfile':
            self.instance_profile = SubParse('iamInstanceProfile')
            return self.instance_profile
        else:
            return None

    def endElement(self, name, value, connection):
        # Map leaf-element text onto the matching attribute.
        if name == 'imageId':
            self.image_id = value
        elif name == 'keyName':
            self.key_name = value
        elif name == 'instanceType':
            self.instance_type = value
        elif name == 'availabilityZone':
            self.placement = value
        elif name == 'placement':
            pass
        elif name == 'kernelId':
            self.kernel = value
        elif name == 'ramdiskId':
            self.ramdisk = value
        elif name == 'subnetId':
            self.subnet_id = value
        elif name == 'state':
            if self._in_monitoring_element:
                if value == 'enabled':
                    self.monitored = True
                self._in_monitoring_element = False
        elif name == 'ebsOptimized':
            self.ebs_optimized = (value == 'true')
        else:
            # Unknown leaf element: store verbatim as an attribute.
            setattr(self, name, value)
| mit |
ryanlelek/SMORESGaitRecorder | proto/road_pb2.py | 1 | 2295 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: road.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
import vector3d_pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='road.proto',
package='gazebo.msgs',
serialized_pb='\n\nroad.proto\x12\x0bgazebo.msgs\x1a\x0evector3d.proto\"I\n\x04Road\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\r\n\x05width\x18\x02 \x02(\x01\x12$\n\x05point\x18\x03 \x03(\x0b\x32\x15.gazebo.msgs.Vector3d')
_ROAD = _descriptor.Descriptor(
name='Road',
full_name='gazebo.msgs.Road',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='gazebo.msgs.Road.name', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=unicode("", "utf-8"),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='width', full_name='gazebo.msgs.Road.width', index=1,
number=2, type=1, cpp_type=5, label=2,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='point', full_name='gazebo.msgs.Road.point', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
extension_ranges=[],
serialized_start=43,
serialized_end=116,
)
_ROAD.fields_by_name['point'].message_type = vector3d_pb2._VECTOR3D
DESCRIPTOR.message_types_by_name['Road'] = _ROAD
class Road(_message.Message):
__metaclass__ = _reflection.GeneratedProtocolMessageType
DESCRIPTOR = _ROAD
# @@protoc_insertion_point(class_scope:gazebo.msgs.Road)
# @@protoc_insertion_point(module_scope)
| gpl-3.0 |
cogeorg/black_rhino | examples/degroot/networkx/algorithms/components/tests/test_attracting.py | 35 | 2377 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
class TestAttractingComponents(object):
    """Tests for the networkx attracting-component algorithms."""
    def setUp(self):
        # G1: three sink nodes (2, 9, 10) -> three singleton attractors.
        self.G1 = nx.DiGraph()
        self.G1.add_edges_from([(5,11),(11,2),(11,9),(11,10),
                                (7,11),(7,8),(8,9),(3,8),(3,10)])
        # G2: the 1 <-> 2 cycle is the single attracting component.
        self.G2 = nx.DiGraph()
        self.G2.add_edges_from([(0,1),(0,2),(1,1),(1,2),(2,1)])
        # G3: two disjoint 2-cycles, both attracting.
        self.G3 = nx.DiGraph()
        self.G3.add_edges_from([(0,1),(1,2),(2,1),(0,3),(3,4),(4,3)])
    def test_attracting_components(self):
        ac = nx.attracting_components(self.G1)
        assert_true([2] in ac)
        assert_true([9] in ac)
        assert_true([10] in ac)
        ac = nx.attracting_components(self.G2)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true(ac == [(1,2)])
        ac = nx.attracting_components(self.G3)
        ac = [tuple(sorted(x)) for x in ac]
        assert_true((1,2) in ac)
        assert_true((3,4) in ac)
        assert_equal(len(ac), 2)
    def test_number_attacting_components(self):
        assert_equal(len(nx.attracting_components(self.G1)), 3)
        assert_equal(len(nx.attracting_components(self.G2)), 1)
        assert_equal(len(nx.attracting_components(self.G3)), 2)
    def test_is_attracting_component(self):
        # Whole graphs are not attracting; the {1, 2} subgraph of G3 is.
        assert_false(nx.is_attracting_component(self.G1))
        assert_false(nx.is_attracting_component(self.G2))
        assert_false(nx.is_attracting_component(self.G3))
        g2 = self.G3.subgraph([1,2])
        assert_true(nx.is_attracting_component(g2))
    def test_attracting_component_subgraphs(self):
        subgraphs = nx.attracting_component_subgraphs(self.G1)
        for subgraph in subgraphs:
            assert_equal(len(subgraph), 1)
        self.G2.add_edge(1,2,eattr='red') # test attrs copied to subgraphs
        self.G2.node[2]['nattr']='blue'
        self.G2.graph['gattr']='green'
        subgraphs = nx.attracting_component_subgraphs(self.G2)
        assert_equal(len(subgraphs), 1)
        SG2=subgraphs[0]
        assert_true(1 in SG2)
        assert_true(2 in SG2)
        assert_equal(SG2[1][2]['eattr'],'red')
        assert_equal(SG2.node[2]['nattr'],'blue')
        assert_equal(SG2.graph['gattr'],'green')
        # Subgraph attribute data is a copy: mutating it must not leak back.
        SG2.add_edge(1,2,eattr='blue')
        assert_equal(SG2[1][2]['eattr'],'blue')
        assert_equal(self.G2[1][2]['eattr'],'red')
| gpl-3.0 |
eResearchSA/reporting-nectar-keystone | ersa_nectar_keystone/api.py | 1 | 7317 | #!/usr/bin/env python3
"""The app: very basic."""
# pylint: disable=cyclic-import, no-self-use, no-member, invalid-name, no-init
# pylint: disable=too-few-public-methods, no-name-in-module, import-error
from functools import lru_cache, wraps
from flask import request
from flask import current_app as app
from flask.ext.restful import Resource
from flask.ext.restful import reqparse
from sqlalchemy.orm.relationships import RelationshipProperty
from ersa_nectar_keystone import db, Snapshot, Domain
from ersa_nectar_keystone import Account, Tenant, Membership
from ersa_nectar_keystone import AccountReference, AccountReferenceMapping
QUERY_PARSER = reqparse.RequestParser()
# "filter" may repeat; each occurrence is an "attr.op.value" expression
# handed to dynamic_query().
QUERY_PARSER.add_argument("filter", action="append", help="Filter")
QUERY_PARSER.add_argument("page", type=int, default=1, help="Page #")
ITEMS_PER_PAGE = 1000
def get_or_create(session, model, **kwargs):
    """Fetch object if returned by filter query, else create new."""
    existing = session.query(model).filter_by(**kwargs).first()
    if existing:
        return existing
    created = model(**kwargs)
    session.add(created)
    session.flush()
    return created
@lru_cache(maxsize=1000)
def get_domain(name):
    """Extract an organisational domain from an email address."""
    if "@" not in name:
        return None
    domain_name = name.split("@")[1]
    if domain_name.endswith(".edu.au"):
        # Keep institution.edu.au (the last three labels).
        domain_name = ".".join(domain_name.split(".")[-3:])
    elif domain_name.endswith(".edu"):
        # Keep institution.edu (the last two labels).
        domain_name = ".".join(domain_name.split(".")[-2:])
    return get_or_create(db.session, Domain, name=domain_name)
def dynamic_query(model, query, expression):
    """
    Construct query based on:
        attribute.operation.expression
    For example:
        foo.eq.42

    :param model: mapped class whose attribute is being filtered
    :param query: SQLAlchemy query to narrow
    :param expression: "attr.op.value" filter description
    :return: the query with the extra filter applied
    """
    # maxsplit=2 so the value may itself contain dots: previously
    # maxsplit=3 made e.g. "host.eq.10.0.0.1" raise ValueError on unpack.
    key, op, value = expression.split(".", 2)
    column = getattr(model, key, None)
    # Relationship attributes are filtered via their foreign-key column.
    if isinstance(column.property, RelationshipProperty):
        column = getattr(model, key + "_id", None)
    if op == "in":
        query_filter = column.in_(value.split(","))
    else:
        # Resolve the operator name to a column method, trying the plain
        # name, a trailing-underscore variant (e.g. "is" -> is_) and the
        # dunder form (e.g. "eq" -> __eq__).
        attr = None
        for candidate in ["%s", "%s_", "__%s__"]:
            if hasattr(column, candidate % op):
                attr = candidate % op
                break
        if value == "null":
            value = None
        query_filter = getattr(column, attr)(value)
    return query.filter(query_filter)
def require_auth(func):
    """Very simple authentication via HTTP header."""
    @wraps(func)
    def decorated(*args, **kwargs):
        """Reject the request unless the shared-secret header matches."""
        supplied = request.headers.get("x-ersa-nectar-keystone-token", "")
        if supplied != app.config["ERSA_NECTAR_KEYSTONE_TOKEN"]:
            return "", 401
        return func(*args, **kwargs)
    return decorated
class PingResource(Resource):
    """Basic liveness test."""

    def get(self):
        """Answer a liveness probe."""
        return "pong"
class AccountResource(Resource):
    """Account"""

    @require_auth
    def get(self):
        """Return every account as a JSON-serialisable dict."""
        records = Account.query.all()
        return [record.json() for record in records]
class AccountReferenceResource(Resource):
    """Account Reference"""

    @require_auth
    def get(self):
        """Account Reference"""
        args = QUERY_PARSER.parse_args()
        query = AccountReference.query
        for expression in args["filter"] or []:
            query = dynamic_query(AccountReference, query, expression)
        page = query.paginate(args["page"], per_page=ITEMS_PER_PAGE)
        return [reference.json() for reference in page.items]

    @require_auth
    def post(self):
        # POST mirrors GET so long filter lists can travel in the body.
        return self.get()
class SnapshotResource(Resource):
    """Snapshot"""

    @require_auth
    def get(self):
        """Snapshot"""
        # Unpaginated: return every snapshot as a JSON-ready dict.
        return [record.json() for record in Snapshot.query.all()]
class DomainResource(Resource):
    """Domain"""

    @require_auth
    def get(self):
        """Domain"""
        # Unpaginated: return every organisational domain.
        return [record.json() for record in Domain.query.all()]
class TenantResource(Resource):
    """Tenant"""

    @require_auth
    def get(self):
        """Tenant"""
        # Unpaginated: return every tenant as a JSON-ready dict.
        return [record.json() for record in Tenant.query.all()]
class MembershipResource(Resource):
    """Membership"""

    @require_auth
    def get(self):
        """Membership"""
        args = QUERY_PARSER.parse_args()
        query = Membership.query
        for expression in args["filter"] or []:
            query = dynamic_query(Membership, query, expression)
        page = query.paginate(args["page"], per_page=ITEMS_PER_PAGE)
        return [membership.json() for membership in page.items]

    @require_auth
    def post(self):
        # POST mirrors GET so long filter lists can travel in the body.
        return self.get()
class AccountReferenceMappingResource(Resource):
    """Account Reference Mapping"""

    @require_auth
    def get(self):
        """Account Reference Mapping"""
        args = QUERY_PARSER.parse_args()
        query = AccountReferenceMapping.query
        for expression in args["filter"] or []:
            query = dynamic_query(AccountReferenceMapping, query, expression)
        page = query.paginate(args["page"], per_page=ITEMS_PER_PAGE)
        return [mapping.json() for mapping in page.items]

    @require_auth
    def post(self):
        # POST mirrors GET so long filter lists can travel in the body.
        return self.get()
class RawResource(Resource):
    """Data ingest"""

    @require_auth
    def put(self):
        """Ingest a batch of usage messages.

        Each message carries a timestamp plus the users and tenants seen
        at that time.  Rows are created on demand via a memoised
        get_or_create so repeated lookups within one request are cheap.
        Returns an empty 204 on success.
        """
        @lru_cache(maxsize=100000)
        def cache(model, **kwargs):
            # Memoised get_or_create, keyed on the lookup arguments.
            return get_or_create(db.session, model, **kwargs)

        # NOTE(review): only the first five messages are ingested — this
        # slice looks like debugging residue; confirm before removing it.
        for message in request.json[:5]:
            data = message["data"]
            snapshot = cache(Snapshot, ts=data["timestamp"])
            for account_detail in data["users"]:
                account = cache(Account, openstack_id=account_detail["id"])
                # Accounts without an email get no reference/mapping rows.
                if not account_detail["email"]:
                    continue
                email = account_detail["email"]
                domain = get_domain(email)
                reference = cache(AccountReference, value=email, domain=domain)
                cache(AccountReferenceMapping,
                      account=account,
                      reference=reference,
                      snapshot=snapshot)
            for tenant_detail in data["tenants"]:
                tenant = cache(Tenant, openstack_id=tenant_detail["id"])
                tenant.name = tenant_detail["name"]
                tenant.description = tenant_detail["description"]
                if "allocation_id" in tenant_detail:
                    try:
                        tenant.allocation = int(tenant_detail["allocation_id"])
                    except (TypeError, ValueError):
                        # Non-numeric allocation ids are simply skipped.
                        # (Was a bare except:, which also swallowed
                        # KeyboardInterrupt/SystemExit.)
                        pass
                for member in tenant_detail.get("users", []):
                    account = cache(Account, openstack_id=member["id"])
                    cache(Membership,
                          account=account,
                          tenant=tenant,
                          snapshot=snapshot)
        db.session.commit()
        return "", 204
| apache-2.0 |
isotoma/django-cms | cms/migrations/0031_improved_language_code_support.py | 525 | 20033 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
# Compatibility shim: resolve the active user model across Django versions.
try:
    # Django >= 1.5 may swap in a custom user model.
    from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
    from django.contrib.auth.models import User
else:
    User = get_user_model()
# Labels used to reference the (possibly custom) user model in the ORM freeze.
user_orm_label = '%s.%s' % (User._meta.app_label, User._meta.object_name)
user_model_label = '%s.%s' % (User._meta.app_label, User._meta.module_name)
user_ptr_name = '%s_ptr' % User._meta.object_name.lower()
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Dummy migration: intentionally a no-op, there is no schema
        # change to apply.
        pass
    def backwards(self, orm):
        # Dummy migration: intentionally a no-op, there is nothing to
        # roll back.
        pass
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'})
},
'auth.permission': {
'Meta': {
'ordering': "('content_type__app_label', 'content_type__model', 'codename')",
'unique_together': "(('content_type', 'codename'),)",
'object_name': 'Permission'},
'codename': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['contenttypes.ContentType']"}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
user_model_label: {
'Meta': {'object_name': User.__name__, 'db_table': "'%s'" % User._meta.db_table},
'date_joined': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [],
{'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Group']", 'symmetrical': 'False',
'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [],
{'max_length': '30', 'blank': 'True'}),
'password': (
'django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': (
'django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['auth.Permission']", 'symmetrical': 'False',
'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [],
{'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.CMSPlugin']", 'null': 'True',
'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [],
{'symmetrical': 'False', 'to': "orm['sites.Site']",
'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('site', 'tree_id', 'lft')",
'object_name': 'Page'},
'changed_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now': 'True', 'blank': 'True'}),
'created_by': (
'django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'limit_visibility_in_menu': (
'django.db.models.fields.SmallIntegerField', [],
{'default': 'None', 'null': 'True', 'db_index': 'True',
'blank': 'True'}),
'login_required': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderator_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '1', 'blank': 'True'}),
'navigation_extenders': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '80',
'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [],
{'blank': 'True', 'related_name': "'children'",
'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [],
{'to': "orm['cms.Placeholder']",
'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [],
{'db_index': 'True', 'null': 'True',
'blank': 'True'}),
'published': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [],
{'default': 'True', 'db_index': 'True'}),
'publisher_public': (
'django.db.models.fields.related.OneToOneField', [],
{'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True',
'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [],
{'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '40', 'null': 'True',
'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'template': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [],
{'db_index': 'True'})
},
'cms.pagemoderator': {
'Meta': {'object_name': 'PageModerator'},
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'moderate_children': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_descendants': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'moderate_page': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label})
},
'cms.pagemoderatorstate': {
'Meta': {'ordering': "('page', 'action', '-created')",
'object_name': 'PageModeratorState'},
'action': ('django.db.models.fields.CharField', [],
{'max_length': '3', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [],
{'auto_now_add': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [],
{'default': "''", 'max_length': '1000', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']"}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_moderate': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': (
'django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': (
'django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': (
'django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [],
{'to': "orm['%s']" % user_orm_label, 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [user_orm_label]},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_users'",
'to': "orm['%s']" % user_orm_label}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['%s']" % user_orm_label, 'unique': 'True',
'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': ['auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'created_usergroups'",
'to': "orm['%s']" % user_orm_label}),
'group_ptr': ('django.db.models.fields.related.OneToOneField', [],
{'to': "orm['auth.Group']", 'unique': 'True',
'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': (
'django.db.models.fields.PositiveSmallIntegerField', [],
{'null': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [],
{'max_length': '50', 'db_index': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)",
'object_name': 'Title'},
'application_urls': ('django.db.models.fields.CharField', [],
{'db_index': 'True', 'max_length': '200',
'null': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [],
{'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [],
{'default': 'False', 'db_index': 'True'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [],
{'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'meta_keywords': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True',
'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [],
{'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [],
{'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': (
'django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': (
'django.db.models.fields.CharField', [], {'max_length': '255'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"},
'app_label': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site',
'db_table': "'django_site'"},
'domain': (
'django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': (
'django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms']
| bsd-3-clause |
Isendir/brython | www/src/Lib/test/test_xml_etree_c.py | 25 | 2689 | # xml.etree test for cElementTree
import sys, struct
from test import support
from test.support import import_fresh_module
import unittest
# Fresh import of ElementTree with the C accelerator (_elementtree) active.
cET = import_fresh_module('xml.etree.ElementTree',
                          fresh=['_elementtree'])
# The deprecated cElementTree alias, also backed by the accelerator.
cET_alias = import_fresh_module('xml.etree.cElementTree',
                                fresh=['_elementtree', 'xml.etree'])
class MiscTests(unittest.TestCase):
    # Issue #8651: feeding more than 2 GB must raise OverflowError
    # instead of crashing the C parser.
    @support.bigmemtest(size=support._2G + 100, memuse=1)
    def test_length_overflow(self, size):
        if size < support._2G + 100:
            self.skipTest("not enough free memory, need at least 2 GB")
        data = b'x' * size
        parser = cET.XMLParser()
        try:
            self.assertRaises(OverflowError, parser.feed, data)
        finally:
            # Drop the >2 GB buffer promptly.
            data = None
@unittest.skipUnless(cET, 'requires _elementtree')
class TestAliasWorking(unittest.TestCase):
    # The deprecated cElementTree alias module must remain usable.

    def test_alias_working(self):
        element = cET_alias.Element('foo')
        self.assertEqual(element.tag, 'foo')
@unittest.skipUnless(cET, 'requires _elementtree')
class TestAcceleratorImported(unittest.TestCase):
    # Both import paths must be backed by the C accelerator module.

    def test_correct_import_cET(self):
        module_name = cET.SubElement.__module__
        self.assertEqual(module_name, '_elementtree')

    def test_correct_import_cET_alias(self):
        module_name = cET_alias.SubElement.__module__
        self.assertEqual(module_name, '_elementtree')
@unittest.skipUnless(cET, 'requires _elementtree')
@support.cpython_only
class SizeofTest(unittest.TestCase):
    # Check sys.getsizeof() figures for the C Element implementation.
    def setUp(self):
        # Base size of an Element object: five pointer-sized slots.
        self.elementsize = support.calcobjsize('5P')
        # extra allocated once an element gains attributes or children
        self.extra = struct.calcsize('PiiP4P')
    check_sizeof = support.check_sizeof
    def test_element(self):
        e = cET.Element('a')
        self.check_sizeof(e, self.elementsize)
    def test_element_with_attrib(self):
        e = cET.Element('a', href='about:')
        self.check_sizeof(e, self.elementsize + self.extra)
    def test_element_with_children(self):
        e = cET.Element('a')
        for i in range(5):
            cET.SubElement(e, 'span')
        # should have space for 8 children now
        self.check_sizeof(e, self.elementsize + self.extra +
                          struct.calcsize('8P'))
def test_main():
    # Imported here so loading this module doesn't pull in the full suite.
    # NOTE(review): the test_xml_etree_c self-import appears unused —
    # confirm before removing.
    from test import test_xml_etree, test_xml_etree_c
    # Run the tests specific to the C implementation
    support.run_unittest(
        MiscTests,
        TestAliasWorking,
        TestAcceleratorImported,
        SizeofTest,
    )
    # Run the same test suite as the Python module
    test_xml_etree.test_main(module=cET)
# Allow running this test file directly.
if __name__ == '__main__':
    test_main()
| bsd-3-clause |
AyoubZahid/odoo | addons/stock/procurement.py | 10 | 29666 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, float_compare, float_round
from openerp import SUPERUSER_ID
from dateutil.relativedelta import relativedelta
from datetime import datetime
from psycopg2 import OperationalError
import openerp
class procurement_group(osv.osv):
    # Extend procurement groups with the partner they were created for.
    _inherit = 'procurement.group'
    _columns = {
        'partner_id': fields.many2one('res.partner', 'Partner')
    }
class procurement_rule(osv.osv):
    # Extend procurement rules with the stock-specific 'move' action and
    # the location/route/warehouse parameters it needs.
    _inherit = 'procurement.rule'
    def _get_action(self, cr, uid, context=None):
        # Add the stock move action to the selectable rule actions.
        result = super(procurement_rule, self)._get_action(cr, uid, context=context)
        return result + [('move', _('Move From Another Location'))]
    def _get_rules(self, cr, uid, ids, context=None):
        # Store trigger: rule ids whose related route sequence must be
        # recomputed when a route's sequence changes.
        res = []
        for route in self.browse(cr, uid, ids):
            res += [x.id for x in route.pull_ids]
        return res
    _columns = {
        'location_id': fields.many2one('stock.location', 'Procurement Location'),
        'location_src_id': fields.many2one('stock.location', 'Source Location',
            help="Source location is action=move"),
        'route_id': fields.many2one('stock.location.route', 'Route',
            help="If route_id is False, the rule is global"),
        'procure_method': fields.selection([('make_to_stock', 'Take From Stock'), ('make_to_order', 'Create Procurement')], 'Move Supply Method', required=True,
            help="""Determines the procurement method of the stock move that will be generated: whether it will need to 'take from the available stock' in its source location or needs to ignore its stock and create a procurement over there."""),
        'route_sequence': fields.related('route_id', 'sequence', string='Route Sequence',
            store={
                'stock.location.route': (_get_rules, ['sequence'], 10),
                'procurement.rule': (lambda self, cr, uid, ids, c={}: ids, ['route_id'], 10),
            }),
        'picking_type_id': fields.many2one('stock.picking.type', 'Picking Type',
            help="Picking Type determines the way the picking should be shown in the view, reports, ..."),
        'delay': fields.integer('Number of Days'),
        'partner_address_id': fields.many2one('res.partner', 'Partner Address'),
        'propagate': fields.boolean('Propagate cancel and split', help='If checked, when the previous move of the move (which was generated by a next procurement) is cancelled or split, the move generated by this move will too'),
        'warehouse_id': fields.many2one('stock.warehouse', 'Served Warehouse', help='The warehouse this rule is for'),
        'propagate_warehouse_id': fields.many2one('stock.warehouse', 'Warehouse to Propagate', help="The warehouse to propagate on the created move/procurement, which can be different of the warehouse this rule is for (e.g for resupplying rules from another warehouse)"),
    }
    _defaults = {
        'procure_method': 'make_to_stock',
        'propagate': True,
        'delay': 0,
    }
class procurement_order(osv.osv):
    # Extend procurements with stock-specific fields: the moves they
    # create, the move that caused them, and route/warehouse selection.
    _inherit = "procurement.order"
    _columns = {
        'location_id': fields.many2one('stock.location', 'Procurement Location'), # not required because task may create procurements that aren't linked to a location with sale_service
        'partner_dest_id': fields.many2one('res.partner', 'Customer Address', help="In case of dropshipping, we need to know the destination address more precisely"),
        'move_ids': fields.one2many('stock.move', 'procurement_id', 'Moves', help="Moves created by the procurement"),
        'move_dest_id': fields.many2one('stock.move', 'Destination Move', help="Move which caused (created) the procurement"),
        'route_ids': fields.many2many('stock.location.route', 'stock_location_route_procurement', 'procurement_id', 'route_id', 'Preferred Routes', help="Preferred route to be followed by the procurement order. Usually copied from the generating document (SO) but could be set up manually."),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', help="Warehouse to consider for the route selection"),
        'orderpoint_id': fields.many2one('stock.warehouse.orderpoint', 'Minimum Stock Rule'),
    }
def propagate_cancels(self, cr, uid, ids, context=None):
move_cancel = []
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.rule_id.action == 'move' and procurement.move_ids:
move_cancel += [m.id for m in procurement.move_ids]
if move_cancel:
self.pool.get('stock.move').action_cancel(cr, uid, move_cancel, context=context)
return True
    def cancel(self, cr, uid, ids, context=None):
        """Cancel procurements, first cancelling the moves they created.

        Only the ids returned by get_cancel_ids are cancelled.
        """
        if context is None:
            context = {}
        to_cancel_ids = self.get_cancel_ids(cr, uid, ids, context=context)
        ctx = context.copy()
        #set the context for the propagation of the procurement cancellation
        ctx['cancel_procurement'] = True
        self.propagate_cancels(cr, uid, to_cancel_ids, context=ctx)
        return super(procurement_order, self).cancel(cr, uid, to_cancel_ids, context=ctx)
def _find_parent_locations(self, cr, uid, procurement, context=None):
location = procurement.location_id
res = [location.id]
while location.location_id:
location = location.location_id
res.append(location.id)
return res
def change_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
if warehouse_id:
warehouse = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
return {'value': {'location_id': warehouse.lot_stock_id.id}}
return {}
    # Batch variant of rule assignment: resolves many procurements at once.
    def _assign_multi(self, cr, uid, procurements, context=None):
        """Assign a procurement rule to each procurement in batch.

        Procurements already carrying a rule are marked done; those for
        'product'/'consu' products are resolved via
        _find_suitable_rule_multi, and the results are written back
        grouped by rule to minimise write calls.
        """
        res = {}
        todo_procs = []
        context = context or {}
        for procurement in procurements:
            if procurement.rule_id:
                res[procurement.id] = True
            elif procurement.product_id.type in ['product', 'consu']:
                todo_procs += [procurement]
        res_dict = self._find_suitable_rule_multi(cr, uid, todo_procs, context=context)
        # Invert {procurement_id: rule_id} into {rule_id: [procurement_ids]}
        # so each rule is written with a single call.
        rule_dict = {}
        for proc in res_dict.keys():
            if res_dict[proc]:
                if rule_dict.get(res_dict[proc]):
                    rule_dict[res_dict[proc]] += [proc]
                else:
                    rule_dict[res_dict[proc]] = [proc]
        for rule in rule_dict.keys():
            self.write(cr, uid, rule_dict[rule], {'rule_id': rule}, context=context)
    def _get_route_group_dict(self, cr, uid, procurements, context=None):
        """
        Returns a dictionary with key the routes and values the products associated
        """
        ids = [x.id for x in procurements]
        # One SQL pass: routes attached directly to each procurement's
        # product template, unioned with routes attached to the product's
        # category or any ancestor category (nested-set bounds).
        # Ordered by (proc_id, route_id) so the grouping loop below can
        # rely on all rows of one procurement being contiguous.
        cr.execute("""
            SELECT proc_id, route_id FROM
            ((SELECT p.id AS proc_id, route_id
            FROM stock_route_product AS link, procurement_order AS p, product_template AS pt, product_product pp
            WHERE pp.product_tmpl_id = pt.id AND link.product_id = pt.id AND pp.id = p.product_id
            AND p.id in %s)
            UNION (SELECT p.id AS proc_id, link.route_id AS route_id
            FROM stock_location_route_categ AS link, product_product AS pp, procurement_order AS p,
            product_template AS pt, product_category AS pc, product_category AS pc_product
            WHERE p.product_id = pp.id AND pp.product_tmpl_id = pt.id AND pc_product.id = pt.categ_id AND
            pc.parent_left <= pc_product.parent_left AND pc.parent_right >= pc_product.parent_left
            AND link.categ_id = pc.id AND pp.id IN %s)) p ORDER BY proc_id, route_id
            """, (tuple(ids), tuple(ids), ))
        product_routes = cr.fetchall()
        # Fold the ordered rows into {(route_id, ...): [proc_ids]} —
        # procurements sharing the same route tuple are grouped together.
        old_proc = False
        key = tuple()
        key_routes = {}
        proc = False
        for proc, route in product_routes:
            if not old_proc:
                old_proc = proc
            if old_proc == proc:
                key += (route,)
            else:
                if key_routes.get(key):
                    key_routes[key] += [old_proc]
                else:
                    key_routes[key] = [old_proc]
                old_proc = proc
                key = (route,)
        if proc: #do not forget last one as we passed through it
            if key_routes.get(key):
                key_routes[key] += [proc]
            else:
                key_routes[key] = [proc]
        return key_routes
def _get_wh_loc_dict(self, cr, uid, procurements, context=None):
wh_dict = {}
for procurement in procurements:
if wh_dict.get(procurement.warehouse_id.id):
if wh_dict[procurement.warehouse_id.id].get(procurement.location_id):
wh_dict[procurement.warehouse_id.id][procurement.location_id] += [procurement]
else:
wh_dict[procurement.warehouse_id.id][procurement.location_id] = [procurement]
else:
wh_dict[procurement.warehouse_id.id] = {}
wh_dict[procurement.warehouse_id.id][procurement.location_id] = [procurement]
return wh_dict
def _find_suitable_rule_multi(self, cr, uid, procurements, domain = [], context=None):
'''we try to first find a rule among the ones defined on the procurement order group and if none is found, we try on the routes defined for the product, and finally we fallback on the default behavior'''
results_dict = {}
pull_obj = self.pool.get('procurement.rule')
warehouse_route_ids = []
for procurement in procurements: #Could be replaced by one query for all route_ids
if procurement.route_ids:
procurement_route_ids = [x.id for x in procurement.route_ids]
loc = procurement.location_id
loc_domain = [('location_id.parent_left', '<=', loc.parent_left),
('location_id.parent_right', '>=', loc.parent_left)]
if procurement.warehouse_id:
domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
res = pull_obj.search(cr, uid, loc_domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context)
if res and res[0]:
results_dict[procurement.id] = res[0]
procurements_to_check = [x for x in procurements if x.id not in results_dict.keys()]
#group by warehouse_id:
wh_dict = self._get_wh_loc_dict(cr, uid, procurements_to_check, context=context)
for wh in wh_dict.keys():
warehouse_route_ids = []
domain = []
check_wh = False
for loc in wh_dict[wh].keys():
procurement = wh_dict[wh][loc][0]
loc_domain = [('location_id.parent_left', '<=', loc.parent_left),
('location_id.parent_right', '>=', loc.parent_left)]
if wh and not check_wh:
domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids]
check_wh = True
key_routes = self._get_route_group_dict(cr, uid, wh_dict[wh][loc], context=context)
for key in key_routes.keys():
procurements = self.browse(cr, uid, key_routes[key], context=context)
domain = loc_domain + domain
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', list(key))], order='route_sequence, sequence', context=context)
result = False
if res and res[0]:
result = res[0]
elif warehouse_route_ids:
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context)
result = res and res[0]
if not result:
res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
result = res and res[0]
for proc in key_routes[key]:
results_dict[proc] = result
return results_dict
def _search_suitable_rule(self, cr, uid, procurement, domain, context=None):
'''we try to first find a rule among the ones defined on the procurement order group and if none is found, we try on the routes defined for the product, and finally we fallback on the default behavior'''
pull_obj = self.pool.get('procurement.rule')
warehouse_route_ids = []
if procurement.warehouse_id:
domain += ['|', ('warehouse_id', '=', procurement.warehouse_id.id), ('warehouse_id', '=', False)]
warehouse_route_ids = [x.id for x in procurement.warehouse_id.route_ids]
product_route_ids = [x.id for x in procurement.product_id.route_ids + procurement.product_id.categ_id.total_route_ids]
procurement_route_ids = [x.id for x in procurement.route_ids]
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', procurement_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', 'in', product_route_ids)], order='route_sequence, sequence', context=context)
if not res:
res = warehouse_route_ids and pull_obj.search(cr, uid, domain + [('route_id', 'in', warehouse_route_ids)], order='route_sequence, sequence', context=context) or []
if not res:
res = pull_obj.search(cr, uid, domain + [('route_id', '=', False)], order='sequence', context=context)
return res
    def _find_suitable_rule(self, cr, uid, procurement, context=None):
        '''Return the id of the procurement rule to apply, or False.

        Falls back on a location-aware search (matching the procurement
        location or any of its parents) when the standard lookup done by
        the super implementation finds nothing.
        '''
        rule_id = super(procurement_order, self)._find_suitable_rule(cr, uid, procurement, context=context)
        if not rule_id:
            #a rule defined on 'Stock' is suitable for a procurement in 'Stock\Bin A'
            all_parent_location_ids = self._find_parent_locations(cr, uid, procurement, context=context)
            rule_id = self._search_suitable_rule(cr, uid, procurement, [('location_id', 'in', all_parent_location_ids)], context=context)
            # _search_suitable_rule returns an ordered list of ids; keep the
            # best match (first), or False when the list is empty.
            rule_id = rule_id and rule_id[0] or False
        return rule_id
    def _run_move_create(self, cr, uid, procurement, context=None):
        ''' Returns a dictionary of values that will be used to create a stock move from a procurement.
        This function assumes that the given procurement has a rule (action == 'move') set on it.
        :param procurement: browse record
        :rtype: dictionary
        '''
        # Scheduled move date: planned date minus the rule's lead time (days).
        newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S')
        # Procurement group of the move depends on the rule's propagation
        # option: keep the procurement's own group, use the rule's fixed
        # group, or no group at all ('none').
        group_id = False
        if procurement.rule_id.group_propagation_option == 'propagate':
            group_id = procurement.group_id and procurement.group_id.id or False
        elif procurement.rule_id.group_propagation_option == 'fixed':
            group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False
        #it is possible that we've already got some move done, so check for the done qty and create
        #a new move with the correct qty
        already_done_qty = 0
        for move in procurement.move_ids:
            already_done_qty += move.product_uom_qty if move.state == 'done' else 0
        # Never request a negative quantity if more than asked was moved.
        qty_left = max(procurement.product_qty - already_done_qty, 0)
        vals = {
            'name': procurement.name,
            # Company resolution falls back through rule -> source location
            # -> destination location -> procurement company.
            'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id,
            'product_id': procurement.product_id.id,
            'product_uom': procurement.product_uom.id,
            'product_uom_qty': qty_left,
            'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False,
            'location_id': procurement.rule_id.location_src_id.id,
            'location_dest_id': procurement.location_id.id,
            'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False,
            'procurement_id': procurement.id,
            'rule_id': procurement.rule_id.id,
            'procure_method': procurement.rule_id.procure_method,
            'origin': procurement.origin,
            'picking_type_id': procurement.rule_id.picking_type_id.id,
            'group_id': group_id,
            # Link (4 = add to m2m) all procurement routes on the move.
            'route_ids': [(4, x.id) for x in procurement.route_ids],
            'warehouse_id': procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id,
            'date': newdate,
            'date_expected': newdate,
            'propagate': procurement.rule_id.propagate,
            'priority': procurement.priority,
        }
        return vals
    def _run(self, cr, uid, procurement, context=None):
        '''Execute the procurement: for 'move' rules create the stock move,
        otherwise delegate to the super implementation.

        :return: True on success, False when no source location is set.
        '''
        if procurement.rule_id and procurement.rule_id.action == 'move':
            if not procurement.rule_id.location_src_id:
                # Cannot create a move without a source; leave a message on
                # the procurement instead of raising.
                self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context)
                return False
            move_obj = self.pool.get('stock.move')
            move_dict = self._run_move_create(cr, uid, procurement, context=context)
            #create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)
            move_obj.create(cr, SUPERUSER_ID, move_dict, context=context)
            return True
        return super(procurement_order, self)._run(cr, uid, procurement, context=context)
    def run(self, cr, uid, ids, autocommit=False, context=None):
        '''Run the given procurements, then confirm the created draft moves
        in batch and recursively run any procurements they generated.
        '''
        # Only process procurements that are not already finished or running.
        new_ids = [x.id for x in self.browse(cr, uid, ids, context=context) if x.state not in ('running', 'done', 'cancel')]
        context = dict(context or {}, procurement_auto_defer=True) #When creating
        res = super(procurement_order, self).run(cr, uid, new_ids, autocommit=autocommit, context=context)
        #after all the procurements are run, check if some created a draft stock move that needs to be confirmed
        #(we do that in batch because it fasts the picking assignation and the picking state computation)
        move_to_confirm_ids = []
        for procurement in self.browse(cr, uid, new_ids, context=context):
            if procurement.state == "running" and procurement.rule_id and procurement.rule_id.action == "move":
                move_to_confirm_ids += [m.id for m in procurement.move_ids if m.state == 'draft']
        if move_to_confirm_ids:
            self.pool.get('stock.move').action_confirm(cr, uid, move_to_confirm_ids, context=context)
        # If procurements created other procurements, run the created in batch
        procurement_ids = self.search(cr, uid, [('move_dest_id.procurement_id', 'in', new_ids)], order='id', context=context)
        if procurement_ids:
            # `res` stays True only if every recursive run succeeded too.
            res = res and self.run(cr, uid, procurement_ids, autocommit=autocommit, context=context)
        return res
def _check(self, cr, uid, procurement, context=None):
''' Implement the procurement checking for rules of type 'move'. The procurement will be satisfied only if all related
moves are done/cancel and if the requested quantity is moved.
'''
if procurement.rule_id and procurement.rule_id.action == 'move':
uom_obj = self.pool.get('product.uom')
# In case Phantom BoM splits only into procurements
if not procurement.move_ids:
return True
cancel_test_list = [x.state == 'cancel' for x in procurement.move_ids]
done_cancel_test_list = [x.state in ('done', 'cancel') for x in procurement.move_ids]
at_least_one_cancel = any(cancel_test_list)
all_done_or_cancel = all(done_cancel_test_list)
all_cancel = all(cancel_test_list)
if not all_done_or_cancel:
return False
elif all_done_or_cancel and not all_cancel:
return True
elif all_cancel:
self.message_post(cr, uid, [procurement.id], body=_('All stock moves have been cancelled for this procurement.'), context=context)
self.write(cr, uid, [procurement.id], {'state': 'cancel'}, context=context)
return False
return super(procurement_order, self)._check(cr, uid, procurement, context)
def do_view_pickings(self, cr, uid, ids, context=None):
'''
This function returns an action that display the pickings of the procurements belonging
to the same procurement group of given ids.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'stock', 'do_view_pickings')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
group_ids = set([proc.group_id.id for proc in self.browse(cr, uid, ids, context=context) if proc.group_id])
result['domain'] = "[('group_id','in',[" + ','.join(map(str, list(group_ids))) + "])]"
return result
    def run_scheduler(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
        '''
        Call the scheduler in order to check the running procurements (super method), to check the minimum stock rules
        and the availability of moves. This function is intended to be run for all the companies at the same time, so
        we run functions as SUPERUSER to avoid intercompanies and access rights issues.
        @param self: The object pointer
        @param cr: The current row, from the database cursor,
        @param uid: The current user ID for security checks
        @param ids: List of selected IDs
        @param use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
            This is appropriate for batch jobs only.
        @param context: A standard dictionary for contextual values
        @return: Dictionary of values
        '''
        super(procurement_order, self).run_scheduler(cr, uid, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
        if context is None:
            context = {}
        try:
            if use_new_cursor:
                # Rebind `cr` to a fresh cursor; the caller's cursor is left alone.
                cr = openerp.registry(cr.dbname).cursor()
            move_obj = self.pool.get('stock.move')
            #Minimum stock rules
            self._procure_orderpoint_confirm(cr, SUPERUSER_ID, use_new_cursor=use_new_cursor, company_id=company_id, context=context)
            #Search all confirmed stock_moves and try to assign them
            confirmed_ids = move_obj.search(cr, uid, [('state', '=', 'confirmed')], limit=None, order='priority desc, date_expected asc', context=context)
            # Process in chunks of 100 to keep transactions small; commit
            # after each chunk when running on a dedicated cursor.
            for x in xrange(0, len(confirmed_ids), 100):
                move_obj.action_assign(cr, uid, confirmed_ids[x:x + 100], context=context)
                if use_new_cursor:
                    cr.commit()
            if use_new_cursor:
                cr.commit()
        finally:
            if use_new_cursor:
                # Best-effort close of the dedicated cursor.
                try:
                    cr.close()
                except Exception:
                    pass
        return {}
    def _get_orderpoint_date_planned(self, cr, uid, orderpoint, start_date, context=None):
        '''Return the planned date (string, server date format) for a
        procurement generated by the given orderpoint: start date plus the
        orderpoint lead time, plus the supplier delay for purchase lead types.
        '''
        days = orderpoint.lead_days or 0.0
        if orderpoint.lead_type=='purchase':
            # These days will be substracted when creating the PO
            days += orderpoint.product_id._select_seller(orderpoint.product_id).delay or 0.0
        date_planned = start_date + relativedelta(days=days)
        return date_planned.strftime(DEFAULT_SERVER_DATE_FORMAT)
    def _prepare_orderpoint_procurement(self, cr, uid, orderpoint, product_qty, context=None):
        '''Return the values dict used to create a procurement.order that
        replenishes the given orderpoint for `product_qty` units.
        '''
        return {
            'name': orderpoint.name,
            # Planned from today plus the orderpoint's lead time.
            'date_planned': self._get_orderpoint_date_planned(cr, uid, orderpoint, datetime.today(), context=context),
            'product_id': orderpoint.product_id.id,
            'product_qty': product_qty,
            'company_id': orderpoint.company_id.id,
            'product_uom': orderpoint.product_uom.id,
            'location_id': orderpoint.location_id.id,
            'origin': orderpoint.name,
            'warehouse_id': orderpoint.warehouse_id.id,
            # Back-link so the orderpoint's open procurements can be found.
            'orderpoint_id': orderpoint.id,
            'group_id': orderpoint.group_id.id,
        }
    def _procure_orderpoint_confirm(self, cr, uid, use_new_cursor=False, company_id=False, context=None):
        '''
        Create procurement based on Orderpoint
        :param bool use_new_cursor: if set, use a dedicated cursor and auto-commit after processing each procurement.
            This is appropriate for batch jobs only.
        '''
        if context is None:
            context = {}
        if use_new_cursor:
            cr = openerp.registry(cr.dbname).cursor()
        orderpoint_obj = self.pool.get('stock.warehouse.orderpoint')
        procurement_obj = self.pool.get('procurement.order')
        product_obj = self.pool.get('product.product')
        dom = company_id and [('company_id', '=', company_id)] or []
        orderpoint_ids = orderpoint_obj.search(cr, uid, dom, order="location_id")
        prev_ids = []
        tot_procs = []
        # Process orderpoints in batches of 1000; the list is consumed from
        # the front and failed ids may be re-appended for a retry below.
        while orderpoint_ids:
            ids = orderpoint_ids[:1000]
            del orderpoint_ids[:1000]
            product_dict = {}
            ops_dict = {}
            ops = orderpoint_obj.browse(cr, uid, ids, context=context)
            #Calculate groups that can be executed together
            for op in ops:
                key = (op.location_id.id,)
                if not product_dict.get(key):
                    product_dict[key] = [op.product_id]
                    ops_dict[key] = [op]
                else:
                    product_dict[key] += [op.product_id]
                    ops_dict[key] += [op]
            for key in product_dict.keys():
                # Compute availability for the whole group at once, scoped
                # to the group's location via the context.
                ctx = context.copy()
                ctx.update({'location': ops_dict[key][0].location_id.id})
                prod_qty = product_obj._product_available(cr, uid, [x.id for x in product_dict[key]],
                                                          context=ctx)
                subtract_qty = orderpoint_obj.subtract_procurements_from_orderpoints(cr, uid, [x.id for x in ops_dict[key]], context=context)
                for op in ops_dict[key]:
                    try:
                        prods = prod_qty[op.product_id.id]['virtual_available']
                        if prods is None:
                            continue
                        # Below the minimum: replenish up to the max level.
                        if float_compare(prods, op.product_min_qty, precision_rounding=op.product_uom.rounding) <= 0:
                            qty = max(op.product_min_qty, op.product_max_qty) - prods
                            # Round the quantity up to the next multiple of
                            # qty_multiple when one is configured.
                            reste = op.qty_multiple > 0 and qty % op.qty_multiple or 0.0
                            if float_compare(reste, 0.0, precision_rounding=op.product_uom.rounding) > 0:
                                qty += op.qty_multiple - reste
                            if float_compare(qty, 0.0, precision_rounding=op.product_uom.rounding) < 0:
                                continue
                            # Remove what is already being procured for this orderpoint.
                            qty -= subtract_qty[op.id]
                            qty_rounded = float_round(qty, precision_rounding=op.product_uom.rounding)
                            if qty_rounded > 0:
                                proc_id = procurement_obj.create(cr, uid,
                                                                 self._prepare_orderpoint_procurement(cr, uid, op, qty_rounded, context=context),
                                                                 context=dict(context, procurement_autorun_defer=True))
                                tot_procs.append(proc_id)
                            if use_new_cursor:
                                cr.commit()
                    except OperationalError:
                        # Concurrency error: when on a dedicated cursor,
                        # roll back and queue the orderpoint for a retry.
                        if use_new_cursor:
                            orderpoint_ids.append(op.id)
                            cr.rollback()
                            continue
                        else:
                            raise
            try:
                # Run newest procurements first.
                tot_procs.reverse()
                self.run(cr, uid, tot_procs, context=context)
                tot_procs = []
                if use_new_cursor:
                    cr.commit()
            except OperationalError:
                if use_new_cursor:
                    cr.rollback()
                    # Retry the whole batch on the next loop iteration.
                    continue
                else:
                    raise
            if use_new_cursor:
                cr.commit()
            # Safety net against infinite retry loops on the same batch.
            if prev_ids == ids:
                break
            else:
                prev_ids = ids
        if use_new_cursor:
            cr.commit()
            cr.close()
        return {}
| gpl-3.0 |
andreabravetti/xhtml2pdf | xhtml2pdf/xhtml2pdf_reportlab.py | 10 | 32229 | # -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import md5
from reportlab.lib.enums import TA_RIGHT
from reportlab.lib.styles import ParagraphStyle
from reportlab.lib.utils import flatten, open_for_read, getStringIO, \
LazyImageReader, haveImages
from reportlab.platypus.doctemplate import BaseDocTemplate, PageTemplate, IndexingFlowable
from reportlab.platypus.flowables import Flowable, CondPageBreak, \
KeepInFrame, ParagraphAndImage
from reportlab.platypus.tableofcontents import TableOfContents
from reportlab.platypus.tables import Table, TableStyle
from xhtml2pdf.reportlab_paragraph import Paragraph
from xhtml2pdf.util import getUID, getBorderStyle
from types import StringType, TupleType, ListType, IntType
import StringIO
import cgi
import copy
import logging
import reportlab.pdfbase.pdfform as pdfform
import sys
try:
import PIL.Image as PILImage
except:
try:
import Image as PILImage
except:
PILImage = None
log = logging.getLogger("xhtml2pdf")
MAX_IMAGE_RATIO = 0.95
class PTCycle(list):
    """List of page templates that can be iterated over endlessly.

    After the first complete pass, iteration wraps around to the index
    stored in ``_restart`` (instead of 0), which supports the ``'*'``
    short-circuit marker used when building template cycles.
    """

    def __init__(self):
        list.__init__(self)
        # Index iteration jumps back to after reaching the end.
        self._restart = 0
        # Index of the next element to yield.
        self._idx = 0

    def cyclicIterator(self):
        """Yield the list's elements forever, wrapping to ``_restart``."""
        while True:
            item = self[self._idx]
            self._idx += 1
            if self._idx >= len(self):
                self._idx = self._restart
            yield item
class PmlMaxHeightMixIn:
    """Mixin that records the maximum height available to a flowable.

    The last seen value is kept in ``availHeightValue``. When a canvas is
    attached and the height is below the 'unbounded' threshold (70000),
    the running maximum is also shared across flowables through
    ``canv.maxAvailHeightValue``.
    """

    def setMaxHeight(self, availHeight):
        """Record *availHeight* and return the tracked maximum."""
        self.availHeightValue = availHeight
        if availHeight < 70000:
            canvas = getattr(self, "canv", None)
            if canvas is not None:
                if not hasattr(canvas, "maxAvailHeightValue"):
                    canvas.maxAvailHeightValue = 0
                # Share the largest height seen so far via the canvas.
                best = max(availHeight, canvas.maxAvailHeightValue)
                canvas.maxAvailHeightValue = best
                self.availHeightValue = best
        else:
            # Effectively unbounded: keep the raw value.
            self.availHeightValue = availHeight
        if not hasattr(self, "availHeightValue"):
            self.availHeightValue = 0
        return self.availHeightValue

    def getMaxHeight(self):
        """Return the last recorded available height (0 if never set)."""
        return getattr(self, "availHeightValue", 0)
class PmlBaseDoc(BaseDocTemplate):
    """
    We use our own document template to get access to the canvas
    and set some informations once.
    """

    def beforePage(self):
        """Stamp the producer info on the PDF before each page is drawn."""
        # Tricky way to set producer, because of not real privateness in Python
        info = "pisa HTML to PDF <http://www.htmltopdf.org>"
        self.canv._doc.info.producer = info
        '''
        # Convert to ASCII because there is a Bug in Reportlab not
        # supporting other than ASCII. Send to list on 23.1.2007
        author = toString(self.pml_data.get("author", "")).encode("ascii","ignore")
        subject = toString(self.pml_data.get("subject", "")).encode("ascii","ignore")
        title = toString(self.pml_data.get("title", "")).encode("ascii","ignore")
        # print repr((author,title,subject))
        self.canv.setAuthor(author)
        self.canv.setSubject(subject)
        self.canv.setTitle(title)
        if self.pml_data.get("fullscreen", 0):
            self.canv.showFullScreen0()
        if self.pml_data.get("showoutline", 0):
            self.canv.showOutline()
        if self.pml_data.get("duration", None) is not None:
            self.canv.setPageDuration(self.pml_data["duration"])
        '''

    def afterFlowable(self, flowable):
        """Emit a TOCEntry notification for flowables flagged as outline items."""
        # Does the flowable contain fragments?
        if getattr(flowable, "outline", False):
            self.notify('TOCEntry', (
                flowable.outlineLevel,
                cgi.escape(copy.deepcopy(flowable.text), 1),
                self.page))

    def handle_nextPageTemplate(self, pt):
        '''
        if pt has also templates for even and odd page convert it to list
        '''
        has_left_template = self._has_template_for_name(pt + '_left')
        has_right_template = self._has_template_for_name(pt + '_right')
        if has_left_template and has_right_template:
            # Alternate between the left/right variants on even/odd pages.
            pt = [pt + '_left', pt + '_right']
        '''On endPage change to the page template with name or index pt'''
        if type(pt) is StringType:
            # A single template name: select it by id.
            if hasattr(self, '_nextPageTemplateCycle'):
                del self._nextPageTemplateCycle
            for t in self.pageTemplates:
                if t.id == pt:
                    self._nextPageTemplateIndex = self.pageTemplates.index(t)
                    return
            raise ValueError("can't find template('%s')" % pt)
        elif type(pt) is IntType:
            # Direct index into self.pageTemplates.
            if hasattr(self, '_nextPageTemplateCycle'):
                del self._nextPageTemplateCycle
            self._nextPageTemplateIndex = pt
        elif type(pt) in (ListType, TupleType):
            #used for alternating left/right pages
            #collect the refs to the template objects, complain if any are bad
            c = PTCycle()
            for ptn in pt:
                #special case name used to short circuit the iteration
                if ptn == '*':
                    c._restart = len(c)
                    continue
                for t in self.pageTemplates:
                    if t.id == ptn.strip():
                        c.append(t)
                        break
            if not c:
                raise ValueError("No valid page templates in cycle")
            elif c._restart > len(c):
                raise ValueError("Invalid cycle restart position")
            #ensure we start on the first one$
            self._nextPageTemplateCycle = c.cyclicIterator()
        else:
            raise TypeError("Argument pt should be string or integer or list")

    def _has_template_for_name(self, name):
        """Return True if a page template with the given id exists."""
        for template in self.pageTemplates:
            if template.id == name.strip():
                return True
        return False
class PmlPageTemplate(PageTemplate):
    """Page template that can paint a background (image or PDF) and static
    frames (headers/footers) with page-number substitution."""

    PORTRAIT = 'portrait'
    LANDSCAPE = 'landscape'
    # by default portrait
    pageorientation = PORTRAIT

    def __init__(self, **kw):
        # Static frames (e.g. headers/footers) drawn on every page.
        self.pisaStaticList = []
        # Backgrounds collected per page (PDF backgrounds handled later).
        self.pisaBackgroundList = []
        self.pisaBackground = None
        PageTemplate.__init__(self, **kw)
        self._page_count = 0
        self._first_flow = True

    def isFirstFlow(self, canvas):
        """Return True while the document is still on its first layout pass.

        Uses the fact that a repeated pass revisits page numbers already seen.
        """
        if self._first_flow:
            if canvas.getPageNumber() <= self._page_count:
                self._first_flow = False
            else:
                self._page_count = canvas.getPageNumber()
        return self._first_flow

    def isPortrait(self):
        return self.pageorientation == self.PORTRAIT

    def isLandscape(self):
        return self.pageorientation == self.LANDSCAPE

    def beforeDrawPage(self, canvas, doc):
        """Paint the page background and static frames before the page body."""
        canvas.saveState()
        try:
            # Background
            pisaBackground = None
            if (self.isFirstFlow(canvas)
                and hasattr(self, "pisaBackground")
                and self.pisaBackground
                and (not self.pisaBackground.notFound())):
                # Is image not PDF
                if self.pisaBackground.mimetype.startswith("image/"):
                    try:
                        img = PmlImageReader(StringIO.StringIO(self.pisaBackground.getData()))
                        iw, ih = img.getSize()
                        pw, ph = canvas._pagesize
                        width = pw # min(iw, pw) # max
                        wfactor = float(width) / iw
                        height = ph # min(ih, ph) # max
                        hfactor = float(height) / ih
                        factor_min = min(wfactor, hfactor)
                        if self.isPortrait():
                            # Scale to fit, anchored at the top-left corner.
                            w = iw * factor_min
                            h = ih * factor_min
                            canvas.drawImage(img, 0, ph - h, w, h)
                        elif self.isLandscape():
                            # NOTE(review): w uses factor_max but h uses
                            # factor_min here — looks asymmetric; confirm
                            # against intended landscape scaling.
                            factor_max = max(wfactor, hfactor)
                            w = ih * factor_max
                            h = iw * factor_min
                            canvas.drawImage(img, 0, 0, w, h)
                    except:
                        log.exception("Draw background")
                # PDF!
                else:
                    pisaBackground = self.pisaBackground
            if pisaBackground:
                self.pisaBackgroundList.append(pisaBackground)

            def pageNumbering(objList):
                # Replace page-number / page-count placeholder fragments
                # with the actual values for the page being drawn.
                for obj in flatten(objList):
                    if isinstance(obj, PmlParagraph):
                        for frag in obj.frags:
                            if frag.pageNumber:
                                frag.text = str(pagenumber)
                            elif frag.pageCount:
                                frag.text = str(self._page_count)
                    elif isinstance(obj, PmlTable):
                        # Flatten the cells ([[1,2], [3,4]] becomes [1,2,3,4])
                        flat_cells = [item for sublist in obj._cellvalues for item in sublist]
                        pageNumbering(flat_cells)

            try:
                # Paint static frames
                pagenumber = canvas.getPageNumber()
                for frame in self.pisaStaticList:
                    # Deep-copy so substitutions don't leak across pages.
                    frame = copy.deepcopy(frame)
                    story = frame.pisaStaticStory
                    pageNumbering(story)
                    frame.addFromList(story, canvas)
            except Exception: # TODO: Kill this!
                log.debug("PmlPageTemplate", exc_info=1)
        finally:
            canvas.restoreState()
_ctr = 1
class PmlImageReader(object): # TODO We need a factory here, returning either a class for java or a class for PIL
    """
    Wraps up either PIL or Java to get data from bitmaps
    """
    _cache = {}

    def __init__(self, fileName):
        # Copying another reader: share its state (borg pattern).
        if isinstance(fileName, PmlImageReader):
            self.__dict__ = fileName.__dict__ # borgize
            return
        #start wih lots of null private fields, to be populated by
        #the relevant engine.
        self.fileName = fileName
        self._image = None
        self._width = None
        self._height = None
        self._transparent = None
        self._data = None
        imageReaderFlags = 0
        if PILImage and isinstance(fileName, PILImage.Image):
            # Already an open PIL image: adopt it directly.
            self._image = fileName
            self.fp = getattr(fileName, 'fp', None)
            try:
                self.fileName = self._image.fileName
            except AttributeError:
                self.fileName = 'PILIMAGE_%d' % id(self)
        else:
            try:
                self.fp = open_for_read(fileName, 'b')
                if isinstance(self.fp, StringIO.StringIO().__class__):
                    imageReaderFlags = 0 # avoid messing with already internal files
                if imageReaderFlags > 0: # interning
                    data = self.fp.read()
                    if imageReaderFlags & 2: # autoclose
                        try:
                            self.fp.close()
                        except:
                            pass
                    if imageReaderFlags & 4: # cache the data
                        if not self._cache:
                            from rl_config import register_reset
                            register_reset(self._cache.clear)
                        data = self._cache.setdefault(md5(data).digest(), data)
                    self.fp = getStringIO(data)
                elif imageReaderFlags == - 1 and isinstance(fileName, (str, unicode)):
                    #try Ralf Schmitt's re-opening technique of avoiding too many open files
                    self.fp.close()
                    del self.fp # will become a property in the next statement
                    self.__class__ = LazyImageReader
                if haveImages:
                    #detect which library we are using and open the image
                    if not self._image:
                        self._image = self._read_image(self.fp)
                    if getattr(self._image, 'format', None) == 'JPEG':
                        self.jpeg_fh = self._jpeg_fh
                else:
                    # No imaging library: only JPEGs can be embedded as-is.
                    from reportlab.pdfbase.pdfutils import readJPEGInfo
                    try:
                        self._width, self._height, c = readJPEGInfo(self.fp)
                    except:
                        raise RuntimeError('Imaging Library not available, unable to import bitmaps only jpegs')
                    self.jpeg_fh = self._jpeg_fh
                    self._data = self.fp.read()
                    self._dataA = None
                    self.fp.seek(0)
            except: # TODO: Kill the catch-all
                # Re-raise with the file name appended to the message for
                # easier diagnosis (Python 2 three-expression raise).
                et, ev, tb = sys.exc_info()
                if hasattr(ev, 'args'):
                    a = str(ev.args[- 1]) + (' fileName=%r' % fileName)
                    ev.args = ev.args[: - 1] + (a,)
                    raise et, ev, tb
                else:
                    raise

    def _read_image(self, fp):
        """Open *fp* with the available imaging backend (Jython or PIL)."""
        if sys.platform[0:4] == 'java':
            from javax.imageio import ImageIO
            from java.io import ByteArrayInputStream
            input_stream = ByteArrayInputStream(fp.read())
            return ImageIO.read(input_stream)
        elif PILImage:
            return PILImage.open(fp)

    def _jpeg_fh(self):
        # Used when the image is a JPEG that can be embedded untouched.
        fp = self.fp
        fp.seek(0)
        return fp

    def jpeg_fh(self):
        # Default: not a JPEG; replaced by _jpeg_fh when it is.
        return None

    def getSize(self):
        """Return (width, height) in pixels, computing them lazily."""
        if self._width is None or self._height is None:
            if sys.platform[0:4] == 'java':
                self._width = self._image.getWidth()
                self._height = self._image.getHeight()
            else:
                self._width, self._height = self._image.size
        return self._width, self._height

    def getRGBData(self):
        "Return byte array of RGB data as string"
        if self._data is None:
            self._dataA = None
            if sys.platform[0:4] == 'java':
                import jarray # TODO: Move to top.
                from java.awt.image import PixelGrabber
                width, height = self.getSize()
                buffer = jarray.zeros(width * height, 'i')
                pg = PixelGrabber(self._image, 0, 0, width, height, buffer, 0, width)
                pg.grabPixels()
                # there must be a way to do this with a cast not a byte-level loop,
                # I just haven't found it yet...
                pixels = []
                a = pixels.append
                for rgb in buffer:
                    a(chr((rgb >> 16) & 0xff))
                    a(chr((rgb >> 8) & 0xff))
                    a(chr(rgb & 0xff))
                self._data = ''.join(pixels)
                self.mode = 'RGB'
            else:
                im = self._image
                mode = self.mode = im.mode
                if mode == 'RGBA':
                    # Split off the alpha channel into its own reader and
                    # keep the RGB part here.
                    im.load()
                    self._dataA = PmlImageReader(im.split()[3])
                    im = im.convert('RGB')
                    self.mode = 'RGB'
                elif mode not in ('L', 'RGB', 'CMYK'):
                    im = im.convert('RGB')
                    self.mode = 'RGB'
                self._data = im.tostring()
        return self._data

    def getImageData(self):
        """Return (width, height, rgb_bytes)."""
        width, height = self.getSize()
        return width, height, self.getRGBData()

    def getTransparent(self):
        """Return the [r, g, b] transparent color for palette images, or None."""
        if sys.platform[0:4] == 'java':
            return None
        elif "transparency" in self._image.info:
            transparency = self._image.info["transparency"] * 3
            palette = self._image.palette
            if hasattr(palette, 'palette'):
                palette = palette.palette
            elif hasattr(palette, 'data'):
                palette = palette.data
            else:
                return None
            # 8-bit PNGs could give an empty string as transparency value, so
            # we have to be careful here.
            try:
                return map(ord, palette[transparency:transparency + 3])
            except:
                return None
        else:
            return None

    def __str__(self):
        try:
            fn = self.fileName.read()
            if not fn:
                fn = id(self)
            return "PmlImageObject_%s" % hash(fn)
        except:
            fn = self.fileName
            if not fn:
                fn = id(self)
            return fn
class PmlImage(Flowable, PmlMaxHeightMixIn):
    """Image flowable that scales itself down to fit the available space."""

    def __init__(self, data, width=None, height=None, mask="auto", mimetype=None, **kw):
        self.kw = kw
        self.hAlign = 'CENTER'
        self._mask = mask
        # Raw image bytes; a fresh reader is built on demand in getImage().
        self._imgdata = data
        # print "###", repr(data)
        self.mimetype = mimetype
        img = self.getImage()
        if img:
            self.imageWidth, self.imageHeight = img.getSize()
        # Requested draw size defaults to the image's natural size.
        self.drawWidth = width or self.imageWidth
        self.drawHeight = height or self.imageHeight

    def wrap(self, availWidth, availHeight):
        " This can be called more than once! Do not overwrite important data like drawWidth "
        availHeight = self.setMaxHeight(availHeight)
        # print "image wrap", id(self), availWidth, availHeight, self.drawWidth, self.drawHeight
        # Scale uniformly so the image fits both dimensions; the height is
        # capped at MAX_IMAGE_RATIO of the page to leave room for flow.
        width = min(self.drawWidth, availWidth)
        wfactor = float(width) / self.drawWidth
        height = min(self.drawHeight, availHeight * MAX_IMAGE_RATIO)
        hfactor = float(height) / self.drawHeight
        factor = min(wfactor, hfactor)
        self.dWidth = self.drawWidth * factor
        self.dHeight = self.drawHeight * factor
        # print "imgage result", factor, self.dWidth, self.dHeight
        return self.dWidth, self.dHeight

    def getImage(self):
        """Return a PmlImageReader over the stored image bytes."""
        img = PmlImageReader(StringIO.StringIO(self._imgdata))
        return img

    def draw(self):
        img = self.getImage()
        self.canv.drawImage(
            img,
            0, 0,
            self.dWidth,
            self.dHeight,
            mask=self._mask)

    def identity(self, maxLen=None):
        r = Flowable.identity(self, maxLen)
        return r
class PmlParagraphAndImage(ParagraphAndImage, PmlMaxHeightMixIn):
    """ParagraphAndImage variant that temporarily attaches the canvas to the
    image while wrapping (the image's wrap may need canvas state)."""

    def wrap(self, availWidth, availHeight):
        self.I.canv = self.canv
        result = ParagraphAndImage.wrap(self, availWidth, availHeight)
        # Detach again so the image holds no stale canvas reference.
        del self.I.canv
        return result

    def split(self, availWidth, availHeight):
        # print "# split", id(self)
        # Make sure the image has been wrapped once before splitting.
        if not hasattr(self, "wI"):
            self.wI, self.hI = self.I.wrap(availWidth, availHeight) # drawWidth, self.I.drawHeight
        return ParagraphAndImage.split(self, availWidth, availHeight)
class PmlParagraph(Paragraph, PmlMaxHeightMixIn):
    """Paragraph with CSS-like padding, borders, background and PDF outline
    support on top of the base reportlab Paragraph."""

    def _calcImageMaxSizes(self, availWidth, availHeight):
        """Shrink inline images (uniformly) so they fit the available box."""
        self.hasImages = False
        availHeight = self.getMaxHeight()
        for frag in self.frags:
            if hasattr(frag, "cbDefn") and frag.cbDefn.kind == "img":
                img = frag.cbDefn
                if img.width > 0 and img.height > 0:
                    self.hasImages = True
                    width = min(img.width, availWidth)
                    wfactor = float(width) / img.width
                    height = min(img.height, availHeight * MAX_IMAGE_RATIO) # XXX 99% because 100% do not work...
                    hfactor = float(height) / img.height
                    factor = min(wfactor, hfactor)
                    img.height *= factor
                    img.width *= factor

    def wrap(self, availWidth, availHeight):
        """Wrap the paragraph, accounting for padding and border widths."""
        availHeight = self.setMaxHeight(availHeight)
        style = self.style
        # Total horizontal/vertical space taken by padding plus borders.
        self.deltaWidth = style.paddingLeft + style.paddingRight + style.borderLeftWidth + style.borderRightWidth
        self.deltaHeight = style.paddingTop + style.paddingBottom + style.borderTopWidth + style.borderBottomWidth
        # reduce the available width & height by the padding so the wrapping
        # will use the correct size
        availWidth -= self.deltaWidth
        availHeight -= self.deltaHeight
        # Modify maxium image sizes
        self._calcImageMaxSizes(availWidth, self.getMaxHeight() - self.deltaHeight)
        # call the base class to do wrapping and calculate the size
        Paragraph.wrap(self, availWidth, availHeight)
        #self.height = max(1, self.height)
        #self.width = max(1, self.width)
        # increase the calculated size by the padding
        self.width = self.width + self.deltaWidth
        self.height = self.height + self.deltaHeight
        return self.width, self.height

    def split(self, availWidth, availHeight):
        if len(self.frags) <= 0:
            return []
        #the split information is all inside self.blPara
        if not hasattr(self, 'deltaWidth'):
            self.wrap(availWidth, availHeight)
        availWidth -= self.deltaWidth
        availHeight -= self.deltaHeight
        return Paragraph.split(self, availWidth, availHeight)

    def draw(self):
        """Draw background, borders and PDF outline entries, then the text."""
        # Create outline
        if getattr(self, "outline", False):
            # Check level and add all levels
            # Insert bookmark entries for any skipped intermediate levels so
            # the outline tree stays well-formed.
            last = getattr(self.canv, "outlineLast", - 1) + 1
            while last < self.outlineLevel:
                # print "(OUTLINE", last, self.text
                key = getUID()
                self.canv.bookmarkPage(key)
                self.canv.addOutlineEntry(
                    self.text,
                    key,
                    last,
                    not self.outlineOpen)
                last += 1
            self.canv.outlineLast = self.outlineLevel
            key = getUID()
            self.canv.bookmarkPage(key)
            self.canv.addOutlineEntry(
                self.text,
                key,
                self.outlineLevel,
                not self.outlineOpen)
            last += 1
        # Draw the background and borders here before passing control on to
        # ReportLab. This is because ReportLab can't handle the individual
        # components of the border independently. This will also let us
        # support more border styles eventually.
        canvas = self.canv
        style = self.style
        bg = style.backColor
        leftIndent = style.leftIndent
        bp = 0 # style.borderPadding
        x = leftIndent - bp
        y = - bp
        w = self.width - (leftIndent + style.rightIndent) + 2 * bp
        h = self.height + 2 * bp
        if bg:
            # draw a filled rectangle (with no stroke) using bg color
            canvas.saveState()
            canvas.setFillColor(bg)
            canvas.rect(x, y, w, h, fill=1, stroke=0)
            canvas.restoreState()
        # we need to hide the bg color (if any) so Paragraph won't try to draw it again
        style.backColor = None
        # offset the origin to compensate for the padding
        canvas.saveState()
        canvas.translate(
            (style.paddingLeft + style.borderLeftWidth),
            -1 * (style.paddingTop + style.borderTopWidth)) # + (style.leading / 4)))
        # Call the base class draw method to finish up
        Paragraph.draw(self)
        canvas.restoreState()
        # Reset color because we need it again if we run 2-PASS like we
        # do when using TOC
        style.backColor = bg
        canvas.saveState()

        def _drawBorderLine(bstyle, width, color, x1, y1, x2, y2):
            # We need width and border style to be able to draw a border
            if width and getBorderStyle(bstyle):
                # If no color for border is given, the text color is used (like defined by W3C)
                if color is None:
                    color = style.textColor
                    # print "Border", bstyle, width, color
                if color is not None:
                    canvas.setStrokeColor(color)
                    canvas.setLineWidth(width)
                    canvas.line(x1, y1, x2, y2)

        # Draw the four borders independently (CSS-like behavior).
        _drawBorderLine(style.borderLeftStyle,
                        style.borderLeftWidth,
                        style.borderLeftColor,
                        x, y, x, y + h)
        _drawBorderLine(style.borderRightStyle,
                        style.borderRightWidth,
                        style.borderRightColor,
                        x + w, y, x + w, y + h)
        _drawBorderLine(style.borderTopStyle,
                        style.borderTopWidth,
                        style.borderTopColor,
                        x, y + h, x + w, y + h)
        _drawBorderLine(style.borderBottomStyle,
                        style.borderBottomWidth,
                        style.borderBottomColor,
                        x, y, x + w, y)
        canvas.restoreState()
class PmlKeepInFrame(KeepInFrame, PmlMaxHeightMixIn):
    """KeepInFrame flowable wired into the shared max-height bookkeeping."""

    def wrap(self, availWidth, availHeight):
        """Record the frame bounds, then delegate to KeepInFrame.wrap."""
        # Guard against zero/negative widths coming from the layout engine.
        availWidth = max(availWidth, 1.0)
        self.maxWidth = availWidth
        # Track the available height through the mixin's shared state.
        self.maxHeight = self.setMaxHeight(availHeight)
        return KeepInFrame.wrap(self, availWidth, availHeight)
class PmlTable(Table, PmlMaxHeightMixIn):
    """Table flowable with percentage-based column widths and max-height
    tracking."""

    def _normWidth(self, w, maxw):
        """
        Helper for calculating percentages
        """
        if type(w) == type(""):
            # Percentage string like "50%": strip the trailing '%' and scale.
            w = ((maxw / 100.0) * float(w[: - 1]))
        elif (w is None) or (w == "*"):
            w = maxw
        return min(w, maxw)

    def _listCellGeom(self, V, w, s, W=None, H=None, aH=72000):
        # print "#", self.availHeightValue
        # Replace the 'unbounded' sentinel height with the tracked maximum.
        if aH == 72000:
            aH = self.getMaxHeight() or aH
        return Table._listCellGeom(self, V, w, s, W=W, H=H, aH=aH)

    def wrap(self, availWidth, availHeight):
        """Resolve column widths (fixed, percentage, flexible) then wrap."""
        self.setMaxHeight(availHeight)
        # Strange bug, sometime the totalWidth is not set !?
        try:
            self.totalWidth
        except:
            self.totalWidth = availWidth
        # Prepare values
        totalWidth = self._normWidth(self.totalWidth, availWidth)
        remainingWidth = totalWidth
        remainingCols = 0
        newColWidths = self._colWidths
        # Calculate widths that are fix
        # IMPORTANT!!! We can not substitute the private value
        # self._colWidths therefore we have to modify list in place
        for i, colWidth in enumerate(newColWidths):
            # NOTE(review): the `or (colWidth == '*')` part is unreachable
            # ('*' is already "not None"); confirm whether '*' columns were
            # meant to be treated as flexible instead.
            if (colWidth is not None) or (colWidth == '*'):
                colWidth = self._normWidth(colWidth, totalWidth)
                remainingWidth -= colWidth
            else:
                remainingCols += 1
                colWidth = None
            newColWidths[i] = colWidth
        # Distribute remaining space
        minCellWidth = totalWidth * 0.01
        if remainingCols > 0:
            for i, colWidth in enumerate(newColWidths):
                if colWidth is None:
                    newColWidths[i] = max(minCellWidth, remainingWidth / remainingCols) # - 0.1
        # Bigger than totalWidth? Lets reduce the fix entries propotionally
        if sum(newColWidths) > totalWidth:
            quotient = totalWidth / sum(newColWidths)
            for i in range(len(newColWidths)):
                newColWidths[i] = newColWidths[i] * quotient
        # To avoid rounding errors adjust one col with the difference
        diff = sum(newColWidths) - totalWidth
        if diff > 0:
            newColWidths[0] -= diff
        return Table.wrap(self, availWidth, availHeight)
class PmlPageCount(IndexingFlowable):
    """Invisible flowable that forces one extra layout pass.

    The first call to isSatisfied() reports False, which makes reportlab
    run the document through another pass; every later call reports True.
    Nothing is ever drawn.
    """

    def __init__(self):
        IndexingFlowable.__init__(self)
        self.second_round = False

    def isSatisfied(self):
        # Report the current state, then remember that we have been asked
        # once already.
        already_asked = self.second_round
        self.second_round = True
        return already_asked

    def drawOn(self, canvas, x, y, _sW=0):
        # Intentionally a no-op; this flowable exists only for bookkeeping.
        pass
class PmlTableOfContents(TableOfContents):
    """TableOfContents variant that lays entries out as a two-column table
    (entry text on the left, page number right-aligned on the right)."""

    def wrap(self, availWidth, availHeight):
        """
        All table properties should be known by now.
        """
        # Left column takes whatever width is not reserved for page numbers.
        widths = (availWidth - self.rightColumnWidth,
                  self.rightColumnWidth)

        # makes an internal table which does all the work.
        # we draw the LAST RUN's entries! If there are
        # none, we make some dummy data to keep the table
        # from complaining
        if len(self._lastEntries) == 0:
            _tempEntries = [(0, 'Placeholder for table of contents', 0)]
        else:
            _tempEntries = self._lastEntries

        lastMargin = 0
        tableData = []
        tableStyle = [
            ('VALIGN', (0, 0), (- 1, - 1), 'TOP'),
            ('LEFTPADDING', (0, 0), (- 1, - 1), 0),
            ('RIGHTPADDING', (0, 0), (- 1, - 1), 0),
            ('TOPPADDING', (0, 0), (- 1, - 1), 0),
            ('BOTTOMPADDING', (0, 0), (- 1, - 1), 0),
        ]
        for i, entry in enumerate(_tempEntries):
            # Entries may carry extra fields; only (level, text, pageNum)
            # matter here.
            level, text, pageNum = entry[:3]
            leftColStyle = self.levelStyles[level]
            if i:  # Not for first element
                # Vertical gap between rows: the larger of the previous
                # entry's spaceAfter and this entry's spaceBefore.
                tableStyle.append((
                    'TOPPADDING',
                    (0, i), (- 1, i),
                    max(lastMargin, leftColStyle.spaceBefore)))
                # print leftColStyle.leftIndent
            lastMargin = leftColStyle.spaceAfter
            #right col style is right aligned
            rightColStyle = ParagraphStyle(name='leftColLevel%d' % level,
                                           parent=leftColStyle,
                                           leftIndent=0,
                                           alignment=TA_RIGHT)
            leftPara = Paragraph(text, leftColStyle)
            rightPara = Paragraph(str(pageNum), rightColStyle)
            tableData.append([leftPara, rightPara])

        self._table = Table(
            tableData,
            colWidths=widths,
            style=TableStyle(tableStyle))

        self.width, self.height = self._table.wrapOn(self.canv, availWidth, availHeight)
        return self.width, self.height
class PmlRightPageBreak(CondPageBreak):
    """Conditional page break that fires while the current page number is
    even, so the content that follows begins on an odd-numbered page."""

    def __init__(self):
        # Deliberately skip CondPageBreak.__init__: no fixed height is
        # needed because wrap() sizes this flowable dynamically.
        pass

    def wrap(self, availWidth, availHeight):
        on_even_page = self.canv.getPageNumber() % 2 == 0
        if on_even_page:
            # Consume all remaining space to force the break.
            self.width = availWidth
            self.height = availHeight
            return availWidth, availHeight
        # Already on an odd page: occupy no space at all.
        self.width = self.height = 0
        return 0, 0
class PmlLeftPageBreak(CondPageBreak):
    """Conditional page break that fires while the current page number is
    odd, so the content that follows begins on an even-numbered page."""

    def __init__(self):
        # No fixed height: wrap() computes the size on demand, so the
        # CondPageBreak constructor is intentionally not invoked.
        pass

    def wrap(self, availWidth, availHeight):
        on_odd_page = self.canv.getPageNumber() % 2 != 0
        if on_odd_page:
            # Swallow the rest of the page to trigger the break.
            self.width = availWidth
            self.height = availHeight
            return availWidth, availHeight
        self.width = self.height = 0
        return 0, 0
# --- Pdf Form
class PmlInput(Flowable):
    """Flowable that renders an interactive PDF form field.

    Supported field types are "text", "radio", "checkbox" and "select".

    Args:
        name: Form-field name emitted into the PDF.
        type: One of "text", "radio", "checkbox", "select".
        width: Field width in points.
        height: Field height in points.
        default: Initial value ("text"/"select"); for "checkbox" its
            truthiness decides whether the box starts checked.
        options: Choice list for "select" fields.
    """

    def __init__(self, name, type="text", width=10, height=10, default="", options=None):
        # The historic signature used a mutable default ([]) for options,
        # which would be shared between every instance; None is used as
        # the sentinel instead and a fresh list is created per instance.
        self.width = width
        self.height = height
        self.type = type
        self.name = name
        self.default = default
        self.options = [] if options is None else options

    def wrap(self, *args):
        """The field always occupies exactly its configured size."""
        return self.width, self.height

    def draw(self):
        """Emit the form field (plus a visible outline) on the canvas."""
        c = self.canv

        c.saveState()
        c.setFont("Helvetica", 10)
        if self.type == "text":
            pdfform.textFieldRelative(c, self.name, 0, 0, self.width, self.height)
            c.rect(0, 0, self.width, self.height)
        elif self.type == "radio":
            # NOTE(review): only the outline is drawn here; no radio
            # widget is emitted -- confirm whether this is intentional.
            c.rect(0, 0, self.width, self.height)
        elif self.type == "checkbox":
            if self.default:
                pdfform.buttonFieldRelative(c, self.name, "Yes", 0, 0)
            else:
                pdfform.buttonFieldRelative(c, self.name, "Off", 0, 0)
            c.rect(0, 0, self.width, self.height)
        elif self.type == "select":
            pdfform.selectFieldRelative(c, self.name, self.default, self.options, 0, 0, self.width, self.height)
            c.rect(0, 0, self.width, self.height)
        c.restoreState()
| apache-2.0 |
MichaelDrogalis/ansible | lib/ansible/plugins/action/__init__.py | 12 | 25200 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from six.moves import StringIO
import base64
import json
import os
import random
import stat
import sys
import tempfile
import time
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.executor.module_common import modify_module
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class ActionBase:

    '''
    This class is the base class for all action plugins, and defines
    code common to all actions. The base class handles the connection
    by putting/getting files and executing commands based on the current
    action in use.
    '''

    def __init__(self, task, connection, play_context, loader, templar, shared_loader_obj):
        # Collaborators handed in by the task executor:
        #   task              - the Task object being executed
        #   connection        - connection plugin used to reach the host
        #   play_context      - connection/become settings for this play
        #   loader            - DataLoader used for file access
        #   templar           - Templar used for variable templating
        #   shared_loader_obj - plugin loaders shared across the run
        self._task = task
        self._connection = connection
        self._play_context = play_context
        self._loader = loader
        self._templar = templar
        self._shared_loader_obj = shared_loader_obj
        self._display = display

        # Subclasses set this to False to reject check-mode runs.
        self._supports_check_mode = True

    def _configure_module(self, module_name, module_args, task_vars=dict()):
        '''
        Handles the loading and templating of the module code through the
        modify_module() function.

        Returns a (module_style, module_shebang, module_data) tuple.
        '''
        # NOTE(review): dict() as a default argument is shared between all
        # calls; safe only as long as callers never mutate task_vars.

        # Search module path(s) for named module.
        module_suffixes = getattr(self._connection, 'default_suffixes', None)

        # Check to determine if PowerShell modules are supported, and apply
        # some fixes (hacks) to module name + args.
        if module_suffixes and '.ps1' in module_suffixes:
            # Use Windows versions of stat/file/copy modules when called from
            # within other action plugins.
            if module_name in ('stat', 'file', 'copy') and self._task.action != module_name:
                module_name = 'win_%s' % module_name
            # Remove extra quotes surrounding path parameters before sending to module.
            if module_name in ('win_stat', 'win_file', 'win_copy', 'slurp') and module_args and hasattr(self._connection._shell, '_unquote'):
                for key in ('src', 'dest', 'path'):
                    if key in module_args:
                        module_args[key] = self._connection._shell._unquote(module_args[key])

        module_path = self._shared_loader_obj.module_loader.find_plugin(module_name, module_suffixes)
        if module_path is None:
            # Use Windows version of ping module to check module paths when
            # using a connection that supports .ps1 suffixes.
            if module_suffixes and '.ps1' in module_suffixes:
                ping_module = 'win_ping'
            else:
                ping_module = 'ping'
            # Probe for the ping module to distinguish "this one module is
            # missing" from "the whole core module tree is missing".
            module_path2 = self._shared_loader_obj.module_loader.find_plugin(ping_module, module_suffixes)
            if module_path2 is not None:
                raise AnsibleError("The module %s was not found in configured module paths" % (module_name))
            else:
                raise AnsibleError("The module %s was not found in configured module paths. " \
                    "Additionally, core modules are missing. If this is a checkout, " \
                    "run 'git submodule update --init --recursive' to correct this problem." % (module_name))

        # insert shared code and arguments into the module
        (module_data, module_style, module_shebang) = modify_module(module_path, module_args, task_vars=task_vars)

        return (module_style, module_shebang, module_data)

    def _compute_environment_string(self):
        '''
        Builds the environment string to be used when executing the remote task.
        '''

        final_environment = dict()
        if self._task.environment is not None:
            environments = self._task.environment
            if not isinstance(environments, list):
                environments = [ environments ]

            for environment in environments:
                if not isinstance(environment, dict):
                    raise AnsibleError("environment must be a dictionary, received %s (%s)" % (environment, type(environment)))
                # very deliberatly using update here instead of combine_vars, as
                # these environment settings should not need to merge sub-dicts
                final_environment.update(environment)

        return self._connection._shell.env_prefix(**final_environment)

    def _early_needs_tmp_path(self):
        '''
        Determines if a temp path should be created before the action is executed.
        '''
        # FIXME: modified from original, needs testing? Since this is now inside
        #        the action plugin, it should make it just this simple
        return getattr(self, 'TRANSFERS_FILES', False)

    def _late_needs_tmp_path(self, tmp, module_style):
        '''
        Determines if a temp path is required after some early actions have already taken place.
        '''
        if tmp and "tmp" in tmp:
            # tmp has already been created
            return False
        if not self._connection.has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES or self._play_context.become:
            # tmp is necessary to store the module source code
            # or we want to keep the files on the target system
            return True
        if module_style != "new":
            # even when conn has pipelining, old style modules need tmp to store arguments
            return True
        return False

    # FIXME: return a datastructure in this function instead of raising errors -
    #        the new executor pipeline handles it much better that way
    def _make_tmp_path(self):
        '''
        Create and return a temporary path on a remote box.
        '''

        basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
        use_system_tmp = False

        # Becoming an unprivileged user requires a system-wide tmp dir so
        # that user can read the files we upload.
        if self._play_context.become and self._play_context.become_user != 'root':
            use_system_tmp = True

        tmp_mode = None
        if self._play_context.remote_user != 'root' or self._play_context.become and self._play_context.become_user != 'root':
            tmp_mode = 'a+rx'

        cmd = self._connection._shell.mkdtemp(basefile, use_system_tmp, tmp_mode)
        self._display.debug("executing _low_level_execute_command to create the tmp path")
        result = self._low_level_execute_command(cmd, None, sudoable=False)
        self._display.debug("done with creation of tmp path")

        # error handling on this seems a little aggressive?
        if result['rc'] != 0:
            if result['rc'] == 5:
                output = 'Authentication failure.'
            elif result['rc'] == 255 and self._connection.transport in ('ssh',):
                if self._play_context.verbosity > 3:
                    output = 'SSH encountered an unknown error. The output was:\n%s' % (result['stdout']+result['stderr'])
                else:
                    output = 'SSH encountered an unknown error during the connection. We recommend you re-run the command using -vvvv, which will enable SSH debugging output to help diagnose the issue'
            elif 'No space left on device' in result['stderr']:
                output = result['stderr']
            else:
                output = 'Authentication or permission failure. In some cases, you may have been able to authenticate and did not have permissions on the remote directory. Consider changing the remote temp path in ansible.cfg to a path rooted in "/tmp". Failed command was: %s, exited with result %d' % (cmd, result['rc'])
            if 'stdout' in result and result['stdout'] != '':
                output = output + ": %s" % result['stdout']
            raise AnsibleError(output)

        # FIXME: do we still need to do this?
        #rc = self._connection._shell.join_path(utils.last_non_blank_line(result['stdout']).strip(), '')
        rc = self._connection._shell.join_path(result['stdout'].strip(), '').splitlines()[-1]

        # Catch failure conditions, files should never be
        # written to locations in /.
        if rc == '/':
            raise AnsibleError('failed to resolve remote temporary directory from %s: `%s` returned empty string' % (basefile, cmd))

        return rc

    def _remove_tmp_path(self, tmp_path):
        '''Remove a temporary path we created. '''

        # Only delete paths that look like ones we made ("-tmp-" marker),
        # never arbitrary directories.
        if tmp_path and "-tmp-" in tmp_path:
            cmd = self._connection._shell.remove(tmp_path, recurse=True)
            # If we have gotten here we have a working ssh configuration.
            # If ssh breaks we could leave tmp directories out on the remote system.
            self._display.debug("calling _low_level_execute_command to remove the tmp path")
            self._low_level_execute_command(cmd, None, sudoable=False)
            self._display.debug("done removing the tmp path")

    def _transfer_data(self, remote_path, data):
        '''
        Copies the module data out to the temporary module path.

        data may be a dict (jsonified first) or a string. Returns the
        remote_path it wrote to.
        '''

        if isinstance(data, dict):
            data = jsonify(data)

        # Stage the payload in a local temp file, then push it over the
        # connection; the local file is always removed afterwards.
        afd, afile = tempfile.mkstemp()
        afo = os.fdopen(afd, 'w')
        try:
            data = to_bytes(data, errors='strict')
            afo.write(data)
        except Exception as e:
            #raise AnsibleError("failure encoding into utf-8: %s" % str(e))
            raise AnsibleError("failure writing module data to temporary file for transfer: %s" % str(e))

        afo.flush()
        afo.close()

        try:
            self._connection.put_file(afile, remote_path)
        finally:
            os.unlink(afile)

        return remote_path

    def _remote_chmod(self, tmp, mode, path, sudoable=False):
        '''
        Issue a remote chmod command

        Returns the raw result dict from _low_level_execute_command.
        '''

        cmd = self._connection._shell.chmod(mode, path)
        self._display.debug("calling _low_level_execute_command to chmod the remote path")
        res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable)
        self._display.debug("done with chmod call")
        return res

    def _remote_checksum(self, tmp, path):
        '''
        Takes a remote checksum and returns 1 if no file

        On any failure to parse the remote output the sentinel string
        "INVALIDCHECKSUM" is returned so callers treat the file as changed.
        '''

        # FIXME: figure out how this will work, probably pulled from the variable manager data
        #python_interp = inject['hostvars'][inject['inventory_hostname']].get('ansible_python_interpreter', 'python')
        python_interp = 'python'
        cmd = self._connection._shell.checksum(path, python_interp)
        self._display.debug("calling _low_level_execute_command to get the remote checksum")
        data = self._low_level_execute_command(cmd, tmp, sudoable=True)
        self._display.debug("done getting the remote checksum")
        # FIXME: implement this function?
        #data2 = utils.last_non_blank_line(data['stdout'])
        try:
            # The checksum is the first token of the last non-empty stdout line.
            data2 = data['stdout'].strip().splitlines()[-1]
            if data2 == '':
                # this may happen if the connection to the remote server
                # failed, so just return "INVALIDCHECKSUM" to avoid errors
                return "INVALIDCHECKSUM"
            else:
                return data2.split()[0]
        except IndexError:
            # FIXME: this should probably not print to sys.stderr, but should instead
            #        fail in a more normal way?
            sys.stderr.write("warning: Calculating checksum failed unusually, please report this to the list so it can be fixed\n")
            sys.stderr.write("command: %s\n" % cmd)
            sys.stderr.write("----\n")
            sys.stderr.write("output: %s\n" % data)
            sys.stderr.write("----\n")
            # this will signal that it changed and allow things to keep going
            return "INVALIDCHECKSUM"

    def _remote_expand_user(self, path, tmp):
        ''' takes a remote path and performs tilde expansion on the remote host '''
        if not path.startswith('~'):  # FIXME: Windows paths may start with "~ instead of just ~
            return path

        # FIXME: Can't use os.path.sep for Windows paths.
        split_path = path.split(os.path.sep, 1)
        expand_path = split_path[0]
        # A bare "~" expands to the become user's home when privilege
        # escalation is active; "~user" is passed through unchanged.
        if expand_path == '~':
            if self._play_context.become and self._play_context.become_user:
                expand_path = '~%s' % self._play_context.become_user

        cmd = self._connection._shell.expand_user(expand_path)
        self._display.debug("calling _low_level_execute_command to expand the remote user path")
        data = self._low_level_execute_command(cmd, tmp, sudoable=False)
        self._display.debug("done expanding the remote user path")
        #initial_fragment = utils.last_non_blank_line(data['stdout'])
        initial_fragment = data['stdout'].strip().splitlines()[-1]

        if not initial_fragment:
            # Something went wrong trying to expand the path remotely.  Return
            # the original string
            return path

        if len(split_path) > 1:
            return self._connection._shell.join_path(initial_fragment, *split_path[1:])
        else:
            return initial_fragment

    def _filter_leading_non_json_lines(self, data):
        '''
        Used to avoid random output from SSH at the top of JSON output, like messages from
        tcagetattr, or where dropbear spews MOTD on every single command (which is nuts).

        need to filter anything which starts not with '{', '[', ', '=' or is an empty line.
        filter only leading lines since multiline JSON is valid.
        '''
        # NOTE(review): the code only checks for '{' and '[' prefixes, not
        # the quote/'=' cases mentioned above -- confirm which is intended.
        filtered_lines = StringIO()
        stop_filtering = False
        for line in data.splitlines():
            if stop_filtering or line.startswith('{') or line.startswith('['):
                stop_filtering = True
                filtered_lines.write(line + '\n')
        return filtered_lines.getvalue()

    def _execute_module(self, module_name=None, module_args=None, tmp=None, task_vars=dict(), persist_files=False, delete_remote_tmp=True):
        '''
        Transfer and run a module along with its arguments.

        Returns the parsed JSON result of the module as a dict (with
        failed/parsed markers when the output could not be parsed).
        '''
        # NOTE(review): task_vars=dict() is a shared mutable default; safe
        # only while nothing mutates it.

        # if a module name was not specified for this execution, use
        # the action from the task
        if module_name is None:
            module_name = self._task.action
        if module_args is None:
            module_args = self._task.args

        # set check mode in the module arguments, if required
        if self._play_context.check_mode and not self._task.always_run:
            if not self._supports_check_mode:
                raise AnsibleError("check mode is not supported for this operation")
            module_args['_ansible_check_mode'] = True

        # set no log in the module arguments, if required
        if self._play_context.no_log:
            module_args['_ansible_no_log'] = True

        self._display.debug("in _execute_module (%s, %s)" % (module_name, module_args))

        (module_style, shebang, module_data) = self._configure_module(module_name=module_name, module_args=module_args, task_vars=task_vars)
        if not shebang:
            raise AnsibleError("module is missing interpreter line")

        # a remote tmp path may be necessary and not already created
        remote_module_path = None
        if not tmp and self._late_needs_tmp_path(tmp, module_style):
            tmp = self._make_tmp_path()

        if tmp:
            remote_module_path = self._connection._shell.join_path(tmp, module_name)

        # FIXME: async stuff here?
        #if (module_style != 'new' or async_jid is not None or not self._connection._has_pipelining or not C.ANSIBLE_SSH_PIPELINING or C.DEFAULT_KEEP_REMOTE_FILES):
        if remote_module_path:
            self._display.debug("transferring module to remote")
            self._transfer_data(remote_module_path, module_data)
            self._display.debug("done transferring module to remote")

        environment_string = self._compute_environment_string()

        if tmp and "tmp" in tmp and self._play_context.become and self._play_context.become_user != 'root':
            # deal with possible umask issues once sudo'ed to other user
            self._remote_chmod(tmp, 'a+r', remote_module_path)

        cmd = ""
        in_data = None

        # FIXME: all of the old-module style and async stuff has been removed from here, and
        #        might need to be re-added (unless we decide to drop support for old-style modules
        #        at this point and rework things to support non-python modules specifically)
        if self._connection.has_pipelining and C.ANSIBLE_SSH_PIPELINING and not C.DEFAULT_KEEP_REMOTE_FILES:
            # Pipelining: feed module source on stdin instead of uploading.
            in_data = module_data
        else:
            if remote_module_path:
                cmd = remote_module_path

        rm_tmp = None
        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if not self._play_context.become or self._play_context.become_user == 'root':
                # not sudoing or sudoing to root, so can cleanup files in the same step
                rm_tmp = tmp

        cmd = self._connection._shell.build_module_command(environment_string, shebang, cmd, rm_tmp)
        cmd = cmd.strip()

        sudoable = True
        if module_name == "accelerate":
            # always run the accelerate module as the user
            # specified in the play, not the sudo_user
            sudoable = False

        self._display.debug("calling _low_level_execute_command() for command %s" % cmd)
        res = self._low_level_execute_command(cmd, tmp, sudoable=sudoable, in_data=in_data)
        self._display.debug("_low_level_execute_command returned ok")

        if tmp and "tmp" in tmp and not C.DEFAULT_KEEP_REMOTE_FILES and not persist_files and delete_remote_tmp:
            if self._play_context.become and self._play_context.become_user != 'root':
                # not sudoing to root, so maybe can't delete files as that other user
                # have to clean up temp files as original user in a second step
                cmd2 = self._connection._shell.remove(tmp, recurse=True)
                self._low_level_execute_command(cmd2, tmp, sudoable=False)

        try:
            data = json.loads(self._filter_leading_non_json_lines(res.get('stdout', '')))
        except ValueError:
            # not valid json, lets try to capture error
            data = dict(failed=True, parsed=False)
            if 'stderr' in res and res['stderr'].startswith('Traceback'):
                data['exception'] = res['stderr']
            else:
                data['msg'] = res.get('stdout', '')
                if 'stderr' in res:
                    data['msg'] += res['stderr']

        # pre-split stdout into lines, if stdout is in the data and there
        # isn't already a stdout_lines value there
        if 'stdout' in data and 'stdout_lines' not in data:
            data['stdout_lines'] = data.get('stdout', '').splitlines()

        # store the module invocation details back into the result
        if self._task.async != 0:
            data['invocation'] = dict(
                module_args = module_args,
                module_name = module_name,
            )

        self._display.debug("done with _execute_module (%s, %s)" % (module_name, module_args))
        return data

    def _low_level_execute_command(self, cmd, tmp, sudoable=True, in_data=None, executable=None):
        '''
        This is the function which executes the low level shell command, which
        may be commands to create/remove directories for temporary files, or to
        run the module code or python directly when pipelining.

        Returns a dict with rc, stdout, stdout_lines and stderr keys.
        '''

        if executable is not None:
            cmd = executable + ' -c ' + cmd

        self._display.debug("in _low_level_execute_command() (%s)" % (cmd,))
        if not cmd:
            # this can happen with powershell modules when there is no analog to a Windows command (like chmod)
            self._display.debug("no command, exiting _low_level_execute_command()")
            return dict(stdout='', stderr='')

        if sudoable:
            cmd = self._play_context.make_become_cmd(cmd, executable=executable)

        self._display.debug("executing the command %s through the connection" % cmd)
        rc, stdin, stdout, stderr = self._connection.exec_command(cmd, tmp, in_data=in_data, sudoable=sudoable)
        self._display.debug("command execution done")

        # stdout/stderr may come back as file-like objects; normalize to str.
        if not isinstance(stdout, basestring):
            out = ''.join(stdout.readlines())
        else:
            out = stdout

        if not isinstance(stderr, basestring):
            err = ''.join(stderr.readlines())
        else:
            err = stderr

        self._display.debug("done with _low_level_execute_command() (%s)" % (cmd,))
        if rc is None:
            rc = 0

        return dict(rc=rc, stdout=out, stdout_lines=out.splitlines(), stderr=err)

    def _get_first_available_file(self, faf, of=None, searchdir='files'):
        '''
        Deprecated helper: return the first path in faf that exists after
        templating, searching relative to the role path (or the template
        itself), falling back to the "of" path; None when nothing matches.
        '''
        self._display.deprecated("first_available_file, use with_first_found or lookup('first_found',...) instead")
        for fn in faf:
            fn_orig = fn
            fnt = self._templar.template(fn)
            if self._task._role is not None:
                lead = self._task._role._role_path
            else:
                lead = fnt
            fnd = self._loader.path_dwim_relative(lead, searchdir, fnt)

            if not os.path.exists(fnd) and of is not None:
                if self._task._role is not None:
                    lead = self._task._role._role_path
                else:
                    lead = of
                fnd = self._loader.path_dwim_relative(lead, searchdir, of)

            if os.path.exists(fnd):
                return fnd

        return None

    def _get_diff_data(self, tmp, destination, source, task_vars, source_file=True):
        '''
        Build a before/after diff dict for destination vs. source, using
        the file/slurp modules for the remote side. Binary or oversized
        content is flagged instead of included.
        '''

        diff = {}
        self._display.debug("Going to peek to see if file has changed permissions")
        peek_result = self._execute_module(module_name='file', module_args=dict(path=destination, diff_peek=True), task_vars=task_vars, persist_files=True)

        if not('failed' in peek_result and peek_result['failed']) or peek_result.get('rc', 0) == 0:

            if peek_result['state'] == 'absent':
                diff['before'] = ''
            elif peek_result['appears_binary']:
                diff['dst_binary'] = 1
            elif peek_result['size'] > C.MAX_FILE_SIZE_FOR_DIFF:
                diff['dst_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
            else:
                self._display.debug("Slurping the file %s" % source)
                dest_result = self._execute_module(module_name='slurp', module_args=dict(path=destination), task_vars=task_vars, persist_files=True)
                if 'content' in dest_result:
                    dest_contents = dest_result['content']
                    if dest_result['encoding'] == 'base64':
                        dest_contents = base64.b64decode(dest_contents)
                    else:
                        raise AnsibleError("unknown encoding in content option, failed: %s" % dest_result)
                    diff['before_header'] = destination
                    diff['before'] = dest_contents

            if source_file:
                self._display.debug("Reading local copy of the file %s" % source)
                try:
                    # First 8KB is sniffed for binary content (NUL bytes).
                    # NOTE(review): src is never closed, and the later
                    # src.read() resumes AFTER these 8192 bytes, so
                    # diff['after'] omits the start of the file -- verify.
                    src = open(source)
                    src_contents = src.read(8192)
                    st = os.stat(source)
                except Exception as e:
                    raise AnsibleError("Unexpected error while reading source (%s) for diff: %s " % (source, str(e)))
                if "\x00" in src_contents:
                    diff['src_binary'] = 1
                elif st[stat.ST_SIZE] > C.MAX_FILE_SIZE_FOR_DIFF:
                    diff['src_larger'] = C.MAX_FILE_SIZE_FOR_DIFF
                else:
                    diff['after_header'] = source
                    diff['after'] = src.read()
            else:
                self._display.debug("source of file passed in")
                diff['after_header'] = 'dynamically generated'
                diff['after'] = source

        return diff
| gpl-3.0 |
jasonacox/SentryPi | sentrypi-service.py | 1 | 4227 | #!/usr/bin/python
#
# Raspberry Pi Home Sentry - RPI script to monitor home
#
# Version: 0.1
#
# Descripton:
# Service to watch door status and alert if door is left open too long.
# Uses motion sensor to detect motion and delay alert.
# Illuminates LED when door is open.
#
# Jason A. Cox, @jasonacox
# https://github.com/jasonacox/SentryPI
# load libraries
import os
import RPi.GPIO as io
import time
import boto3
import utils
# set GPIO mode to BCM - allows us to use GPIO number instead of pin number
io.setmode(io.BCM)
io.setwarnings(False)

#
# Config Settings
#

# Alert timeouts - send notification
# ALERTTIME:     seconds the door may stand open before alerting
# ALERTNOMOTION: seconds of stillness required before the alert fires
# ALERTMAX:     hard ceiling after which we alert regardless of motion
ALERTTIME = 300
ALERTNOMOTION = 100
ALERTMAX = 600

# Create an SNS client for alerts
# (credentials redacted with placeholder dashes)
client = boto3.client(
    "sns",
    aws_access_key_id="--------------------",
    aws_secret_access_key="----------------------------------------",
    region_name="us-east-1")
SNSTopicArn = "arn:aws:sns:us-east-1:------------:SentryPiAlerts"

# Mute alerting file - its mere existence silences notifications
MUTE_FILE = "/var/www-tmp/sentrypi-mute"

# Motion file - persistent counter of motion events, read by the web UI
MOTION_FILE = "/var/www-tmp/sentrypi-movement"

# set GPIO pins to use
DOOR_PIN = 23
LED_PIN = 17
MOTION_PIN = 24

# AWS IoT Command - shell script run to publish the current door state
DoorCMD = "/bin/bash /home/pi/iot/door-pub.sh"

#
# Start
#
print "SentryPi Started\n\n"
print("Sentry Activated - Watching: GPIO %d" % DOOR_PIN)
print(" Using LED on GPIO %d" % LED_PIN)

# use the built-in "pull-up" resistor
io.setup(DOOR_PIN, io.IN, pull_up_down=io.PUD_UP) # activate input
io.setup(LED_PIN, io.OUT) # activate LED
io.setup(MOTION_PIN, io.IN)

# States for door: 0=closed, 1=open, 2=init
door = 2
watchcount = 0      # seconds the door has been open
alertset = False    # True once an "open" notification has been sent
count = 0           # cumulative motion events (mirrored to MOTION_FILE)
lastmotion = 0      # watchcount value at the most recent motion event

# log state to AWS IoT
os.system(DoorCMD)

#
# Main Loop - one iteration roughly per second
#
try:
    while True:
        # check for motion
        if (io.input(MOTION_PIN) == True):
            lastmotion = watchcount
            # Bump the persistent movement counter; recreate the file
            # from scratch if it is missing or unreadable.
            try:
                with open(MOTION_FILE, 'r+') as f:
                    count = int(f.readline()) + 1
                    f.seek(0)
                    f.write(str(count))
            except:
                with open(MOTION_FILE, 'w') as f:
                    f.write("0\n")
        # if switch is open - DOOR CLOSED
        if (io.input(DOOR_PIN) == True and door != 0):
            door = 0
            print("Door closed after %s" % utils.sectext(watchcount))
            # do some action
            io.output(LED_PIN, io.LOW)
            # log in AWS IoT
            os.system(DoorCMD)
            if (alertset == True):
                # ALERT - send notification
                alertset = False
                # verify the notification is not muted
                if (os.path.isfile(MUTE_FILE) <> True):
                    print(" - Send notification")
                    client.publish(
                        Message="Garage Closed %s" % utils.sectext(watchcount),
                        TopicArn="%s" % SNSTopicArn)
        # if switch is closed - DOOR OPEN
        if (io.input(DOOR_PIN) == False and door != 1):
            door = 1
            watchcount = 0
            lastmotion = 0
            print "Door open"
            io.output(LED_PIN, io.HIGH)
            # log in AWS IoT
            os.system(DoorCMD)
        if (door == 1):
            watchcount = watchcount + 1
            io.output(LED_PIN, io.HIGH)
            print("\rCount: %ds" % watchcount)
            # Alert once the door has been open past ALERTTIME with no
            # recent motion, or unconditionally past ALERTMAX.
            if (watchcount > ALERTTIME and
                    (((watchcount - lastmotion) > ALERTNOMOTION) or
                     watchcount > ALERTMAX) and alertset == False):
                # ALERT - send notification
                alertset = True
                if (os.path.isfile(MUTE_FILE) <> True):
                    print(" - Send notification")
                    client.publish(
                        Message="Garage Open %s" % utils.sectext(watchcount),
                        TopicArn="%s" % SNSTopicArn)
        if (alertset == True):
            # ALERT - flash LED (brief 0.2s dark blink while alerting)
            io.output(LED_PIN, io.LOW)
            time.sleep(0.2) # brief off-flash, not a full second
            io.output(LED_PIN, io.HIGH)
        time.sleep(1) # 1 second wait
# if switch is open - DOOR CLOSED
# while True:
except KeyboardInterrupt, e:
    print("Stopping SentryPi...")
| mit |
kmggh/monopoly | kmg/monopoly/player.py | 1 | 3090 | # Mon 2013-07-29 06:59:08 -0400
# Copyright (c) 2013 by Ken Guyton. All Rights Reserved.
"""Represent a player."""
import kmg.monopoly.property as game_property
class IllegalPieceError(Exception):
    """Raise if an illegal piece name was added."""


class Piece(object):
    """A game piece token.

    Attributes:
      name: str. One of the names in VALID_NAMES.
    """

    # The closed set of token names the game recognises.
    VALID_NAMES = ('Hat', 'Car', 'Dog', 'Shoe', 'Thimble')

    def __init__(self, name):
        """Create a game piece.

        Args:
          name: str.

        Raises:
          IllegalPieceError: if name is not a recognised piece name.
        """
        if name in self.VALID_NAMES:
            self.name = name
        else:
            raise IllegalPieceError('{0} is not a valid piece name.'.format(name))

    def __repr__(self):
        return '{0}({1!r})'.format(type(self).__name__, self.name)

    def __eq__(self, other):
        """Test if two pieces are equal.  Only the name matters."""
        # Comparing against a non-Piece no longer raises AttributeError;
        # Python falls back to identity/False via NotImplemented.
        if not isinstance(other, Piece):
            return NotImplemented
        return self.name == other.name

    def __ne__(self, other):
        # Defined explicitly so Python 2 callers get consistent results.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Keep pieces usable as dict keys / set members now that __eq__ is
        # overridden (Python 3 would otherwise make the class unhashable).
        return hash(self.name)


HAT = Piece('Hat')
CAR = Piece('Car')
DOG = Piece('Dog')
SHOE = Piece('Shoe')
THIMBLE = Piece('Thimble')
class NotEnoughMoneyError(Exception):
    """Raise if a player cannot afford a payment."""


class Player(object):
    """A player with a name and piece on the board.

    Attributes:
      name: str. The name of the player. Can be just a first name or whatever.
      piece: Piece. A piece object that's the player's chosen token.
      balance: int. The starting amount of money in dollars.
      position: int. The Board location. 0 = GO.
      properties: game_property.PlayerProperties. Properties owned.
      in_jail: bool. True while the player is in jail.
      passed_go: bool. Set once the player has wrapped past GO.
      doubles: int. Count of consecutive doubles rolled.
    """

    def __init__(self, name, piece):
        """Initialize the player.

        Args:
          name: str.
          piece: Piece.
        """
        self.name = name
        self.piece = piece
        self.balance = 1500
        self.position = 0
        self.properties = game_property.PlayerProperties()
        self.in_jail = False
        self.passed_go = False
        self.doubles = 0

    def pay(self, amount):
        """Remove money from the player's balance.

        Args:
          amount: int. Dollars.

        Raises:
          NotEnoughMoneyError: if amount exceeds the current balance.
        """
        if amount > self.balance:
            raise NotEnoughMoneyError("The player doesn't have {0}.".format(amount))
        else:
            self.balance -= amount

    def receive(self, amount):
        """Add money to the player's balance.

        Args:
          amount: int. Dollars.
        """
        self.balance += amount

    def move_to(self, position):
        """Move the player to a new position.

        Args:
          position: int. The board location to occupy (0 = GO).
        """
        self.position = position

    def move(self, amount):
        """Move a number of steps, wrapping around the 40-square board.

        Passing position 39 wraps back to the start and records that the
        player passed GO.
        """
        self.position += amount
        if self.position > 39:
            self.position -= 40
            self.passed_go = True

    def go_to_jail(self):
        """Go to jail.  Change the in_jail state to True."""
        self.in_jail = True

    def leave_jail(self):
        """Leave jail.  Change in_jail to False."""
        self.in_jail = False

    def receive_property(self, a_property):
        """Receive a property.

        Args:
          a_property: game_property.Property.
        """
        self.properties.load(a_property)

    def give_property(self, short_name):
        """Give away a property.

        Args:
          short_name: str. The short name of the property.
        """
        self.properties.remove(short_name)

    def net_worth(self):
        """Report the net worth of the player.

        This is the total that doesn't consider mortgaged property.

        Returns:
          An int.
        """
        net = self.balance
        for a_property in self.properties.dict.values():
            net += a_property.price
        return net
| artistic-2.0 |
lorenzo-stoakes/linux-historical | scripts/gdb/linux/modules.py | 41 | 2749 | #
# gdb helper commands and functions for Linux kernel debugging
#
# module tools
#
# Copyright (c) Siemens AG, 2013
#
# Authors:
# Jan Kiszka <jan.kiszka@siemens.com>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import cpus, utils
# Cached gdb type descriptor for "struct module"; resolved lazily so the
# script loads before the kernel's debug symbols are available.
module_type = utils.CachedType("struct module")


def module_list():
    """Yield every loaded module as a ``struct module *`` gdb value.

    Walks the kernel's global "modules" list_head, converting each list
    entry back to its containing struct module.
    """
    # Reading the module-level cache needs no "global" declaration (the
    # original one was redundant -- the name is never reassigned here).
    module_ptr_type = module_type.get_type().pointer()
    modules = gdb.parse_and_eval("modules")
    entry = modules['next']
    end_of_list = modules.address

    while entry != end_of_list:
        yield utils.container_of(entry, module_ptr_type, "list")
        entry = entry['next']
def find_module_by_name(name):
    """Return the loaded module whose name matches, or None if not found."""
    matches = (mod for mod in module_list()
               if mod['name'].string() == name)
    return next(matches, None)
class LxModule(gdb.Function):
    """Find module by name and return the module variable.

$lx_module("MODULE"): Given the name MODULE, iterate over all loaded modules
of the target and return that module variable which MODULE matches."""
    # (Docstring text kept verbatim: gdb displays it as the user-visible
    # help for the $lx_module convenience function.)

    def __init__(self):
        super(LxModule, self).__init__("lx_module")

    def invoke(self, mod_name):
        requested = mod_name.string()
        module = find_module_by_name(requested)
        # Guard-clause style: fail fast when the module is not loaded.
        if module is None:
            raise gdb.GdbError("Unable to find MODULE " + requested)
        return module.dereference()


LxModule()
class LxLsmod(gdb.Command):
    """List currently loaded modules."""

    # Cached type for the per-dependency list nodes.
    _module_use_type = utils.CachedType("struct module_use")

    def __init__(self):
        super(LxLsmod, self).__init__("lx-lsmod", gdb.COMMAND_DATA)

    def invoke(self, arg, from_tty):
        # Header row; widen the Address column on 64-bit targets.
        gdb.write(
            "Address{0} Module Size Used by\n".format(
                " " if utils.get_long_type().sizeof == 8 else ""))

        for module in module_list():
            layout = module['core_layout']
            # str(layout['base']) looks like "0xADDR <symbol>"; keep only
            # the address token.
            gdb.write("{address} {name:<19} {size:>8} {ref}".format(
                address=str(layout['base']).split()[0],
                name=module['name'].string(),
                size=str(layout['size']),
                ref=str(module['refcnt']['counter'])))

            # Comma-separated list of modules that depend on this one,
            # walked via the module_use source_list.
            source_list = module['source_list']
            t = self._module_use_type.get_type().pointer()
            entry = source_list['next']
            first = True
            while entry != source_list.address:
                use = utils.container_of(entry, t, "source_list")
                gdb.write("{separator}{name}".format(
                    separator=" " if first else ",",
                    name=use['source']['name'].string()))
                first = False
                entry = entry['next']

            gdb.write("\n")


LxLsmod()
| gpl-2.0 |
bcheung92/Paperproject | gem5/src/dev/x86/I8254.py | 69 | 1912 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
from X86IntPin import X86IntSourcePin
# gem5 configuration wrapper for the x86 i8254 programmable interval timer;
# the actual device model is the C++ class declared in the header below.
class I8254(BasicPioDevice):
    type = 'I8254'
    cxx_class = 'X86ISA::I8254'
    cxx_header = "dev/x86/i8254.hh"
    # Interrupt output pin; timer interrupts are signalled through it.
    int_pin = Param.X86IntSourcePin(X86IntSourcePin(),
            'Pin to signal timer interrupts to')
| mit |
rodorad/spark-tk | python/sparktk/frame/ops/power_iteration_clustering.py | 13 | 4211 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
# Result of power_iteration_clustering: the cluster-assignment frame, the
# number of clusters k, and a map of cluster id to cluster size.
PicResult = namedtuple("PicResult", "frame k cluster_sizes")
def power_iteration_clustering(self, source_column, destination_column, similarity_column, k=2, max_iterations=100,
                               initialization_mode = "random"):
    """Cluster the graph described by this frame with Power Iteration Clustering (PIC).

    PIC finds a low-dimensional embedding of the vertices using truncated power
    iteration on the normalized pair-wise similarity matrix and clusters that
    embedding.  Each row of the frame is one weighted edge of the graph.

    :param source_column: (str) Name of the column containing the source node
    :param destination_column: (str) Name of the column containing the destination node
    :param similarity_column: (str) Name of the column containing the similarity
    :param k: (Optional(int)) Number of clusters to produce.  Default is 2
    :param max_iterations: (Optional(int)) Maximum number of power-iteration steps.  Default is 100
    :param initialization_mode: (Optional(str)) Either "random" (random vertex
        properties) or "degree" (normalized sum similarities).  Default is "random"
    :return: (namedtuple) PicResult holding the assignment frame (node, cluster),
        k, and a map of cluster id to cluster size
    """
    # All of the real work happens on the Scala/Spark side.
    scala_result = self._scala.powerIterationClustering(source_column,
                                                        destination_column,
                                                        similarity_column,
                                                        k,
                                                        max_iterations,
                                                        initialization_mode)
    k_value = scala_result.k()
    sizes = self._tc.jutils.convert.scala_map_to_python(scala_result.clusterSizes())
    # Imported lazily to avoid a circular import with sparktk.frame.frame.
    from sparktk.frame.frame import Frame
    assignment_frame = Frame(self._tc, scala_result.clusterMapFrame())
    return PicResult(frame=assignment_frame, k=k_value, cluster_sizes=sizes)
| apache-2.0 |
Trafire/purchaseorders | lib/python2.7/site-packages/Jinja2-2.8-py2.7.egg/jinja2/runtime.py | 335 | 22530 | # -*- coding: utf-8 -*-
"""
jinja2.runtime
~~~~~~~~~~~~~~
Runtime helpers.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import sys
from itertools import chain
from jinja2.nodes import EvalContext, _context_function_types
from jinja2.utils import Markup, soft_unicode, escape, missing, concat, \
internalcode, object_type_repr
from jinja2.exceptions import UndefinedError, TemplateRuntimeError, \
TemplateNotFound
from jinja2._compat import imap, text_type, iteritems, \
implements_iterator, implements_to_string, string_types, PY2
# these variables are exported to the template runtime
__all__ = ['LoopContext', 'TemplateReference', 'Macro', 'Markup',
           'TemplateRuntimeError', 'missing', 'concat', 'escape',
           'markup_join', 'unicode_join', 'to_string', 'identity',
           'TemplateNotFound', 'make_logging_undefined']
#: the name of the function that is used to convert something into
#: a string. We can just use the text type here.
to_string = text_type
#: the identity function. Useful for certain things in the environment
identity = lambda x: x
#: unique sentinel object used by LoopContext to mark exhausted iteration
_last_iteration = object()
def markup_join(seq):
    """Concatenation that escapes if necessary and converts to unicode."""
    pieces = []
    it = imap(soft_unicode, seq)
    for piece in it:
        pieces.append(piece)
        # As soon as one piece carries markup, escape-join everything:
        # what we collected so far plus whatever the iterator still holds.
        if hasattr(piece, '__html__'):
            return Markup(u'').join(chain(pieces, it))
    return concat(pieces)
def unicode_join(seq):
    """Simple args to unicode conversion and concatenation."""
    converted = imap(text_type, seq)
    return concat(converted)
def new_context(environment, template_name, blocks, vars=None,
                shared=None, globals=None, locals=None):
    """Internal helper that assembles the parent namespace and creates a
    new :class:`Context` for a template invocation."""
    values = vars if vars is not None else {}
    # A shared context reuses the caller's dict directly; otherwise the
    # globals are merged with the template variables into a fresh dict.
    parent = values if shared else dict(globals or (), **values)
    if locals:
        # if the parent is shared a copy should be created because
        # we don't want to modify the dict passed
        if shared:
            parent = dict(parent)
        for name, value in iteritems(locals):
            if name.startswith('l_') and value is not missing:
                parent[name[2:]] = value
    return environment.context_class(environment, parent, template_name,
                                     blocks)
class TemplateReference(object):
    """The `self` in templates."""
    def __init__(self, context):
        self.__context = context
    def __getitem__(self, name):
        # Hand out the top-most implementation of the requested block.
        stack = self.__context.blocks[name]
        return BlockReference(name, self.__context, stack, 0)
    def __repr__(self):
        return '<%s %r>' % (
            type(self).__name__,
            self.__context.name
        )
class Context(object):
    """The template context holds the variables of a template. It stores the
    values passed to the template and also the names the template exports.
    Creating instances is neither supported nor useful as it's created
    automatically at various stages of the template evaluation and should not
    be created by hand.
    The context is immutable. Modifications on :attr:`parent` **must not**
    happen and modifications on :attr:`vars` are allowed from generated
    template code only. Template filters and global functions marked as
    :func:`contextfunction`\s get the active context passed as first argument
    and are allowed to access the context read-only.
    The template context supports read only dict operations (`get`,
    `keys`, `values`, `items`, `iterkeys`, `itervalues`, `iteritems`,
    `__getitem__`, `__contains__`). Additionally there is a :meth:`resolve`
    method that doesn't fail with a `KeyError` but returns an
    :class:`Undefined` object for missing variables.
    """
    __slots__ = ('parent', 'vars', 'environment', 'eval_ctx', 'exported_vars',
                 'name', 'blocks', '__weakref__')
    def __init__(self, environment, parent, name, blocks):
        self.parent = parent
        self.vars = {}
        self.environment = environment
        self.eval_ctx = EvalContext(self.environment, name)
        self.exported_vars = set()
        self.name = name
        # create the initial mapping of blocks. Whenever template inheritance
        # takes place the runtime will update this mapping with the new blocks
        # from the template.
        self.blocks = dict((k, [v]) for k, v in iteritems(blocks))
    def super(self, name, current):
        """Render a parent block."""
        try:
            blocks = self.blocks[name]
            index = blocks.index(current) + 1
            # probe: raises IndexError (a LookupError) when no parent exists
            blocks[index]
        except LookupError:
            return self.environment.undefined('there is no parent block '
                                              'called %r.' % name,
                                              name='super')
        return BlockReference(name, self, blocks, index)
    def get(self, key, default=None):
        """Returns an item from the template context, if it doesn't exist
        `default` is returned.
        """
        try:
            return self[key]
        except KeyError:
            return default
    def resolve(self, key):
        """Looks up a variable like `__getitem__` or `get` but returns an
        :class:`Undefined` object with the name of the name looked up.
        """
        # Template-local vars shadow the parent (globals) namespace.
        if key in self.vars:
            return self.vars[key]
        if key in self.parent:
            return self.parent[key]
        return self.environment.undefined(name=key)
    def get_exported(self):
        """Get a new dict with the exported variables."""
        return dict((k, self.vars[k]) for k in self.exported_vars)
    def get_all(self):
        """Return a copy of the complete context as dict including the
        exported variables.
        """
        return dict(self.parent, **self.vars)
    @internalcode
    def call(__self, __obj, *args, **kwargs):
        """Call the callable with the arguments and keyword arguments
        provided but inject the active context or environment as first
        argument if the callable is a :func:`contextfunction` or
        :func:`environmentfunction`.
        """
        if __debug__:
            __traceback_hide__ = True  # noqa
        # Allow callable classes to take a context
        fn = __obj.__call__
        for fn_type in ('contextfunction',
                        'evalcontextfunction',
                        'environmentfunction'):
            if hasattr(fn, fn_type):
                __obj = fn
                break
        # Depending on the marker attribute, prepend the context, the
        # eval context or the environment to the positional arguments.
        if isinstance(__obj, _context_function_types):
            if getattr(__obj, 'contextfunction', 0):
                args = (__self,) + args
            elif getattr(__obj, 'evalcontextfunction', 0):
                args = (__self.eval_ctx,) + args
            elif getattr(__obj, 'environmentfunction', 0):
                args = (__self.environment,) + args
        try:
            return __obj(*args, **kwargs)
        except StopIteration:
            return __self.environment.undefined('value was undefined because '
                                                'a callable raised a '
                                                'StopIteration exception')
    def derived(self, locals=None):
        """Internal helper function to create a derived context."""
        context = new_context(self.environment, self.name, {},
                              self.parent, True, None, locals)
        context.vars.update(self.vars)
        context.eval_ctx = self.eval_ctx
        context.blocks.update((k, list(v)) for k, v in iteritems(self.blocks))
        return context
    # Factory for read-only dict proxies that operate on get_all();
    # deleted again below once the proxies are bound.
    def _all(meth):
        proxy = lambda self: getattr(self.get_all(), meth)()
        proxy.__doc__ = getattr(dict, meth).__doc__
        proxy.__name__ = meth
        return proxy
    keys = _all('keys')
    values = _all('values')
    items = _all('items')
    # not available on python 3
    if PY2:
        iterkeys = _all('iterkeys')
        itervalues = _all('itervalues')
        iteritems = _all('iteritems')
    del _all
    def __contains__(self, name):
        return name in self.vars or name in self.parent
    def __getitem__(self, key):
        """Lookup a variable or raise `KeyError` if the variable is
        undefined.
        """
        item = self.resolve(key)
        if isinstance(item, Undefined):
            raise KeyError(key)
        return item
    def __repr__(self):
        return '<%s %s of %r>' % (
            self.__class__.__name__,
            repr(self.get_all()),
            self.name
        )
# register the context as mapping if possible
try:
    # ``Mapping`` lives in ``collections.abc`` on Python 3 (the old
    # ``collections`` alias was removed in 3.10, which would have made this
    # registration silently stop happening); try the modern location first
    # and fall back for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:
        from collections import Mapping
    Mapping.register(Context)
except ImportError:
    pass
class BlockReference(object):
    """One block on a template reference."""
    def __init__(self, name, context, stack, depth):
        self.name = name
        self._context = context
        self._stack = stack
        self._depth = depth
    @property
    def super(self):
        """Super the block."""
        parent_depth = self._depth + 1
        # No deeper implementation on the stack means there is no parent.
        if parent_depth >= len(self._stack):
            return self._context.environment. \
                undefined('there is no parent block called %r.' %
                          self.name, name='super')
        return BlockReference(self.name, self._context, self._stack,
                              parent_depth)
    @internalcode
    def __call__(self):
        rendered = concat(self._stack[self._depth](self._context))
        if self._context.eval_ctx.autoescape:
            rendered = Markup(rendered)
        return rendered
class LoopContext(object):
    """A loop context for dynamic iteration."""
    def __init__(self, iterable, recurse=None, depth0=0):
        self._iterator = iter(iterable)
        self._recurse = recurse
        # Look-ahead element: the context always stays one item ahead of
        # what it has handed out so that `last` can be answered lazily.
        self._after = self._safe_next()
        self.index0 = -1
        self.depth0 = depth0
        # try to get the length of the iterable early. This must be done
        # here because there are some broken iterators around where there
        # __len__ is the number of iterations left (i'm looking at your
        # listreverseiterator!).
        try:
            self._length = len(iterable)
        except (TypeError, AttributeError):
            self._length = None
    def cycle(self, *args):
        """Cycles among the arguments with the current loop index."""
        if not args:
            raise TypeError('no items for cycling given')
        return args[self.index0 % len(args)]
    first = property(lambda x: x.index0 == 0)
    last = property(lambda x: x._after is _last_iteration)
    index = property(lambda x: x.index0 + 1)
    revindex = property(lambda x: x.length - x.index0)
    revindex0 = property(lambda x: x.length - x.index)
    depth = property(lambda x: x.depth0 + 1)
    def __len__(self):
        return self.length
    def __iter__(self):
        return LoopContextIterator(self)
    def _safe_next(self):
        # Returns the sentinel instead of raising so look-ahead is cheap.
        try:
            return next(self._iterator)
        except StopIteration:
            return _last_iteration
    @internalcode
    def loop(self, iterable):
        if self._recurse is None:
            raise TypeError('Tried to call non recursive loop. Maybe you '
                            "forgot the 'recursive' modifier.")
        return self._recurse(iterable, self._recurse, self.depth0 + 1)
    # a nifty trick to enhance the error message if someone tried to call
    # the the loop without or with too many arguments.
    __call__ = loop
    del loop
    @property
    def length(self):
        if self._length is None:
            # if was not possible to get the length of the iterator when
            # the loop context was created (ie: iterating over a generator)
            # we have to convert the iterable into a sequence and use the
            # length of that + the number of iterations so far.
            iterable = tuple(self._iterator)
            self._iterator = iter(iterable)
            # +2 accounts for the current item and the look-ahead item.
            iterations_done = self.index0 + 2
            self._length = len(iterable) + iterations_done
        return self._length
    def __repr__(self):
        return '<%s %r/%r>' % (
            self.__class__.__name__,
            self.index,
            self.length
        )
@implements_iterator
class LoopContextIterator(object):
    """The iterator for a loop context."""
    __slots__ = ('context',)
    def __init__(self, context):
        self.context = context
    def __iter__(self):
        return self
    def __next__(self):
        loop = self.context
        loop.index0 += 1
        # The sentinel in the look-ahead slot means the source is exhausted.
        if loop._after is _last_iteration:
            raise StopIteration()
        current = loop._after
        loop._after = loop._safe_next()
        return current, loop
class Macro(object):
    """Wraps a macro function."""
    def __init__(self, environment, func, name, arguments, defaults,
                 catch_kwargs, catch_varargs, caller):
        self._environment = environment
        self._func = func
        self._argument_count = len(arguments)
        self.name = name
        self.arguments = arguments
        self.defaults = defaults
        self.catch_kwargs = catch_kwargs
        self.catch_varargs = catch_varargs
        self.caller = caller
    @internalcode
    def __call__(self, *args, **kwargs):
        # try to consume the positional arguments
        arguments = list(args[:self._argument_count])
        off = len(arguments)
        # if the number of arguments consumed is not the number of
        # arguments expected we start filling in keyword arguments
        # and defaults.
        if off != self._argument_count:
            for idx, name in enumerate(self.arguments[len(arguments):]):
                try:
                    value = kwargs.pop(name)
                except KeyError:
                    try:
                        # negative index: lines up trailing parameters with
                        # trailing defaults
                        value = self.defaults[idx - self._argument_count + off]
                    except IndexError:
                        value = self._environment.undefined(
                            'parameter %r was not provided' % name, name=name)
                arguments.append(value)
        # it's important that the order of these arguments does not change
        # if not also changed in the compiler's `function_scoping` method.
        # the order is caller, keyword arguments, positional arguments!
        if self.caller:
            caller = kwargs.pop('caller', None)
            if caller is None:
                caller = self._environment.undefined('No caller defined',
                                                     name='caller')
            arguments.append(caller)
        if self.catch_kwargs:
            arguments.append(kwargs)
        elif kwargs:
            raise TypeError('macro %r takes no keyword argument %r' %
                            (self.name, next(iter(kwargs))))
        if self.catch_varargs:
            arguments.append(args[self._argument_count:])
        elif len(args) > self._argument_count:
            raise TypeError('macro %r takes not more than %d argument(s)' %
                            (self.name, len(self.arguments)))
        return self._func(*arguments)
    def __repr__(self):
        return '<%s %s>' % (
            self.__class__.__name__,
            self.name is None and 'anonymous' or repr(self.name)
        )
@implements_to_string
class Undefined(object):
    """The default undefined type. This undefined type can be printed and
    iterated over, but every other access will raise an :exc:`jinja2.exceptions.UndefinedError`:
    >>> foo = Undefined(name='foo')
    >>> str(foo)
    ''
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ('_undefined_hint', '_undefined_obj', '_undefined_name',
                 '_undefined_exception')
    def __init__(self, hint=None, obj=missing, name=None, exc=UndefinedError):
        self._undefined_hint = hint
        self._undefined_obj = obj
        self._undefined_name = name
        self._undefined_exception = exc
    @internalcode
    def _fail_with_undefined_error(self, *args, **kwargs):
        """Regular callback function for undefined objects that raises an
        `jinja2.exceptions.UndefinedError` on call.
        """
        # An explicit hint wins; otherwise build one from the object/name.
        if self._undefined_hint is None:
            if self._undefined_obj is missing:
                hint = '%r is undefined' % self._undefined_name
            elif not isinstance(self._undefined_name, string_types):
                hint = '%s has no element %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
            else:
                hint = '%r has no attribute %r' % (
                    object_type_repr(self._undefined_obj),
                    self._undefined_name
                )
        else:
            hint = self._undefined_hint
        raise self._undefined_exception(hint)
    @internalcode
    def __getattr__(self, name):
        # Dunder lookups raise AttributeError so that hasattr()-style
        # protocol probes (e.g. for __html__) keep working.
        if name[:2] == '__':
            raise AttributeError(name)
        return self._fail_with_undefined_error()
    # Every operator that would compute with an undefined value fails loudly.
    __add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
        __truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
        __mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
        __getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
        __float__ = __complex__ = __pow__ = __rpow__ = \
        _fail_with_undefined_error
    def __eq__(self, other):
        return type(self) is type(other)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return id(type(self))
    def __str__(self):
        return u''
    def __len__(self):
        return 0
    def __iter__(self):
        # empty generator: iterating an undefined yields nothing
        if 0:
            yield None
    def __nonzero__(self):
        return False
    __bool__ = __nonzero__
    def __repr__(self):
        return 'Undefined'
def make_logging_undefined(logger=None, base=None):
    """Given a logger object this returns a new undefined class that will
    log certain failures. It will log iterations and printing. If no
    logger is given a default logger is created.
    Example::
        logger = logging.getLogger(__name__)
        LoggingUndefined = make_logging_undefined(
            logger=logger,
            base=Undefined
        )
    .. versionadded:: 2.8
    :param logger: the logger to use. If not provided, a default logger
                   is created.
    :param base: the base class to add logging functionality to. This
                 defaults to :class:`Undefined`.
    """
    if logger is None:
        import logging
        logger = logging.getLogger(__name__)
        logger.addHandler(logging.StreamHandler(sys.stderr))
    if base is None:
        base = Undefined
    # Builds the warning text the same way Undefined._fail_with_undefined_error
    # builds its error hint; shared by the wrapped dunders below.
    def _log_message(undef):
        if undef._undefined_hint is None:
            if undef._undefined_obj is missing:
                hint = '%s is undefined' % undef._undefined_name
            elif not isinstance(undef._undefined_name, string_types):
                hint = '%s has no element %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
            else:
                hint = '%s has no attribute %s' % (
                    object_type_repr(undef._undefined_obj),
                    undef._undefined_name)
        else:
            hint = undef._undefined_hint
        logger.warning('Template variable warning: %s', hint)
    class LoggingUndefined(base):
        def _fail_with_undefined_error(self, *args, **kwargs):
            try:
                return base._fail_with_undefined_error(self, *args, **kwargs)
            except self._undefined_exception as e:
                logger.error('Template variable error: %s', str(e))
                raise e
        def __str__(self):
            rv = base.__str__(self)
            _log_message(self)
            return rv
        def __iter__(self):
            rv = base.__iter__(self)
            _log_message(self)
            return rv
        # Python 2 uses __nonzero__/__unicode__, Python 3 uses __bool__.
        if PY2:
            def __nonzero__(self):
                rv = base.__nonzero__(self)
                _log_message(self)
                return rv
            def __unicode__(self):
                rv = base.__unicode__(self)
                _log_message(self)
                return rv
        else:
            def __bool__(self):
                rv = base.__bool__(self)
                _log_message(self)
                return rv
    return LoggingUndefined
@implements_to_string
class DebugUndefined(Undefined):
    """An undefined that, when printed, renders as its own template
    placeholder instead of an empty string.
    >>> foo = DebugUndefined(name='foo')
    >>> str(foo)
    '{{ foo }}'
    >>> not foo
    True
    >>> foo + 42
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ()
    def __str__(self):
        hint = self._undefined_hint
        if hint is not None:
            return u'{{ undefined value printed: %s }}' % hint
        if self._undefined_obj is missing:
            return u'{{ %s }}' % self._undefined_name
        return '{{ no such element: %s[%r] }}' % (
            object_type_repr(self._undefined_obj),
            self._undefined_name
        )
@implements_to_string
class StrictUndefined(Undefined):
    """An undefined that barks on print and iteration as well as boolean
    tests and all kinds of comparisons. In other words: you can do nothing
    with it except checking if it's defined using the `defined` test.
    >>> foo = StrictUndefined(name='foo')
    >>> str(foo)
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> not foo
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    >>> foo + 42
    Traceback (most recent call last):
    ...
    jinja2.exceptions.UndefinedError: 'foo' is undefined
    """
    __slots__ = ()
    # Even the "harmless" operations that the base class allows are
    # redirected to the failing callback here.
    __iter__ = __str__ = __len__ = __nonzero__ = __eq__ = \
        __ne__ = __bool__ = __hash__ = \
        Undefined._fail_with_undefined_error
# remove remaining slots attributes, after the metaclass did the magic they
# are unneeded and irritating as they contain wrong data for the subclasses.
del Undefined.__slots__, DebugUndefined.__slots__, StrictUndefined.__slots__
| mit |
denys-duchier/Scolar | ZopeProducts/exUserFolder/AuthSources/__init__.py | 1 | 1411 | #
# Extensible User Folder
#
# (C) Copyright 2000-2004 The Internet (Aust) Pty Ltd
# ACN: 082 081 472 ABN: 83 082 081 472
# All Rights Reserved
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Author: Andrew Milton <akm@theinternet.com.au>
# $Id: __init__.py,v 1.1 2004/11/10 14:15:34 akm Exp $
import etcAuthSource
import httpsAuthSource
import mysqlAuthSource
import pgAuthSource
import pgAuthSourceAlt
import radiusAuthSource
import smbAuthSource
import usAuthSource
import zodbAuthSource
import zodbBTreeAuthSource
#
# These have special requirements for external libraries
# that may not be present.
#
# Only a missing library should be tolerated here; a bare ``except:``
# would also have hidden genuine errors (syntax errors in the module,
# KeyboardInterrupt, ...) inside these auth sources.
try:
    import nisAuthSource
except ImportError:
    pass
try:
    import LDAPAuthSource
except ImportError:
    pass
nonZero/demos-python | src/examples/short/imap/imaplib_demo.py | 1 | 1900 | #!/usr/bin/python2
'''
This example shows how to use the imaplib module.
In this case I use it to access my gmail account, upload messages, download messages
and more.
To see the documentation of the API use: pydoc imaplib
This thing started from me wanting to import my old mail to gmail and seeing
this blog post: http://scott.yang.id.au/2009/01/migrate-emails-maildir-gmail.html
'''
import imaplib # for IMAP4_SSL
import ConfigParser # for ConfigParser
import os.path # for expanduser
# Server coordinates and credentials come from an ini file in the user's
# home directory, with a single [imap] section providing the keys
# username, password, hostname and port.
config = ConfigParser.ConfigParser()
config.read(os.path.expanduser('~/.pyimap.ini'))
opt_username = config.get('imap', 'username')
opt_password = config.get('imap', 'password')
opt_hostname = config.get('imap', 'hostname')
opt_port = config.get('imap', 'port')
def imap_have_mailbox(imap, name):
    """Return True when mailbox *name* exists on the server.

    Raises ValueError when the LIST command itself fails."""
    status, listing = imap.list(name)
    if status != 'OK':
        raise ValueError('could not list', name)
    # A non-existent mailbox comes back as a single None entry.
    return not (len(listing) == 1 and listing[0] is None)
def imap_create(imap, name):
    """Create mailbox *name*; raise ValueError when the server refuses."""
    status, _ = imap.create(name)
    if status != 'OK':
        raise ValueError('could not create', name)
def imap_delete(imap, name):
    """Delete mailbox *name*; raise ValueError when the server refuses."""
    status, _ = imap.delete(name)
    if status != 'OK':
        raise ValueError('could not delete', name)
def imap_logout(imap):
    """Log out; the server must answer BYE, otherwise raise ValueError."""
    status, detail = imap.logout()
    if status != 'BYE':
        raise ValueError('could not logout', status, detail)
def imap_login(imap, username, password):
    """Authenticate; raise ValueError when the server rejects the login."""
    status, _ = imap.login(username, password)
    if status != 'OK':
        raise ValueError('could not login')
# Connect over SSL and authenticate with the configured credentials.
imap = imaplib.IMAP4_SSL(opt_hostname, opt_port)
imap_login(imap, opt_username, opt_password)
print(imap.capability())
print(imap.list())
# Sanity checks against the demo account: 'foo' must not exist yet and
# 'business' must already exist, otherwise abort before modifying anything.
if imap_have_mailbox(imap, 'foo'):
    raise ValueError('have mailbox foo')
if not imap_have_mailbox(imap, 'business'):
    raise ValueError('do not have mailbox business')
# Round-trip a scratch mailbox, then disconnect cleanly.
imap_create(imap, 'foo')
imap_delete(imap, 'foo')
imap_logout(imap)
elovalo/elovalo | generators/gperf.py | 1 | 2637 | #
# Copyright 2012 Elovalo project group
#
# This file is part of Elovalo.
#
# Elovalo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elovalo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Elovalo. If not, see <http://www.gnu.org/licenses/>.
#
import re
import subprocess
def pgm_wrap(var, s):
    """Wrap every ``var[...]`` access found in *s* inside ``pgm_get(..., byte)``.

    Assumes the accessed array holds uint8_t values (hence ``byte``).
    Nested brackets inside the index expression are handled by bracket
    counting, and all occurrences are rewritten recursively.

    Raises ValueError when an access has unbalanced brackets (previously
    this silently returned None, corrupting the generated output).
    """
    # Raw string avoids the deprecated "\[" escape in a plain literal, and
    # re.escape keeps the search literal even if var ever contains
    # regex metacharacters.  "[^\]]" skips empty index expressions.
    m = re.search(re.escape(var) + r'\[[^\]]', s)
    if not m:
        return s
    start = m.start()
    depth = 0
    for i in range(start, len(s)):
        if s[i] == '[':
            depth += 1
        elif s[i] == ']':
            depth -= 1
            if depth == 0:
                # Wrap this access and continue with the remainder.
                return (s[:start] +
                        'pgm_get(' + s[start:i + 1] + ',byte)' +
                        pgm_wrap(var, s[i + 1:]))
    raise ValueError('unbalanced brackets after %r' % var)
def generate(source, target):
    """This function runs gperf and mangles it output so it allows
    placing look-up table to PROGMEM. This implementation allows false
    negatives, i.e. if a glyph is not found then it returns
    anything."""
    # Get gperf output
    p = subprocess.Popen(["gperf", source],
                         stdout=subprocess.PIPE)
    out, err = p.communicate()
    if not p.returncode == 0:
        raise Exception('gperf failed with ' + str(p.returncode))
    # Wrap asso_values and wordlist inside pgmspace and tune the
    # visibility of glyphs
    out = pgm_wrap('asso_values', out)
    out = out.replace("static const ",
                      "PROGMEM static const ")
    # NOTE(review): the first replace has already turned this into
    # "PROGMEM static const struct glyph", so the net effect here is
    # dropping "static" again, leaving "PROGMEM const struct glyph" --
    # presumably to make the glyph table externally visible; confirm.
    out = out.replace("static const struct glyph",
                      "const struct glyph")
    # Remove strings from wordlist, no need for verification
    out = re.sub(r'{".*",', r'{', out)
    out = out.replace('{""}', '{}')
    # Remove in_word_set to keep it compiling. It's not useful for us.
    out = out.replace('\nconst struct glyph *', '\n#if 0')
    out = out + '\n#endif'
    # Replace binary with hex (GCC 4.3<)
    def replace_binary(m):
        # m matches e.g. "0B1010"; strip the 0B prefix and re-emit as hex.
        return hex(int(m.group(0)[2:], 2))
    out = re.sub('0B[01]+', replace_binary, out)
    # Write to target file
    with open(target, 'w') as f:
        f.write(out)
| gpl-3.0 |
codrut3/tensorflow | tensorflow/contrib/eager/python/metrics_impl.py | 5 | 11931 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metrics classes for computing the output of an evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.summary import summary_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
# Matches every character that is not allowed in a variable-scope name;
# Metric.__init__ substitutes "_" for each such character.
_to_replace = re.compile("[^A-Za-z0-9.]")
class Metric(object):
  """A metric holds state for aggregating statistics over an evaluation run.
  Example use with eager execution:
  ```python
  m = SomeMetric(...)
  for input in ...:
    m(input)
  print(m.result())
  ```
  Example use with graph execution:
  ```python
  m = SomeMetric(...)
  m_placeholder = tf.placeholder(...)
  m_update = m(m_placeholder)
  # Variables defined in first call, so get the initialization op afterwards.
  m_init = m.init_variables()  # or tf.global_variables_initializer()
  m_result = m.result()
  with tf.Session() as sess:
    sess.run(m_init)
    for input in ...:
      sess.run(m_update, feed_dict={m_placeholder: input})
    print(sess.run(m_result))
  ```
  Descendants will implement:
  * `build()`: All variables should be created in this method, by calling
    `self.add_variable()` as in: `self.var = self.add_variable(...)`
    build() will be called in the first invocation of `__call__()`, with
    the same arguments passed `call()`.
  * `call()`: Has all updates to variables, as in:
      self.var.assign_add(...)
  * `result()`: Computes and returns a final value for the metric
    from the variables in `self`.
  Descendants may override `aggregate()`, but usually won't need to.  It
  adds in the state from a list of metrics of the same type as `self`.
  (Default is to sum all the variables.) Note that users should not call
  `aggregate()`, it is for use by TensorFlow infrastructure.
  """
  def __init__(self, name=None):
    # Lazily built: variables are created on the first __call__(), not here.
    self._built = False
    self._vars = []
    self._initial_values = {}
    self._updates = []
    name = name or self.__class__.__name__
    # Replace things like spaces in name to create a valid scope name.
    scope_name = _to_replace.sub("_", name)
    # We create the variable scope now to get the unique name that will
    # be used as a variable prefix when build() calls add_variable().
    with variable_scope.variable_scope(
        scope_name, use_resource=True, reuse=False) as scope:
      # Append any uniquifying suffix the scope added (e.g. "_1") to the
      # user-visible name so two metrics of the same class stay distinct.
      pos = scope.name.rfind(scope_name)
      self._name = name + scope.name[pos + len(scope_name):]
      self._scope = scope
    if context.in_graph_mode():
      # We make self.call() into a graph callable here, so that we can
      # return a single op that performs all of the variable updates.
      self._construction_scope = ops.get_default_graph().as_default
      self.call = function.defun(self.call)
    else:
      self._construction_scope = context.eager_mode
  # ---- API for users ----
  def __call__(self, *args, **kwargs):
    """Returns op to execute to update this metric for these inputs.
    Returns None if eager execution is enabled.
    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric, passed on to `call()`.
    """
    if not self._built:
      # First invocation: create the variables inside this metric's scope,
      # under the same graph/eager context used at construction time.
      with variable_scope.variable_scope(
          self._scope), self._construction_scope():
        self.build(*args, **kwargs)
      self._built = True
    return self.call(*args, **kwargs)
  @property
  def name(self):
    # Unique, human-readable name (may carry a scope-uniquifying suffix).
    return self._name
  @property
  def variables(self):
    # The state variables created by build() via add_variable().
    return self._vars
  def init_variables(self):
    """Initializes this Metric's variables.
    Should be called after variables are created in the first execution
    of `__call__()`. If using graph execution, the return value should be
    `run()` in a session before running the op returned by `__call__()`.
    (See example above.)
    Returns:
      If using graph execution, this returns an op to perform the
      initialization. Under eager execution, the variables are reset to their
      initial values as a side effect and this function returns None.
    """
    if context.in_graph_mode():
      return control_flow_ops.group([v.initializer for v in self._vars])
    # Eager mode: restore the values captured by add_variable() and
    # implicitly return None.
    for v in self._vars:
      v.assign(self._initial_values[v])
  # ---- To be implemented by descendants ---
  def build(self, *args, **kwargs):
    """Method to create variables.
    Called by `__call__()` before `call()` for the first time.
    Args:
      *args:
      **kwargs: The arguments to the first invocation of `__call__()`.
       `build()` may use the shape and/or dtype of these arguments
       when deciding how to create variables.
    """
    raise NotImplementedError("Metrics must define a build() member function")
  def call(self, *args, **kwargs):
    """Accumulates statistics for the metric. Users should use __call__ instead.
    Note: This function is executed as a graph function in graph mode.
    This means:
    a) Operations on the same resource are executed in textual order.
       This should make it easier to do things like add the updated
       value of a variable to another, for example.
    b) You don't need to worry about collecting the update ops to execute.
       All update ops added to the graph by this function will be executed.
    As a result, code should generally work the same way with graph or
    eager execution.
    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric, as passed to
        `__call__()`.
    """
    raise NotImplementedError("Metrics must define a call() member function")
  def result(self):  # TODO(josh11b): Add an optional summary_writer parameter.
    """Computes and returns a final value for the metric."""
    raise NotImplementedError("Metrics must define a result() member function")
  # We can support two different strategies of for doing data-parallel
  # distributed metric computations:
  # * Put metric variables on the first device and rely on small
  #   bandwidth needed to do updates. (Doesn't require any particular
  #   code in Metric implementations.)
  # * Ask each type of metric to define an aggregation method to run
  #   at the end of eval to merge across devices. Note: this is good
  #   for the use case where they want to record the metric's state
  #   for each example and then later decide which examples they want
  #   to aggregate over. (Recommended -- not too much harder and adds
  #   flexibility over previous option.)
  # I'm going with the second strategy since we can define a default
  # implementation of aggregate() that will work for most descendants.
  def aggregate(self, metrics):
    """Adds in the state from a list of metrics.
    Default implementation sums all the metric variables.
    Args:
      metrics: A list of metrics with the same type as `self`.
    Raises:
      TypeError: If any metric in `metrics` is not of the same type as `self`.
      ValueError: If metrics contains invalid data (variables out of order).
    """
    for m in metrics:
      if type(self) != type(m):  # pylint: disable=unidiomatic-typecheck
        raise TypeError("All metrics must be the same type, '%s' != '%s'." %
                        (type(self), type(m)))
    # pylint: disable=protected-access
    for i in range(len(self._vars)):
      if any(m._vars[i].name != self._vars[i].name for m in metrics):
        raise ValueError("All metrics must have variables in the same order.")
      self._vars[i].assign_add(math_ops.add_n([m._vars[i] for m in metrics]))
    # pylint: enable=protected-access
  # ---- For use by descendants ---
  def add_variable(self, name, shape=None, dtype=None, initializer=None):
    """***Only for use by descendants of Metric***."""
    if self._built:
      raise RuntimeError("Can't call add_variable() except in build().")
    # In graph mode, register as LOCAL + METRIC variables so standard
    # initializers/collectors find them; collections are meaningless eagerly.
    collections = None if context.in_eager_mode() else [
        ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
    ]
    v = variable_scope.get_variable(
        name,
        shape,
        dtype,
        initializer,
        trainable=False,
        collections=collections,
        use_resource=True)
    self._vars.append(v)
    if context.in_eager_mode():
      # Snapshot the initial value so init_variables() can reset eagerly.
      self._initial_values[v] = v.value()
    return v
class Mean(Metric):
  """Computes the (weighted) mean of the given values."""
  # TODO(josh11b): Maybe have a dtype argument that defaults to tf.float64?
  # Or defaults to type of the input if it is tf.float32, else tf.float64?
  def __init__(self, name=None, dtype=dtypes.float64):
    super(Mean, self).__init__(name=name)
    self.dtype = dtype
  def build(self, *args, **kwargs):
    # call()'s arguments are irrelevant to variable creation; accepting and
    # discarding them keeps subclasses of Mean simple.
    del args, kwargs
    def _scalar_accumulator(var_name):
      # Both accumulators are zero-initialized scalars of the metric dtype.
      return self.add_variable(name=var_name, shape=(), dtype=self.dtype,
                               initializer=init_ops.zeros_initializer)
    self.numer = _scalar_accumulator("numer")
    self.denom = _scalar_accumulator("denom")
  def call(self, values, weights=None):
    """Accumulate statistics for computing the mean.
    For example, if values is [1, 3, 5, 7] then the mean is 4.
    If the weights were specified as [1, 1, 0, 0] then the mean would be 2.
    Args:
      values: Tensor with the per-example value.
      weights: Optional weighting of each example. Defaults to 1.
    """
    if weights is None:
      # Unweighted: denominator is the element count, numerator the raw sum.
      count = array_ops.identity(array_ops.size(values))
      self.denom.assign_add(math_ops.cast(count, self.dtype))
      total = math_ops.reduce_sum(values)
      self.numer.assign_add(math_ops.cast(total, self.dtype))
    else:
      # Weighted: denominator is the weight sum, numerator the weighted sum.
      weights = math_ops.cast(weights, self.dtype)
      self.denom.assign_add(math_ops.reduce_sum(weights))
      weighted = math_ops.cast(values, self.dtype) * weights
      self.numer.assign_add(math_ops.reduce_sum(weighted))
  def result(self):
    mean = self.numer / self.denom
    # Emit the result as a scalar summary under this metric's name.
    summary_ops.scalar(name=self.name, tensor=mean)
    return mean
class Accuracy(Mean):
  """Calculates how often `predictions` matches `labels`."""
  def __init__(self, name=None, dtype=dtypes.float64):
    super(Accuracy, self).__init__(name=name, dtype=dtype)
  def call(self, labels, predictions, weights=None):
    """Accumulate accuracy statistics.
    For example, if labels is [1, 2, 3, 4] and predictions is [0, 2, 3, 4]
    then the accuracy is 3/4 or .75. If the weights were specified as
    [1, 1, 0, 0] then the accuracy would be 1/2 or .5.
    `labels` and `predictions` should have the same shape and type.
    Args:
      labels: Tensor with the true labels for each example. One example
        per element of the Tensor.
      predictions: Tensor with the predicted label for each example.
      weights: Optional weighting of each example. Defaults to 1.
    """
    matches = math_ops.equal(labels, predictions)
    # Bug fix: cast to the dtype configured in the constructor. Previously
    # this hard-coded dtypes.float64, silently ignoring `self.dtype`; the
    # default is still float64, so existing callers are unaffected.
    matches = math_ops.cast(matches, self.dtype)
    super(Accuracy, self).call(matches, weights=weights)
| apache-2.0 |
USC-NSL/FlowRadar-P4 | testutils/pd_base_tests.py | 7 | 2669 | """
Base classes for PD test cases
Tests will usually inherit from one of these classes to have the controller
and/or dataplane automatically set up.
"""
import importlib
import os

import ptf
from ptf.base_tests import BaseTest
from ptf import config
import ptf.testutils as testutils
################################################################
#
# Thrift interface base tests
#
################################################################
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TMultiplexedProtocol
class ThriftInterface(BaseTest):
    """Base test that talks to the PD Thrift server on localhost:9090.

    Builds multiplexed Thrift clients for the named P4 program, the
    multicast service ("mc") and the connection manager ("conn_mgr").
    """
    def __init__(self, p4_name):
        BaseTest.__init__(self)
        self.p4_name = p4_name
        # The RPC client modules are generated per-P4-program, so they must
        # be resolved dynamically from the program name.
        self.p4_client_module = importlib.import_module(
            ".".join(["p4_pd_rpc", p4_name]))
        self.mc_client_module = importlib.import_module(
            ".".join(["mc_pd_rpc", "mc"]))
        self.conn_mgr_client_module = importlib.import_module(
            ".".join(["conn_mgr_pd_rpc", "conn_mgr"]))
    def setUp(self):
        BaseTest.setUp(self)
        # Set up thrift client and contact server.
        self.transport = TSocket.TSocket('localhost', 9090)
        self.transport = TTransport.TBufferedTransport(self.transport)
        bprotocol = TBinaryProtocol.TBinaryProtocol(self.transport)
        # One shared socket, three multiplexed service protocols.
        self.mc_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "mc")
        self.conn_mgr_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, "conn_mgr")
        self.p4_protocol = TMultiplexedProtocol.TMultiplexedProtocol(
            bprotocol, self.p4_name)
        self.client = self.p4_client_module.Client(self.p4_protocol)
        self.mc = self.mc_client_module.Client(self.mc_protocol)
        self.conn_mgr = self.conn_mgr_client_module.Client(
            self.conn_mgr_protocol)
        self.transport.open()
    def tearDown(self):
        # Bug fix: only subclasses that set up a dataplane have a pcap
        # capture running; previously this dereferenced self.dataplane
        # unconditionally and raised AttributeError when the base class was
        # used directly with a log_dir configured.
        dataplane = getattr(self, "dataplane", None)
        if config["log_dir"] is not None and dataplane is not None:
            dataplane.stop_pcap()
        BaseTest.tearDown(self)
        self.transport.close()
class ThriftInterfaceDataPlane(ThriftInterface):
    """
    Root class that sets up the thrift interface and dataplane.
    """
    def __init__(self, p4_name):
        ThriftInterface.__init__(self, p4_name)
    def setUp(self):
        ThriftInterface.setUp(self)
        self.dataplane = ptf.dataplane_instance
        self.dataplane.flush()
        if config["log_dir"] is not None:
            # Bug fix: this used os.path.join without importing os, which
            # raised NameError whenever a log_dir was configured (fixed by
            # adding `import os` at the top of the file).
            filename = os.path.join(config["log_dir"], str(self)) + ".pcap"
            self.dataplane.start_pcap(filename)
    def tearDown(self):
        # The parent tearDown already stops the pcap capture when a log_dir
        # is configured; stopping it here as well called stop_pcap() twice.
        ThriftInterface.tearDown(self)
| apache-2.0 |
sajeeshcs/nested_quota | nova/virt/baremetal/pxe.py | 9 | 19750 | # Copyright 2012,2014 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Class for PXE bare-metal nodes.
"""
import datetime
import os
import jinja2
from oslo.config import cfg
from oslo.db import exception as db_exc
from nova.compute import flavors
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import fileutils
from nova.openstack.common import log as logging
from nova.openstack.common import loopingcall
from nova.openstack.common import timeutils
from nova import utils
from nova.virt.baremetal import baremetal_states
from nova.virt.baremetal import base
from nova.virt.baremetal import db
from nova.virt.baremetal import utils as bm_utils
pxe_opts = [
cfg.StrOpt('deploy_kernel',
help='Default kernel image ID used in deployment phase'),
cfg.StrOpt('deploy_ramdisk',
help='Default ramdisk image ID used in deployment phase'),
cfg.StrOpt('net_config_template',
default='$pybasedir/nova/virt/baremetal/'
'net-dhcp.ubuntu.template',
help='Template file for injected network config'),
cfg.StrOpt('pxe_append_params',
default='nofb nomodeset vga=normal',
help='Additional append parameters for baremetal PXE boot'),
cfg.StrOpt('pxe_config_template',
default='$pybasedir/nova/virt/baremetal/pxe_config.template',
help='Template file for PXE configuration'),
cfg.BoolOpt('use_file_injection',
help='If True, enable file injection for network info, '
'files and admin password',
default=False),
cfg.IntOpt('pxe_deploy_timeout',
help='Timeout for PXE deployments. Default: 0 (unlimited)',
default=0),
cfg.BoolOpt('pxe_network_config',
help='If set, pass the network configuration details to the '
'initramfs via cmdline.',
default=False),
cfg.StrOpt('pxe_bootfile_name',
help='This gets passed to Neutron as the bootfile dhcp '
'parameter.',
default='pxelinux.0'),
]
LOG = logging.getLogger(__name__)
baremetal_group = cfg.OptGroup(name='baremetal',
title='Baremetal Options')
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(pxe_opts, baremetal_group)
CONF.import_opt('use_ipv6', 'nova.netconf')
def build_pxe_network_config(network_info):
    """Build the kernel command-line ``ip=`` fragments for every interface.

    Returns a single space-separated string, one fragment per interface,
    using the IPv6 field names when CONF.use_ipv6 is set.
    """
    use_v6 = CONF.use_ipv6
    ifaces = bm_utils.map_network_interfaces(network_info, use_v6)
    if use_v6:
        fmt = ("ip=[%(address_v6)s]::[%(gateway_v6)s]:"
               "[%(netmask_v6)s]::%(name)s:off")
    else:
        fmt = "ip=%(address)s::%(gateway)s:%(netmask)s::%(name)s:off"
    return ' '.join(fmt % iface for iface in ifaces)
def build_pxe_config(deployment_id, deployment_key, deployment_iscsi_iqn,
                     deployment_aki_path, deployment_ari_path,
                     aki_path, ari_path, network_info):
    """Build the PXE config file for a node.

    Renders the configured Jinja template with both the "deploy" and "boot"
    label parameters, which correspond to the two phases of booting. This
    may be extended later.
    """
    LOG.debug("Building PXE config for deployment %s.", deployment_id)
    if network_info and CONF.baremetal.pxe_network_config:
        network_config = build_pxe_network_config(network_info)
    else:
        network_config = None
    pxe_options = {
        'deployment_id': deployment_id,
        'deployment_key': deployment_key,
        'deployment_iscsi_iqn': deployment_iscsi_iqn,
        'deployment_aki_path': deployment_aki_path,
        'deployment_ari_path': deployment_ari_path,
        'aki_path': aki_path,
        'ari_path': ari_path,
        'pxe_append_params': CONF.baremetal.pxe_append_params,
        'pxe_network_config': network_config,
    }
    tmpl_dir, tmpl_name = os.path.split(CONF.baremetal.pxe_config_template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir))
    # '${ROOT}' is passed through literally so the bootloader expands it.
    return env.get_template(tmpl_name).render({'pxe_options': pxe_options,
                                               'ROOT': '${ROOT}'})
def build_network_config(network_info):
    """Render the injected network configuration from the template file."""
    ifaces = bm_utils.map_network_interfaces(network_info, CONF.use_ipv6)
    tmpl_dir, tmpl_name = os.path.split(CONF.baremetal.net_config_template)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(tmpl_dir))
    return env.get_template(tmpl_name).render({
        'interfaces': ifaces,
        'use_ipv6': CONF.use_ipv6,
    })
def get_deploy_aki_id(flavor):
    """Return the deploy kernel image ID: flavor override or config default."""
    specs = flavor.get('extra_specs', {})
    return specs.get('baremetal:deploy_kernel_id',
                     CONF.baremetal.deploy_kernel)
def get_deploy_ari_id(flavor):
    """Return the deploy ramdisk image ID: flavor override or config default."""
    specs = flavor.get('extra_specs', {})
    return specs.get('baremetal:deploy_ramdisk_id',
                     CONF.baremetal.deploy_ramdisk)
def get_image_dir_path(instance):
    """Return the directory that holds this instance's disk image."""
    name = instance['name']
    return os.path.join(CONF.instances_path, name)
def get_image_file_path(instance):
    """Return the full path of this instance's disk image file."""
    name = instance['name']
    return os.path.join(CONF.instances_path, name, 'disk')
def get_pxe_config_file_path(instance):
    """Return the path of this instance's PXE config file under tftp_root."""
    uuid = instance['uuid']
    return os.path.join(CONF.baremetal.tftp_root, uuid, 'config')
def get_partition_sizes(instance):
    """Return (root_mb, swap_mb, ephemeral_mb) derived from the flavor."""
    flavor = flavors.extract_flavor(instance)
    root_mb = flavor['root_gb'] * 1024
    ephemeral_mb = flavor['ephemeral_gb'] * 1024
    # NOTE(deva): For simpler code paths on the deployment side,
    #             we always create a swap partition. If the flavor
    #             does not specify any swap, we default to 1MB
    swap_mb = max(flavor['swap'], 1)
    return (root_mb, swap_mb, ephemeral_mb)
def get_pxe_mac_path(mac):
    """Convert a MAC address into the path of its PXE config symlink."""
    # pxelinux expects "01-" (the ARP hardware type) plus the lowercased,
    # dash-separated MAC.
    filename = "01-" + mac.replace(":", "-").lower()
    return os.path.join(CONF.baremetal.tftp_root, 'pxelinux.cfg', filename)
def get_tftp_image_info(instance, flavor):
    """Generate the tftp file paths for this instance.

    Returns a dict mapping each label ('kernel', 'ramdisk', 'deploy_kernel',
    'deploy_ramdisk') to a [image_uuid, tftp_path] pair.

    Raises NovaException if
    - instance does not contain kernel_id or ramdisk_id
    - deploy_kernel_id or deploy_ramdisk_id can not be read from
      flavor['extra_specs'] and defaults are not set
    """
    image_info = {
        'kernel': [None, None],
        'ramdisk': [None, None],
        'deploy_kernel': [None, None],
        'deploy_ramdisk': [None, None],
    }
    try:
        image_info['kernel'][0] = str(instance['kernel_id'])
        image_info['ramdisk'][0] = str(instance['ramdisk_id'])
        image_info['deploy_kernel'][0] = get_deploy_aki_id(flavor)
        image_info['deploy_ramdisk'][0] = get_deploy_ari_id(flavor)
    except KeyError:
        # A missing key leaves the remaining uuids as None; they are
        # reported together below.
        pass
    missing_labels = []
    for label, (uuid, _path) in image_info.items():
        if uuid:
            image_info[label][1] = os.path.join(CONF.baremetal.tftp_root,
                                                instance['uuid'], label)
        else:
            missing_labels.append(label)
    if missing_labels:
        raise exception.NovaException(_(
            "Can not activate PXE bootloader. The following boot parameters "
            "were not passed to baremetal driver: %s") % missing_labels)
    return image_info
class PXE(base.NodeDriver):
    """PXE bare metal driver."""
    def __init__(self, virtapi):
        super(PXE, self).__init__(virtapi)
    def _collect_mac_addresses(self, context, node):
        """Return the sorted, de-duplicated MAC addresses of a node's NICs."""
        macs = set()
        for nic in db.bm_interface_get_all_by_bm_node_id(context, node['id']):
            if nic['address']:
                macs.add(nic['address'])
        return sorted(macs)
    def _cache_tftp_images(self, context, instance, image_info):
        """Fetch the necessary kernels and ramdisks for the instance."""
        fileutils.ensure_tree(
            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
        LOG.debug("Fetching kernel and ramdisk for instance %s",
                  instance['name'])
        # image_info maps label -> [glance_uuid, target_path]
        # (see get_tftp_image_info).
        for label in image_info.keys():
            (uuid, path) = image_info[label]
            bm_utils.cache_image(
                context=context,
                target=path,
                image_id=uuid,
                user_id=instance['user_id'],
                project_id=instance['project_id'],
            )
    def _cache_image(self, context, instance, image_meta):
        """Fetch the instance's image from Glance
        This method pulls the relevant AMI and associated kernel and ramdisk,
        and the deploy kernel and ramdisk from Glance, and writes them
        to the appropriate places on local disk.
        Both sets of kernel and ramdisk are needed for PXE booting, so these
        are stored under CONF.baremetal.tftp_root.
        At present, the AMI is cached and certain files are injected.
        Debian/ubuntu-specific assumptions are made regarding the injected
        files. In a future revision, this functionality will be replaced by a
        more scalable and os-agnostic approach: the deployment ramdisk will
        fetch from Glance directly, and write its own last-mile configuration.
        """
        fileutils.ensure_tree(get_image_dir_path(instance))
        image_path = get_image_file_path(instance)
        LOG.debug("Fetching image %(ami)s for instance %(name)s",
                  {'ami': image_meta['id'], 'name': instance['name']})
        bm_utils.cache_image(context=context,
                             target=image_path,
                             image_id=image_meta['id'],
                             user_id=instance['user_id'],
                             project_id=instance['project_id'],
                             clean=True,
                             )
        # Returns [glance_id, local_path] of the cached root disk image.
        return [image_meta['id'], image_path]
    def _inject_into_image(self, context, node, instance, network_info,
                           injected_files=None, admin_password=None):
        """Inject last-mile configuration into instances image
        Much of this method is a hack around DHCP and cloud-init
        not working together with baremetal provisioning yet.
        """
        # NOTE(deva): We assume that if we're not using a kernel,
        #             then the target partition is the first partition
        partition = None
        if not instance['kernel_id']:
            partition = "1"
        ssh_key = None
        if 'key_data' in instance and instance['key_data']:
            ssh_key = str(instance['key_data'])
        if injected_files is None:
            injected_files = []
        else:
            # NOTE(deva): copy so we don't modify the original
            injected_files = list(injected_files)
        net_config = build_network_config(network_info)
        if instance['hostname']:
            injected_files.append(('/etc/hostname', instance['hostname']))
        LOG.debug("Injecting files into image for instance %(name)s",
                  {'name': instance['name']})
        bm_utils.inject_into_image(
            image=get_image_file_path(instance),
            key=ssh_key,
            net=net_config,
            metadata=utils.instance_meta(instance),
            admin_password=admin_password,
            files=injected_files,
            partition=partition,
        )
    def cache_images(self, context, node, instance,
                     admin_password, image_meta, injected_files, network_info):
        """Prepare all the images for this instance."""
        flavor = objects.Flavor.get_by_id(context,
                                          instance['instance_type_id'])
        tftp_image_info = get_tftp_image_info(instance, flavor)
        self._cache_tftp_images(context, instance, tftp_image_info)
        self._cache_image(context, instance, image_meta)
        if CONF.baremetal.use_file_injection:
            self._inject_into_image(context, node, instance, network_info,
                                    injected_files, admin_password)
    def destroy_images(self, context, node, instance):
        """Delete instance's image file."""
        bm_utils.unlink_without_raise(get_image_file_path(instance))
        bm_utils.rmtree_without_raise(get_image_dir_path(instance))
    def dhcp_options_for_instance(self, instance):
        """Return the DHCP options (bootfile, server IPs) for PXE booting."""
        return [{'opt_name': 'bootfile-name',
                 'opt_value': CONF.baremetal.pxe_bootfile_name},
                {'opt_name': 'server-ip-address',
                 'opt_value': CONF.my_ip},
                {'opt_name': 'tftp-server',
                 'opt_value': CONF.my_ip}
                ]
    def activate_bootloader(self, context, node, instance, network_info):
        """Configure PXE boot loader for an instance
        Kernel and ramdisk images are downloaded by cache_tftp_images,
        and stored in /tftpboot/{uuid}/
        This method writes the instances config file, and then creates
        symlinks for each MAC address in the instance.
        By default, the complete layout looks like this:
        /tftpboot/
            ./{uuid}/
                 kernel
                 ramdisk
                 deploy_kernel
                 deploy_ramdisk
                 config
            ./pxelinux.cfg/
                 {mac} -> ../{uuid}/config
        """
        flavor = objects.Flavor.get_by_id(context,
                                          instance['instance_type_id'])
        image_info = get_tftp_image_info(instance, flavor)
        (root_mb, swap_mb, ephemeral_mb) = get_partition_sizes(instance)
        pxe_config_file_path = get_pxe_config_file_path(instance)
        image_file_path = get_image_file_path(instance)
        # The deploy key is a shared secret the deploy ramdisk presents back
        # to authenticate its iSCSI callback.
        deployment_key = bm_utils.random_alnum(32)
        deployment_iscsi_iqn = "iqn-%s" % instance['uuid']
        db.bm_node_update(context, node['id'],
                          {'deploy_key': deployment_key,
                           'image_path': image_file_path,
                           'pxe_config_path': pxe_config_file_path,
                           'root_mb': root_mb,
                           'swap_mb': swap_mb,
                           'ephemeral_mb': ephemeral_mb})
        pxe_config = build_pxe_config(
            node['id'],
            deployment_key,
            deployment_iscsi_iqn,
            image_info['deploy_kernel'][1],
            image_info['deploy_ramdisk'][1],
            image_info['kernel'][1],
            image_info['ramdisk'][1],
            network_info,
        )
        bm_utils.write_to_file(pxe_config_file_path, pxe_config)
        # One pxelinux.cfg symlink per MAC, all pointing at the same config.
        macs = self._collect_mac_addresses(context, node)
        for mac in macs:
            mac_path = get_pxe_mac_path(mac)
            bm_utils.unlink_without_raise(mac_path)
            bm_utils.create_link_without_raise(pxe_config_file_path, mac_path)
    def deactivate_bootloader(self, context, node, instance):
        """Delete PXE bootloader images and config."""
        try:
            db.bm_node_update(context, node['id'],
                              {'deploy_key': None,
                               'image_path': None,
                               'pxe_config_path': None,
                               'root_mb': 0,
                               'swap_mb': 0})
        except exception.NodeNotFound:
            pass
        # NOTE(danms): the flavor extra_specs do not need to be
        # present/correct at deactivate time, so pass something empty
        # to avoid an extra lookup
        flavor = dict(extra_specs={
            'baremetal:deploy_ramdisk_id': 'ignore',
            'baremetal:deploy_kernel_id': 'ignore'})
        try:
            image_info = get_tftp_image_info(instance, flavor)
        except exception.NovaException:
            # Best-effort cleanup: missing boot params just mean there are
            # no per-label files to remove.
            pass
        else:
            for label in image_info.keys():
                (uuid, path) = image_info[label]
                bm_utils.unlink_without_raise(path)
        bm_utils.unlink_without_raise(get_pxe_config_file_path(instance))
        try:
            macs = self._collect_mac_addresses(context, node)
        except db_exc.DBError:
            pass
        else:
            for mac in macs:
                bm_utils.unlink_without_raise(get_pxe_mac_path(mac))
        bm_utils.rmtree_without_raise(
            os.path.join(CONF.baremetal.tftp_root, instance['uuid']))
    def activate_node(self, context, node, instance):
        """Wait for PXE deployment to complete."""
        # NOTE(review): 'locals' shadows the builtin locals(); rename
        # candidate. A dict is used so the nested closure can mutate state
        # (Python 2 has no 'nonlocal').
        locals = {'error': '', 'started': False}
        def _wait_for_deploy():
            """Called at an interval until the deployment completes."""
            try:
                row = db.bm_node_get(context, node['id'])
                if instance['uuid'] != row.get('instance_uuid'):
                    locals['error'] = _("Node associated with another instance"
                                        " while waiting for deploy of %s")
                    raise loopingcall.LoopingCallDone()
                status = row.get('task_state')
                if (status == baremetal_states.DEPLOYING
                        and locals['started'] is False):
                    LOG.info(_("PXE deploy started for instance %s")
                             % instance['uuid'])
                    locals['started'] = True
                elif status in (baremetal_states.DEPLOYDONE,
                                baremetal_states.ACTIVE):
                    LOG.info(_("PXE deploy completed for instance %s")
                             % instance['uuid'])
                    raise loopingcall.LoopingCallDone()
                elif status == baremetal_states.DEPLOYFAIL:
                    locals['error'] = _("PXE deploy failed for instance %s")
            except exception.NodeNotFound:
                locals['error'] = _("Baremetal node deleted while waiting "
                                    "for deployment of instance %s")
            # 'expiration' is bound at call time from the enclosing scope
            # (it is assigned below, before the looping call starts).
            if (CONF.baremetal.pxe_deploy_timeout and
                    timeutils.utcnow() > expiration):
                locals['error'] = _("Timeout reached while waiting for "
                                    "PXE deploy of instance %s")
            if locals['error']:
                raise loopingcall.LoopingCallDone()
        expiration = timeutils.utcnow() + datetime.timedelta(
            seconds=CONF.baremetal.pxe_deploy_timeout)
        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_deploy)
        timer.start(interval=1).wait()
        if locals['error']:
            raise exception.InstanceDeployFailure(
                locals['error'] % instance['uuid'])
    def deactivate_node(self, context, node, instance):
        # Nothing to do: the PXE driver has no per-node teardown here.
        pass
| apache-2.0 |
tiagormk/gem5-hmp | tests/configs/t1000-simple-atomic.py | 4 | 2095 | # Copyright (c) 2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
import m5
from m5.objects import *
m5.util.addToPath('../configs/common')
import FSConfig
# Full-system SPARC test: one atomic CPU with simple (functional) memory.
system = FSConfig.makeSparcSystem('atomic', SimpleMemory)
# Separate 1GHz clock domains for the system and the CPU.
system.clk_domain = SrcClockDomain(clock = '1GHz')
system.cpu_clk_domain = SrcClockDomain(clock = '1GHz')
cpu = AtomicSimpleCPU(cpu_id=0, clk_domain = system.cpu_clk_domain)
system.cpu = cpu
# create the interrupt controller
cpu.createInterruptController()
cpu.connectAllPorts(system.membus)
root = Root(full_system=True, system=system)
# Global tick frequency (ticks per simulated second).
m5.ticks.setGlobalFrequency('2GHz')
evelynmitchell/namebench | nb_third_party/dns/tsigkeyring.py | 248 | 1658 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""A place to store TSIG keys."""
import base64
import dns.name
def from_text(textring):
    """Convert a dictionary containing (textual DNS name, base64 secret) pairs
    into a binary keyring which has (dns.name.Name, binary secret) pairs.
    @rtype: dict"""
    # NOTE(review): base64.decodestring is the Python 2 spelling; it was
    # removed in Python 3.9 — confirm target interpreter before modernizing.
    keyring = {}
    for keytext, keysecret in textring.items():
        keyring[dns.name.from_text(keytext)] = base64.decodestring(keysecret)
    return keyring
def to_text(keyring):
    """Convert a dictionary containing (dns.name.Name, binary secret) pairs
    into a text keyring which has (textual DNS name, base64 secret) pairs.
    @rtype: dict"""
    # NOTE(review): base64.encodestring is the Python 2 spelling; it was
    # removed in Python 3.9 — confirm target interpreter before modernizing.
    textring = {}
    for keyname, keysecret in keyring.items():
        textring[dns.name.to_text(keyname)] = base64.encodestring(keysecret)
    return textring
| apache-2.0 |
zubron/servo | components/script/dom/bindings/codegen/ply/ply/lex.py | 344 | 40739 | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2009,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
__version__ = "3.3"
__tabversion__ = "3.2" # Version of table file used
import re, sys, types, copy, os
# This tuple contains known string types.
# Feature-probe: Python 2 exposes types.StringType/UnicodeType; on Python 3
# those attributes are gone, so the AttributeError selects (str, bytes).
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)
# Extract the code attribute of a function. The attribute is spelled
# f.func_code on Python 2 and f.__code__ on Python 3.
if sys.version_info[0] >= 3:
    def func_code(f):
        """Return the code object of function *f* (Python 3 spelling)."""
        return f.__code__
else:
    def func_code(f):
        """Return the code object of function *f* (Python 2 spelling)."""
        return f.func_code
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when the lexer hits input it cannot tokenize and no
    t_error() handler is available.  ``text`` holds the remaining,
    unprocessed input at the point of failure."""
    def __init__(self, message, s):
        Exception.__init__(self, message)   # populates self.args = (message,)
        self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single token produced by the lexer.  Attributes (set by the
    matching engine, not __init__): type, value, lineno, lexpos."""
    def __repr__(self):
        return "LexToken(%s,%r,%d,%d)" % (self.type, self.value, self.lineno, self.lexpos)
    def __str__(self):
        # str and repr deliberately render identically.
        return self.__repr__()
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    """Minimal stand-in for a ``logging`` logger that writes formatted
    messages straight to a file-like object."""
    def __init__(self, f):
        self.f = f
    def _emit(self, prefix, msg, args):
        # %-format lazily, mirroring the logging module's call convention.
        self.f.write(prefix + (msg % args) + "\n")
    def critical(self, msg, *args, **kwargs):
        self._emit("", msg, args)
    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)
    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)
    # info/debug are treated the same as critical: plain, unprefixed output.
    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger stand-in that silently discards everything."""
    # Any attribute access (.info, .warning, .error, ...) returns the object
    # itself, so every "method" lookup succeeds without doing anything.
    def __getattribute__(self,name):
        return self
    # Being callable (and returning self) lets the retrieved "method" be
    # invoked with any arguments, again as a no-op.
    def __call__(self,*args,**kwargs):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    """PLY lexer runtime.

    Holds the compiled master regular expressions (per lexer state) plus the
    scanning position, and produces LexToken objects via token().  Public
    surface: input(), token(), clone(), begin()/push_state()/pop_state(),
    plus the lineno and lexpos attributes.
    """
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re,findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = "INITIAL"     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ""           # Ignored characters
        self.lexliterals = ""         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = 0          # Optimized mode
    def clone(self,object=None):
        """Return a shallow copy of the lexer.  If *object* is given, rule
        functions are rebound to same-named methods of that object."""
        c = copy.copy(self)
        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object.  In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = { }
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            # String rules / empty slots carry no function: keep as-is.
                            newfindex.append(f)
                            continue
                        # Rebind the rule function by name on the new object.
                        newfindex.append((getattr(object,f[0].__name__),f[1]))
                    newre.append((cre,newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = { }
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object,ef.__name__)
            c.lexmodule = object
        return c
    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self,tabfile,outputdir=""):
        """Dump the lexer tables to <outputdir>/<tabfile>.py so that a later
        run with optimize=1 can skip reflection (see readtab())."""
        if isinstance(tabfile,types.ModuleType):
            return
        basetabfilename = tabfile.split(".")[-1]
        filename = os.path.join(outputdir,basetabfilename)+".py"
        tf = open(filename,"w")
        tf.write("# %s.py. This file automatically created by PLY (version %s). Don't edit!\n" % (tabfile,__version__))
        tf.write("_tabversion   = %s\n" % repr(__version__))
        tf.write("_lextokens    = %s\n" % repr(self.lextokens))
        tf.write("_lexreflags   = %s\n" % repr(self.lexreflags))
        tf.write("_lexliterals  = %s\n" % repr(self.lexliterals))
        tf.write("_lexstateinfo = %s\n" % repr(self.lexstateinfo))
        tabre = { }
        # Collect all functions in the initial state
        initial = self.lexstatere["INITIAL"]
        initialfuncs = []
        for part in initial:
            for f in part[1]:
                if f and f[0]:
                    initialfuncs.append(f)
        for key, lre in self.lexstatere.items():
            titem = []
            for i in range(len(lre)):
                # Functions are not picklable; store (regex text, rule names) instead.
                titem.append((self.lexstateretext[key][i],_funcs_to_names(lre[i][1],self.lexstaterenames[key][i])))
            tabre[key] = titem
        tf.write("_lexstatere   = %s\n" % repr(tabre))
        tf.write("_lexstateignore = %s\n" % repr(self.lexstateignore))
        taberr = { }
        for key, ef in self.lexstateerrorf.items():
            if ef:
                taberr[key] = ef.__name__
            else:
                taberr[key] = None
        tf.write("_lexstateerrorf = %s\n" % repr(taberr))
        tf.close()
    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self,tabfile,fdict):
        """Rebuild the lexer from a previously written table file (or module),
        resolving rule names back to callables via *fdict*.  Raises
        ImportError if the table was written by a different PLY version."""
        if isinstance(tabfile,types.ModuleType):
            lextab = tabfile
        else:
            if sys.version_info[0] < 3:
                exec("import %s as lextab" % tabfile)
            else:
                # Python 3 exec() cannot rebind a local; import into a scratch dict.
                env = { }
                exec("import %s as lextab" % tabfile, env,env)
                lextab = env['lextab']
        if getattr(lextab,"_tabversion","0.0") != __version__:
            raise ImportError("Inconsistent PLY version")
        self.lextokens      = lextab._lextokens
        self.lexreflags     = lextab._lexreflags
        self.lexliterals    = lextab._lexliterals
        self.lexstateinfo   = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere     = { }
        self.lexstateretext = { }
        for key,lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for i in range(len(lre)):
                # Recompile each stored regex and map stored names back to functions.
                titem.append((re.compile(lre[i][0],lextab._lexreflags | re.VERBOSE),_names_to_funcs(lre[i][1],fdict)))
                txtitem.append(lre[i][0])
            self.lexstatere[key] = titem
            self.lexstateretext[key] = txtitem
        self.lexstateerrorf = { }
        for key,ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[key] = fdict[ef]
        self.begin('INITIAL')
    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self,s):
        """Store a new input string and reset the scan position to 0."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c,StringTypes):
            raise ValueError("Expected a string")
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)
    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self,state):
        """Switch to *state*, swapping in that state's regexes, ignore set
        and error handler.  Raises ValueError for an unknown state."""
        if not state in self.lexstatere:
            raise ValueError("Undefined state")
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state,"")
        self.lexerrorf = self.lexstateerrorf.get(state,None)
        self.lexstate = state
    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self,state):
        """Enter *state*, remembering the current state for pop_state()."""
        self.lexstatestack.append(self.lexstate)
        self.begin(state)
    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        """Return to the state saved by the most recent push_state()."""
        self.begin(self.lexstatestack.pop())
    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        """Return the name of the current lexing state."""
        return self.lexstate
    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self,n):
        """Advance the scan position by *n* characters without tokenizing."""
        self.lexpos += n
    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None at end of input.

        Order of resolution at each position: ignored characters, rule
        regexes (function rules first, then string rules), literals, and
        finally the t_error handler.  Raises LexError when nothing matches.
        """
        # Make local copies of frequently referenced attributes for speed
        # (attribute lookups inside the scan loop are the hot path).
        lexpos    = self.lexpos
        lexlen    = self.lexlen
        lexignore = self.lexignore
        lexdata   = self.lexdata
        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue
            # Look for a regular expression match
            for lexre,lexindexfunc in self.lexre:
                m = lexre.match(lexdata,lexpos)
                if not m: continue
                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos
                # m.lastindex identifies which named group (i.e. which rule) matched.
                i = m.lastindex
                func,tok.type = lexindexfunc[i]
                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break
                lexpos = m.end()
                # If token is processed by a function, call it
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos
                newtok = func(tok)
                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos    = self.lexpos         # This is here in case user has updated lexpos.
                    lexignore = self.lexignore      # This is here in case there was a state change
                    break
                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if not newtok.type in self.lextokens:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func_code(func).co_filename, func_code(func).co_firstlineno,
                            func.__name__, newtok.type),lexdata[lexpos:])
                return newtok
            else:
                # for/else: no rule regex matched at this position.
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value        # literal tokens use the character itself as type
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok
                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = "error"
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok: continue
                    return newtok
                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos],lexpos), lexdata[lexpos:])
        # End of input: park lexpos just past the end and signal EOF with None.
        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError("No input string given with input()")
        return None
    # Iterator interface
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol; stops when token() returns None.
        t = self.token()
        if t is None:
            raise StopIteration
        return t
    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return a dict of all symbols visible in the frame *levels* up the
    call stack: that frame's globals, overlaid with its locals (when the
    two differ, i.e. the frame is not module level)."""
    try:
        raise RuntimeError
    except RuntimeError:
        # The traceback's first frame is this function; walk back from it.
        frame = sys.exc_info()[2].tb_frame
        while levels > 0:
            frame = frame.f_back
            levels -= 1
        symbols = frame.f_globals.copy()
        if frame.f_globals != frame.f_locals:
            symbols.update(frame.f_locals)
        return symbols
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist,namelist):
result = []
for f,name in zip(funclist,namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist,fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]],n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist,reflags,ldict,toknames):
    # Join the individual rule regexes into one big alternation.  If Python's
    # re module rejects the combined pattern (e.g. too many named groups),
    # split the list in half and recurse, producing several master regexes.
    # Returns parallel lists: [(compiled_re, indexfunc)], [regex text], [names].
    if not relist: return []
    regex = "|".join(relist)
    try:
        lexre = re.compile(regex,re.VERBOSE | reflags)
        # Build the index to function map for the matching engine
        lexindexfunc = [ None ] * (max(lexre.groupindex.values())+1)
        lexindexnames = lexindexfunc[:]
        for f,i in lexre.groupindex.items():
            handle = ldict.get(f,None)
            if type(handle) in (types.FunctionType, types.MethodType):
                # Function rule: dispatch to the handler, tagged with its token name.
                lexindexfunc[i] = (handle,toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find("ignore_") > 0:
                    # t_*ignore_* string rule: match and discard (no token type).
                    lexindexfunc[i] = (None,None)
                else:
                    # Plain string rule: no function, just a token type.
                    lexindexfunc[i] = (None, toknames[f])
        return [(lexre,lexindexfunc)],[regex],[lexindexnames]
    except Exception:
        # Combined regex too large for re: divide and conquer.
        m = int(len(relist)/2)
        if m == 0: m = 1
        llist, lre, lnames = _form_master_re(relist[:m],reflags,ldict,toknames)
        rlist, rre, rnames = _form_master_re(relist[m:],reflags,ldict,toknames)
        return llist+rlist, lre+rre, lnames+rnames
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s,names):
    # Split a declaration name "t_state1_state2_TOKNAME" into (states, tokname).
    # Leading underscore-separated components that are known state names (or
    # the special 'ANY') are consumed as states; the rest is the token name.
    nonstate = 1        # (unused; kept for byte-compatibility with upstream PLY)
    parts = s.split("_")
    for i in range(1,len(parts)):
        if not parts[i] in names and parts[i] != 'ANY': break
    # NOTE: relies on the loop variable 'i' surviving the loop.  s always has
    # a "t_" prefix, so parts has >= 2 elements and i is always bound.
    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)
    if 'ANY' in states:
        # 'ANY' expands to every declared state.
        states = tuple(names)
    tokenname = "_".join(parts[i:])
    return (states,tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates lexer specification data (tokens, literals,
    states, t_* rules) extracted from a user-supplied symbol dictionary.
    Typical use: get_all() then validate_all(); self.error is set to 1 on
    any problem, with details written to self.log."""
    def __init__(self,ldict,log=None,reflags=0):
        self.ldict      = ldict               # symbol dict to reflect over
        self.error_func = None
        self.tokens     = []
        self.reflags    = reflags             # extra re flags for rule validation
        self.stateinfo  = { 'INITIAL' : 'inclusive'}
        self.files      = {}                  # source files seen (for duplicate checks)
        self.error      = 0                   # sticky error flag
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log
    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()
    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error
    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        if not tokens:
            # NOTE(review): unreachable — an empty list is already caught by
            # the first "if not tokens" check above.
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens
    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'",n)
                self.error = 1
            if n in terminals:
                # Duplicates only warn; the lexer still works.
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1
    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get("literals","")
    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c,StringTypes) or len(c) > 1:
                    self.log.error("Invalid literal %s. Must be a single character", repr(c))
                    self.error = 1
                    continue
        except TypeError:
            # literals was not iterable at all.
            self.log.error("Invalid literals specification. literals must be a sequence of characters")
            self.error = 1
    def get_states(self):
        self.states = self.ldict.get("states",None)
        # Build statemap
        if self.states:
            if not isinstance(self.states,(tuple,list)):
                self.log.error("states must be defined as a tuple or list")
                self.error = 1
            else:
                for s in self.states:
                    if not isinstance(s,tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')",repr(s))
                        self.error = 1
                        continue
                    name, statetype = s
                    if not isinstance(name,StringTypes):
                        self.log.error("State name %s must be a string", repr(name))
                        self.error = 1
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'",name)
                        self.error = 1
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined",name)
                        self.error = 1
                        continue
                    self.stateinfo[name] = statetype
    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_' ]
        # Now build up a list of functions and a list of strings
        self.toknames = { }        # Mapping of symbols to token names
        self.funcsym =  { }        # Symbols defined as functions
        self.strsym =   { }        # Symbols defined as strings
        self.ignore   = { }        # Ignore strings by state
        self.errorf   = { }        # Error functions by state
        for s in self.stateinfo:
             self.funcsym[s] = []
             self.strsym[s] = []
        if len(tsymbols) == 0:
            self.log.error("No rules of the form t_rulename are defined")
            self.error = 1
            return
        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f,self.stateinfo)
            self.toknames[f] = tokname
            if hasattr(t,"__call__"):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'ignore':
                    # 'ignore' must be a string of characters, never a function.
                    line = func_code(t).co_firstlineno
                    file = func_code(t).co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string",file,line,t.__name__)
                    self.error = 1
                else:
                    for s in states:
                        self.funcsym[s].append((f,t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if "\\" in t:
                        self.log.warning("%s contains a literal backslash '\\'",f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = 1
                else:
                    for s in states:
                        self.strsym[s].append((f,t))
            else:
                self.log.error("%s not defined as a function or string", f)
                self.error = 1
        # Sort the functions by line number, so match priority follows
        # source-file declaration order.
        for f in self.funcsym.values():
            if sys.version_info[0] < 3:
                f.sort(lambda x,y: cmp(func_code(x[1]).co_firstlineno,func_code(y[1]).co_firstlineno))
            else:
                # Python 3.0
                f.sort(key=lambda x: func_code(x[1]).co_firstlineno)
        # Sort the strings by regular expression length, longest first,
        # so longer patterns win over their prefixes.
        for s in self.strsym.values():
            if sys.version_info[0] < 3:
                s.sort(lambda x,y: (len(x[1]) < len(y[1])) - (len(x[1]) > len(y[1])))
            else:
                # Python 3.0
                s.sort(key=lambda x: len(x[1]),reverse=True)
    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1
                tokname = self.toknames[fname]
                # Bound methods carry an implicit self, hence one extra argument.
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                    continue
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
                    continue
                if not f.__doc__:
                    # Function rules carry their regex in the docstring.
                    self.log.error("%s:%d: No regular expression defined for rule '%s'",file,line,f.__name__)
                    self.error = 1
                    continue
                try:
                    # Trial-compile exactly the named-group form used in the master regex.
                    c = re.compile("(?P<%s>%s)" % (fname,f.__doc__), re.VERBOSE | self.reflags)
                    if c.match(""):
                        # Empty-string matches would make the lexer loop forever.
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file,line,f.__name__)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file,line,f.__name__,e)
                    if '#' in f.__doc__:
                        # '#' starts a comment under re.VERBOSE unless escaped.
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'",file,line, f.__name__)
                    self.error = 1
            # Validate all rules defined by strings
            for name,r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = 1
                    continue
                if not tokname in self.tokens and tokname.find("ignore_") < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s",name,tokname)
                    self.error = 1
                    continue
                try:
                    c = re.compile("(?P<%s>%s)" % (name,r),re.VERBOSE | self.reflags)
                    if (c.match("")):
                        self.log.error("Regular expression for rule '%s' matches empty string",name)
                        self.error = 1
                except re.error:
                    _etype, e, _etrace = sys.exc_info()
                    self.log.error("Invalid regular expression for rule '%s'. %s",name,e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'",name)
                    self.error = 1
            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'",state)
                self.error = 1
            # Validate the error function
            efunc = self.errorf.get(state,None)
            if efunc:
                f = efunc
                line = func_code(f).co_firstlineno
                file = func_code(f).co_filename
                self.files[file] = 1
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = func_code(f).co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,f.__name__)
                    self.error = 1
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file,line,f.__name__)
                    self.error = 1
        for f in self.files:
            self.validate_file(f)
    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the given file.
    # -----------------------------------------------------------------------------
    def validate_file(self,filename):
        import os.path
        base,ext = os.path.splitext(filename)
        if ext != '.py': return         # No idea what the file is. Return OK
        try:
            f = open(filename)
            lines = f.readlines()
            f.close()
        except IOError:
            return                      # Couldn't find the file.  Don't worry about it
        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
        counthash = { }
        linen = 1
        for l in lines:
            m = fre.match(l)
            if not m:
                m = sre.match(l)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    # Second definition silently shadows the first — flag it.
                    self.log.error("%s:%d: Rule %s redefined. Previously defined on line %d",filename,linen,name,prev)
                    self.error = 1
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None,object=None,debug=0,optimize=0,lextab="lextab",reflags=0,nowarn=0,outputdir="", debuglog=None, errorlog=None):
    """Build and return a Lexer from rule definitions.

    Rules are reflected from *module*/*object* if given, otherwise from the
    caller's namespace.  With optimize=1 a previously written table file
    (*lextab*) is loaded if compatible, or written after building.  Also
    installs module-global token()/input()/lexer aliases for legacy use.
    Raises SyntaxError if the rule specification fails validation.
    """
    global lexer
    ldict = None
    stateinfo  = { 'INITIAL' : 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token,input
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the lexer
    if object: module = object
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        ldict = dict(_items)
    else:
        # No module given: reflect over the caller's globals/locals.
        ldict = get_caller_module_dict(2)
    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict,log=errorlog,reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")
    if optimize and lextab:
        try:
            # Fast path: reuse the pre-built tables and skip regex construction.
            lexobj.readtab(lextab,ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            # Missing/incompatible table file: fall through and rebuild.
            pass
    # Dump some basic debugging information
    if debug:
        debuglog.info("lex: tokens = %r", linfo.tokens)
        debuglog.info("lex: literals = %r", linfo.literals)
        debuglog.info("lex: states = %r", linfo.stateinfo)
    # Build a dictionary of valid token names
    lexobj.lextokens = { }
    for n in linfo.tokens:
        lexobj.lextokens[n] = 1
    # Get literals specification
    if isinstance(linfo.literals,(list,tuple)):
        # Join a literals list into one string of the same string type.
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals
    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo
    regexs = { }
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []
        # Add rules defined by functions first (they have match priority)
        for fname, f in linfo.funcsym[state]:
            line = func_code(f).co_firstlineno
            file = func_code(f).co_filename
            regex_list.append("(?P<%s>%s)" % (fname,f.__doc__))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",fname,f.__doc__, state)
        # Now add all of the simple rules
        for name,r in linfo.strsym[state]:
            regex_list.append("(?P<%s>%s)" % (name,r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')",name,r, state)
        regexs[state] = regex_list
    # Build the master regular expressions
    if debug:
        debuglog.info("lex: ==== MASTER REGEXS FOLLOW ====")
    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state],reflags,ldict,linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i in range(len(re_text)):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'",state, i, re_text[i])
    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state,stype in stateinfo.items():
        if state != "INITIAL" and stype == 'inclusive':
             lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
             lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
             lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere["INITIAL"]
    lexobj.lexretext = lexobj.lexstateretext["INITIAL"]
    lexobj.lexreflags = reflags
    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get("INITIAL","")
    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get("INITIAL",None)
    if not lexobj.lexerrorf:
        errorlog.warning("No t_error rule is defined")
    # Check state information for ignore and error rules
    for s,stype in stateinfo.items():
        if stype == 'exclusive':
              if not s in linfo.errorf:
                   errorlog.warning("No error rule is defined for exclusive state '%s'", s)
              if not s in linfo.ignore and lexobj.lexignore:
                   errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
              # Inclusive states inherit INITIAL's error/ignore when unset.
              if not s in linfo.errorf:
                   linfo.errorf[s] = linfo.errorf.get("INITIAL",None)
              if not s in linfo.ignore:
                   linfo.ignore[s] = linfo.ignore.get("INITIAL","")
    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj
    # If in optimize mode, we write the lextab
    if lextab and optimize:
        lexobj.writetab(lextab,outputdir)
    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    """Drive a lexer as a standalone program.

    Input comes from *data* if given, else from the file named in argv[1],
    else from stdin.  Each token is printed to stdout as (type,value,lineno,
    lexpos).  Without an explicit *lexer*, the module-global token()/input()
    installed by lex() are used.
    """
    if not data:
        try:
            source = open(sys.argv[1])
            data = source.read()
            source.close()
        except IndexError:
            sys.stdout.write("Reading from standard input (type EOF to end):\n")
            data = sys.stdin.read()
    _input = lexer.input if lexer else input
    _input(data)
    _token = lexer.token if lexer else token
    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write("(%s,%r,%d,%d)\n" % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator that attaches a regex to a lexer rule via its docstring.

    *r* may be the regex string itself, or a callable whose docstring holds
    the regex — useful when the pattern must be computed rather than written
    literally in the rule's own docstring.
    """
    def set_doc(f):
        f.__doc__ = r.__doc__ if callable(r) else r
        return f
    return set_doc
# Alternative spelling of the TOKEN decorator
Token = TOKEN
| mpl-2.0 |
berteh/ScribusGenerator | ScribusGeneratorCLI.py | 1 | 9360 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Mail-Merge for Scribus. This file provides the Command Line Interface (all OS)
#
# For further information (manual, description, etc.) please visit:
# https://github.com/berteh/ScribusGenerator/
#
# v2.0 (2015-12-02): added features (merge, range, clean, save/load)
# v1.9 (2015-08-03): initial command-line support (SLA only, use GUI version to generate PDF)
#
"""
The MIT License
Copyright (c) 2014 Berteh (https://github.com/berteh/)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import argparse
import sys
import os
import traceback
import ScribusGeneratorBackend
from ScribusGeneratorBackend import CONST, ScribusGenerator, GeneratorDataObject
# defaults
# Base directory for generated output (current working dir).  NOTE(review):
# the --outDir help text says the default is the scribus source file's
# directory — confirm which one actually wins downstream.
outDir = os.getcwd()
# parse options
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description=''' Generate Scribus (SLA) documents automatically from external (csv) data.
Mail-Merge-like extension to Scribus.''',
usage="%(prog)s [options] infiles+",
epilog='''requirements
This program requires Python 2.7+
examples:
%(prog)s my-template.sla
generates Scribus (SLA) files for each line of 'my-template.csv'
by subsituting the provides values into 'my-template.sla' to the
current directory.
%(prog)s --outDir "/home/user/tmp" example/Business_Card.sla
generates Scribus files for each line of example/Business_Card.csv
in the "/home/user/tmp" subdirectory.
%(prog)s --outName "card_%%VAR_email%%" */*.sla
generates Scribus files for each sla file in any subdirectory
that has a csv file with a similar name in the same directory.
Generated files will have a name constructed from the "email" field
data, and are stored in their respective sla file directory.
%(prog)s --single -c translations.csv -n doc_ lang/*.sla
generates a single Scribus file for each sla file in the lang/ subdirectory
using all rows of the translations.csv data file.
Generated files will have a name constructed from the "doc_" prefix
and the input sla file name.
more information: https://github.com/berteh/ScribusGenerator/
''')
parser.add_argument('infiles', nargs='+',
help='SLA file(s) to use as template(s) for the generation, wildcards are supported')
parser.add_argument('-c', '--csvFile', default=None,
help='CSV file containing the data to substitute in each template during generation. Default is scribus source file(s) name with "csv" extension instead of "sla". If csv file is not found, generation from this particular template is skipped.')
parser.add_argument('-d', '--csvDelimiter', default=CONST.CSV_SEP,
help='CSV field delimiter character. Default is comma: ","')
# parser.add_argument('-f', '--fast', '--noPdf', action='store_true', default=False, # commented utile Scribus allows pdf generation from command line
# help='no PDF generation, scribus SLA only (much faster)')
parser.add_argument('-n', '--outName', default=CONST.EMPTY,
help='name of the generated files, with no extension. Default is a simple incremental index. Using SG variables is allowed to define the name of generated documents.')
parser.add_argument('-o', '--outDir', default=None,
help='directory were generated files are stored. Default is the directory of the scribus source file. outputDir will be created if it does not exist.')
# parser.add_argument('-p', '--pdfOnly', '--noSla', action='store_true', default=False, # for pdf from CLI
# help='discard Scribus SLA, generate PDF only. This option is not used when --fast or --noPdf is used.')
parser.add_argument('-m', '--merge', '--single', action='store_true', default=False,
help='generate a single output (SLA) file that combines all data rows, for each source file.')
parser.add_argument('-from', '--firstrow', default=CONST.EMPTY, dest='firstRow',
help='Starting row of data to merge (not counting the header row), first row by default.')
parser.add_argument('-to', '--lastrow', default=CONST.EMPTY, dest='lastRow',
help='Last row of data to merge (not counting the header row), last row by default.')
parser.add_argument('-s', '--save', action='store_true', default=False,
help='Save current generator settings in (each) Scribus input file(s).')
parser.add_argument('-l', '--load', action='store_true', default=False,
help='Load generator settings from (each) Scribus input file(s). Overloads all options (but -h).')
def ife(test, if_result, else_result):
    """ Utility if-then-else syntactic sugar
    """
    # Conditional expression form of the original if/return pair;
    # truthiness semantics of *test* are unchanged.
    return if_result if test else else_result
# handle arguments
args = parser.parse_args()
# PDF generation from the command line is not available yet; see the
# commented-out --fast/--pdfOnly options above.
# if(args.pdfOnly or (not args.fast)): # for pdf from CLI
#     print("\nPDF generation is currently not available from command line, but SLA is. \nSimply add the '--noPdf' option to your command and it will run just fine.\n")
#     sys.exit()

# create outDir if needed
if (args.outDir is not None) and (not os.path.exists(args.outDir)):
    # print('creating output directory: '+args.outDir)
    os.makedirs(args.outDir)

# generate
# Collect the settings made and build the Data Object
dataObject = GeneratorDataObject(
    dataSourceFile=ife(args.csvFile is not None, args.csvFile, CONST.EMPTY),
    outputDirectory=ife(args.outDir is not None, args.outDir, CONST.EMPTY),
    outputFileName=args.outName,  # is CONST.EMPTY by default
    # ife(args.fast, CONST.FORMAT_SLA, CONST.FORMAT_PDF),
    outputFormat=CONST.FORMAT_SLA,
    # ife(args.pdfOnly, CONST.FALSE, CONST.TRUE), # not used if outputFormat is sla.
    keepGeneratedScribusFiles=CONST.TRUE,
    csvSeparator=args.csvDelimiter,  # is CONST.CSV_SEP by default
    singleOutput=args.merge,
    firstRow=args.firstRow,
    lastRow=args.lastRow,
    saveSettings=args.save)
generator = ScribusGenerator(dataObject)
log = generator.getLog()
log.debug("ScribusGenerator is starting generation for %s template(s)." %
          (str(len(args.infiles))))
for infile in args.infiles:
    dataObject.setScribusSourceFile(infile)
    if args.load:
        saved = generator.getSavedSettings()
        if saved:
            dataObject.loadFromString(saved)
            log.info("settings loaded from %s:" % (os.path.split(infile)[1]))
        else:
            log.warning("could not load settings from %s. using arguments and defaults instead" % (
                os.path.split(infile)[1]))
    # BUG FIX: the original compared strings with "is CONST.EMPTY"; identity
    # comparison of strings relies on interning and is not guaranteed --
    # equality is the correct check.
    if dataObject.getDataSourceFile() == CONST.EMPTY:  # default data file is template-sla+csv
        dataObject.setDataSourceFile(os.path.splitext(infile)[0] + ".csv")
    if not (os.path.exists(dataObject.getDataSourceFile()) and os.path.isfile(dataObject.getDataSourceFile())):
        log.warning("found no data file for %s. skipped. was looking for %s" % (
            os.path.split(infile)[1], dataObject.getDataSourceFile()))
        continue  # skip current template for lack of matching data.
    if dataObject.getOutputDirectory() == CONST.EMPTY:  # default outDir is template dir
        dataObject.setOutputDirectory(os.path.split(infile)[0])
    if not os.path.exists(dataObject.getOutputDirectory()):
        log.info("creating output directory: %s" %
                 (dataObject.getOutputDirectory()))
        os.makedirs(dataObject.getOutputDirectory())
    if dataObject.getSingleOutput() and (len(args.infiles) > 1):
        # With --single and several templates, disambiguate output names by
        # appending the template file name.
        dataObject.setOutputFileName(
            args.outName + '__' + os.path.split(infile)[1])
    log.info("Generating all files for %s in directory %s" %
             (os.path.split(infile)[1], dataObject.getOutputDirectory()))
    try:
        generator.run()
        log.info("Scribus Generation completed. Congrats!")
    except ValueError as e:
        # BUG FIX: Exception.message does not exist on Python 3 (PEP 352
        # removed it); str(e) is portable across Python 2 and 3.
        log.error("\nerror: could likely not replace a variable with its value.\nplease check your CSV data and CSV separator. moreover: " + str(e) + "\n")
    except IndexError as e:
        log.error("\nerror: could likely not find the value for one variable.\nplease check your CSV data and CSV separator.\n moreover: " + str(e) + "\n")
    except Exception:
        log.error("\nerror: " + traceback.format_exc())
| mit |
ntt-pf-lab/openstack-dashboard | django-openstack/django_openstack/utils.py | 11 | 1418 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
def time():
    '''Overrideable version of datetime.datetime.today'''
    # Tests install a fixed value via ``time.override_time``; when set, it is
    # returned instead of a freshly computed one.
    if time.override_time:
        return time.override_time
    # NOTE(review): datetime.time() constructs midnight (00:00:00), not the
    # current time of day -- this looks like a bug (compare today()/utcnow()
    # below, which call real datetime.datetime factories), and the docstring
    # mentions "today" rather than a time-of-day.  TODO confirm intended
    # behaviour before changing it.
    return datetime.time()
time.override_time = None
def today():
    '''Overridable version of datetime.datetime.today'''
    # Honour a test-installed override first; fall back to the real clock.
    override = today.override_time
    if override:
        return override
    return datetime.datetime.today()
today.override_time = None
def utcnow():
    '''Overridable version of datetime.datetime.utcnow'''
    # Honour a test-installed override first; fall back to the real clock.
    override = utcnow.override_time
    if override:
        return override
    return datetime.datetime.utcnow()
utcnow.override_time = None
| apache-2.0 |
schleichdi2/OpenNfr_E2_Gui-6.0 | lib/python/Plugins/Extensions/MediaPortal/additions/porn/hdporn.py | 1 | 4569 | # -*- coding: utf-8 -*-
from Plugins.Extensions.MediaPortal.plugin import _
from Plugins.Extensions.MediaPortal.resources.imports import *
class hdpornGenreScreen(MPScreen):
    """Genre selection screen for the HDPorn.net MediaPortal plugin.

    Downloads the site's channel overview page, parses the genres out of
    the HTML and shows them in a selectable list, prefixed by three fixed
    entries (Newest / Most Popular / Top Rated).
    """

    def __init__(self, session):
        MPScreen.__init__(self, session, skin='MP_Plugin')

        # Map remote-control keys to the handlers inherited from MPScreen
        # and defined on this class.
        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "0" : self.closeAll,
            "cancel" : self.keyCancel,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft
        }, -1)

        self['title'] = Label("HDPorn.net")
        self['ContentTitle'] = Label("Genre:")

        # keyLocked blocks keyOK until the genre list has been loaded.
        self.keyLocked = True
        self.suchString = ''

        self.genreliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.layoutFinished)

    def layoutFinished(self):
        # Fetch the channel overview asynchronously; genreData() is called
        # with the page body on success.
        self.keyLocked = True
        url = "http://www.hdporn.net/channels/"
        getPage(url).addCallback(self.genreData).addErrback(self.dataError)

    def genreData(self, data):
        # Scrape (url, image, title) triples from the channels page.
        Cats = re.findall('class="content">.*?href="(.*?)".*?src="(.*?)".*?alt="(.*?)"', data, re.S)
        if Cats:
            for (Url, Image, Title) in Cats:
                self.genreliste.append((Title, Url, Image))
            self.genreliste.sort()
            # Fixed entries go first (inserted in reverse order); they have
            # no cover image.
            self.genreliste.insert(0, ("Top Rated", "/top-rated/", None))
            self.genreliste.insert(0, ("Most Popular", "/most-viewed/", None))
            self.genreliste.insert(0, ("Newest", "/", None))
            self.ml.setList(map(self._defaultlistcenter, self.genreliste))
            self.ml.moveToIndex(0)
            self.keyLocked = False
            self.showInfos()

    def showInfos(self):
        # Show the cover of the currently selected genre (None for the
        # fixed entries; presumably CoverHelper tolerates that -- verify).
        Image = self['liste'].getCurrent()[0][2]
        CoverHelper(self['coverArt']).getCover(Image)

    def keyOK(self):
        # Open the film list for the selected genre, unless loading is
        # still in progress.
        if self.keyLocked:
            return
        Name = self['liste'].getCurrent()[0][0]
        Link = self['liste'].getCurrent()[0][1]
        self.session.open(hdpornFilmScreen, Link, Name)
class hdpornFilmScreen(MPScreen, ThumbsHelper):
    """Paged film list for one HDPorn.net genre.

    Scrapes title/url/thumbnail/runtime per video from the genre's HTML
    pages and lets the user start playback via SimplePlayer.
    """

    def __init__(self, session, Link, Name):
        # Link: genre path (e.g. "/top-rated/"); Name: genre display name.
        self.Link = Link
        self.Name = Name
        MPScreen.__init__(self, session, skin='MP_PluginDescr')
        ThumbsHelper.__init__(self)

        self["actions"] = ActionMap(["MP_Actions"], {
            "ok" : self.keyOK,
            "0" : self.closeAll,
            "cancel" : self.keyCancel,
            "5" : self.keyShowThumb,
            "up" : self.keyUp,
            "down" : self.keyDown,
            "right" : self.keyRight,
            "left" : self.keyLeft,
            "nextBouquet" : self.keyPageUp,
            "prevBouquet" : self.keyPageDown,
            "green" : self.keyPageNumber
        }, -1)

        self['title'] = Label("HDPorn.net")
        self['ContentTitle'] = Label("Genre: %s" % self.Name)
        self['F2'] = Label(_("Page"))
        self['Page'] = Label(_("Page:"))

        # keyLocked blocks input while a page is being fetched.
        self.keyLocked = True
        self.page = 1
        self.lastpage = 1

        self.filmliste = []
        self.ml = MenuList([], enableWrapAround=True, content=eListboxPythonMultiContent)
        self['liste'] = self.ml

        self.onLayoutFinish.append(self.loadPage)

    def loadPage(self):
        # Fetch the current result page for this genre.
        self.keyLocked = True
        self['name'].setText(_('Please wait...'))
        self.filmliste = []
        url = "http://www.hdporn.net%s/page%s.html" % (self.Link, str(self.page))
        getPage(url).addCallback(self.loadData).addErrback(self.dataError)

    def loadData(self, data):
        # Determine the last page number from the pagination markup
        # (helper provided by MPScreen).
        self.getLastPage(data, 'id="pagination">(.*?)</div>')
        # Scrape (url, title, thumbnail, runtime) per video.
        Movies = re.findall('class="content.*?<a\shref="(.*?)"\stitle="(.*?)".*?class="img-responsive.*?src="(.*?)".*?TIME:\s\s(.*?)</div>', data, re.S)
        if Movies:
            for (Url, Title, Image, Runtime) in Movies:
                Url = "http://www.hdporn.net%s" % Url
                self.filmliste.append((decodeHtml(Title), Url, Image, Runtime))
        if len(self.filmliste) == 0:
            # Placeholder entry so the list widget is never empty.
            self.filmliste.append((_("No videos found!"), "", "", ""))
        self.ml.setList(map(self._defaultlistleft, self.filmliste))
        self.ml.moveToIndex(0)
        self.keyLocked = False
        self.th_ThumbsQuery(self.filmliste, 0, 1, 2, None, None, self.page, self.lastpage, mode=1)
        self.showInfos()

    def showInfos(self):
        # Update title, runtime and cover for the selected entry.
        title = self['liste'].getCurrent()[0][0]
        pic = self['liste'].getCurrent()[0][2]
        runtime = self['liste'].getCurrent()[0][3]
        self['name'].setText(title)
        self['handlung'].setText("Runtime: %s" % (runtime))
        CoverHelper(self['coverArt']).getCover(pic)

    def keyOK(self):
        # Fetch the selected video's detail page to extract the stream URL.
        if self.keyLocked:
            return
        Link = self['liste'].getCurrent()[0][1]
        self.keyLocked = True
        getPage(Link).addCallback(self.getVideoPage).addErrback(self.dataError)

    def getVideoPage(self, data):
        # Extract <source src="..."> URLs and start playback with the
        # last match found.
        videoPage = re.findall('<source\ssrc="(.*?)"', data, re.S)
        if videoPage:
            for url in videoPage:
                self.keyLocked = False
            Title = self['liste'].getCurrent()[0][0]
            self.session.open(SimplePlayer, [(Title, url)], showPlaylist=False, ltype='hdporn')
Morgan-Stanley/treadmill | lib/python/treadmill/api/server.py | 2 | 2469 | """Implementation of server API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from treadmill import context
from treadmill import schema
class API:
    """Treadmill Server REST api."""

    def __init__(self):
        # The admin connection is resolved lazily so that constructing the
        # API does not require an initialised context.GLOBAL.
        def _admin_svr():
            """Lazily return admin object."""
            return context.GLOBAL.admin.server()

        @schema.schema(
            cell={'anyOf': [
                {'type': 'null'},
                {'$ref': 'server.json#/resource/properties/cell'}
            ]},
            partition={'anyOf': [
                {'type': 'null'},
                {'$ref': 'server.json#/resource/properties/partition'}
            ]}
        )
        def _list(cell=None, partition=None):
            """List servers by cell and/or features."""
            # Cell is filtered server-side; partition is filtered in Python
            # after the lookup.
            filter_ = {}
            if cell:
                filter_['cell'] = cell
            result = _admin_svr().list(filter_)
            if partition:
                result = [x for x in result if
                          (x['partition'] == partition)]
            return result

        @schema.schema({'$ref': 'server.json#/resource_id'})
        def get(rsrc_id):
            """Get server configuration."""
            result = _admin_svr().get(rsrc_id)
            # Echo the id back in the payload, REST-style.
            result['_id'] = rsrc_id
            return result

        @schema.schema({'$ref': 'server.json#/resource_id'},
                       {'allOf': [{'$ref': 'server.json#/resource'},
                                  {'$ref': 'server.json#/verbs/create'}]})
        def create(rsrc_id, rsrc):
            """Create server."""
            _admin_svr().create(rsrc_id, rsrc)
            # dirty=True: read back possibly-uncommitted state just written.
            return _admin_svr().get(rsrc_id, dirty=True)

        @schema.schema({'$ref': 'server.json#/resource_id'},
                       {'allOf': [{'$ref': 'server.json#/resource'},
                                  {'$ref': 'server.json#/verbs/update'}]})
        def update(rsrc_id, rsrc):
            """Update server."""
            _admin_svr().update(rsrc_id, rsrc)
            return _admin_svr().get(rsrc_id, dirty=True)

        @schema.schema({'$ref': 'server.json#/resource_id'})
        def delete(rsrc_id):
            """Delete server."""
            _admin_svr().delete(rsrc_id)

        # Expose the decorated closures as the public API surface.
        self.list = _list
        self.get = get
        self.create = create
        self.update = update
        self.delete = delete
| apache-2.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/numpy/core/tests/test_datetime.py | 12 | 91070 | from __future__ import division, absolute_import, print_function
import pickle
import numpy
import numpy as np
import datetime
from numpy.compat import asbytes
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_raises,
assert_warns, dec, suppress_warnings
)
# Use pytz to test out various time zones if available
try:
from pytz import timezone as tz
_has_pytz = True
except ImportError:
_has_pytz = False
class TestDateTime(TestCase):
def test_datetime_dtype_creation(self):
for unit in ['Y', 'M', 'W', 'D',
'h', 'm', 's', 'ms', 'us',
'ns', 'ps', 'fs', 'as']:
dt1 = np.dtype('M8[750%s]' % unit)
assert_(dt1 == np.dtype('datetime64[750%s]' % unit))
dt2 = np.dtype('m8[%s]' % unit)
assert_(dt2 == np.dtype('timedelta64[%s]' % unit))
# Generic units shouldn't add [] to the end
assert_equal(str(np.dtype("M8")), "datetime64")
# Should be possible to specify the endianness
assert_equal(np.dtype("=M8"), np.dtype("M8"))
assert_equal(np.dtype("=M8[s]"), np.dtype("M8[s]"))
assert_(np.dtype(">M8") == np.dtype("M8") or
np.dtype("<M8") == np.dtype("M8"))
assert_(np.dtype(">M8[D]") == np.dtype("M8[D]") or
np.dtype("<M8[D]") == np.dtype("M8[D]"))
assert_(np.dtype(">M8") != np.dtype("<M8"))
assert_equal(np.dtype("=m8"), np.dtype("m8"))
assert_equal(np.dtype("=m8[s]"), np.dtype("m8[s]"))
assert_(np.dtype(">m8") == np.dtype("m8") or
np.dtype("<m8") == np.dtype("m8"))
assert_(np.dtype(">m8[D]") == np.dtype("m8[D]") or
np.dtype("<m8[D]") == np.dtype("m8[D]"))
assert_(np.dtype(">m8") != np.dtype("<m8"))
# Check that the parser rejects bad datetime types
assert_raises(TypeError, np.dtype, 'M8[badunit]')
assert_raises(TypeError, np.dtype, 'm8[badunit]')
assert_raises(TypeError, np.dtype, 'M8[YY]')
assert_raises(TypeError, np.dtype, 'm8[YY]')
assert_raises(TypeError, np.dtype, 'm4')
assert_raises(TypeError, np.dtype, 'M7')
assert_raises(TypeError, np.dtype, 'm7')
assert_raises(TypeError, np.dtype, 'M16')
assert_raises(TypeError, np.dtype, 'm16')
def test_datetime_casting_rules(self):
# Cannot cast safely/same_kind between timedelta and datetime
assert_(not np.can_cast('m8', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8', 'M8', casting='safe'))
assert_(not np.can_cast('M8', 'm8', casting='safe'))
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
assert_(not np.can_cast('f4', 'm8', casting='safe'))
# Cannot cast safely/same_kind from integer to datetime
assert_(not np.can_cast('i8', 'M8', casting='same_kind'))
assert_(not np.can_cast('i8', 'M8', casting='safe'))
# Cannot cast safely/same_kind from bool to datetime
assert_(not np.can_cast('b1', 'M8', casting='same_kind'))
assert_(not np.can_cast('b1', 'M8', casting='safe'))
# Can cast safely/same_kind from bool to timedelta
assert_(np.can_cast('b1', 'm8', casting='same_kind'))
assert_(np.can_cast('b1', 'm8', casting='safe'))
# Can cast datetime safely from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='safe'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='safe'))
# Cannot cast timedelta safely from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='safe'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='safe'))
# Can cast datetime same_kind from months/years to days
assert_(np.can_cast('M8[M]', 'M8[D]', casting='same_kind'))
assert_(np.can_cast('M8[Y]', 'M8[D]', casting='same_kind'))
# Can't cast timedelta same_kind from months/years to days
assert_(not np.can_cast('m8[M]', 'm8[D]', casting='same_kind'))
assert_(not np.can_cast('m8[Y]', 'm8[D]', casting='same_kind'))
# Can cast datetime same_kind across the date/time boundary
assert_(np.can_cast('M8[D]', 'M8[h]', casting='same_kind'))
# Can cast timedelta same_kind across the date/time boundary
assert_(np.can_cast('m8[D]', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8[h]', 'm8[D]', casting='same_kind'))
# Cannot cast safely if the integer multiplier doesn't divide
assert_(not np.can_cast('M8[7h]', 'M8[3h]', casting='safe'))
assert_(not np.can_cast('M8[3h]', 'M8[6h]', casting='safe'))
# But can cast same_kind
assert_(np.can_cast('M8[7h]', 'M8[3h]', casting='same_kind'))
# Can cast safely if the integer multiplier does divide
assert_(np.can_cast('M8[6h]', 'M8[3h]', casting='safe'))
# We can always cast types with generic units (corresponding to NaT) to
# more specific types
assert_(np.can_cast('m8', 'm8[h]', casting='same_kind'))
assert_(np.can_cast('m8', 'm8[h]', casting='safe'))
assert_(np.can_cast('M8', 'M8[h]', casting='same_kind'))
assert_(np.can_cast('M8', 'M8[h]', casting='safe'))
# but not the other way around
assert_(not np.can_cast('m8[h]', 'm8', casting='same_kind'))
assert_(not np.can_cast('m8[h]', 'm8', casting='safe'))
assert_(not np.can_cast('M8[h]', 'M8', casting='same_kind'))
assert_(not np.can_cast('M8[h]', 'M8', casting='safe'))
def test_compare_generic_nat(self):
# regression tests for GH6452
assert_equal(np.datetime64('NaT'),
np.datetime64('2000') + np.timedelta64('NaT'))
# nb. we may want to make NaT != NaT true in the future
with suppress_warnings() as sup:
sup.filter(FutureWarning, ".*NAT ==")
assert_(np.datetime64('NaT') == np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') == np.datetime64('NaT'))
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
np.datetime64('1950-03-12'))
assert_equal(np.datetime64('1950-03-12T13', 's'),
np.datetime64('1950-03-12T13', 'm'))
# Default construction means NaT
assert_equal(np.datetime64(), np.datetime64('NaT'))
# Some basic strings and repr
assert_equal(str(np.datetime64('NaT')), 'NaT')
assert_equal(repr(np.datetime64('NaT')),
"numpy.datetime64('NaT')")
assert_equal(str(np.datetime64('2011-02')), '2011-02')
assert_equal(repr(np.datetime64('2011-02')),
"numpy.datetime64('2011-02')")
# None gets constructed as NaT
assert_equal(np.datetime64(None), np.datetime64('NaT'))
# Default construction of NaT is in generic units
assert_equal(np.datetime64().dtype, np.dtype('M8'))
assert_equal(np.datetime64('NaT').dtype, np.dtype('M8'))
# Construction from integers requires a specified unit
assert_raises(ValueError, np.datetime64, 17)
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.datetime64('2000-03-18T16', 'h')
b = np.array('2000-03-18T16', dtype='M8[h]')
assert_equal(a.dtype, np.dtype('M8[h]'))
assert_equal(b.dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a), a)
assert_equal(np.datetime64(a).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(b), a)
assert_equal(np.datetime64(b).dtype, np.dtype('M8[h]'))
assert_equal(np.datetime64(a, 's'), a)
assert_equal(np.datetime64(a, 's').dtype, np.dtype('M8[s]'))
assert_equal(np.datetime64(b, 's'), a)
assert_equal(np.datetime64(b, 's').dtype, np.dtype('M8[s]'))
# Construction from datetime.date
assert_equal(np.datetime64('1945-03-25'),
np.datetime64(datetime.date(1945, 3, 25)))
assert_equal(np.datetime64('2045-03-25', 'D'),
np.datetime64(datetime.date(2045, 3, 25), 'D'))
# Construction from datetime.datetime
assert_equal(np.datetime64('1980-01-25T14:36:22.5'),
np.datetime64(datetime.datetime(1980, 1, 25,
14, 36, 22, 500000)))
# Construction with time units from a date is okay
assert_equal(np.datetime64('1920-03-13', 'h'),
np.datetime64('1920-03-13T00'))
assert_equal(np.datetime64('1920-03', 'm'),
np.datetime64('1920-03-01T00:00'))
assert_equal(np.datetime64('1920', 's'),
np.datetime64('1920-01-01T00:00:00'))
assert_equal(np.datetime64(datetime.date(2045, 3, 25), 'ms'),
np.datetime64('2045-03-25T00:00:00.000'))
# Construction with date units from a datetime is also okay
assert_equal(np.datetime64('1920-03-13T18', 'D'),
np.datetime64('1920-03-13'))
assert_equal(np.datetime64('1920-03-13T18:33:12', 'M'),
np.datetime64('1920-03'))
assert_equal(np.datetime64('1920-03-13T18:33:12.5', 'Y'),
np.datetime64('1920'))
def test_datetime_scalar_construction_timezone(self):
# verify that supplying an explicit timezone works, but is deprecated
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00Z'),
np.datetime64('2000-01-01T00'))
with assert_warns(DeprecationWarning):
assert_equal(np.datetime64('2000-01-01T00-08'),
np.datetime64('2000-01-01T08'))
def test_datetime_array_find_type(self):
dt = np.datetime64('1970-01-01', 'M')
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('M8[M]'))
# at the moment, we don't automatically convert these to datetime64
dt = datetime.date(1970, 1, 1)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([dt])
assert_equal(arr.dtype, np.dtype('O'))
# find "supertype" for non-dates and dates
b = np.bool_(True)
dt = np.datetime64('1970-01-01', 'M')
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
dt = datetime.date(1970, 1, 1)
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
dt = datetime.datetime(1970, 1, 1, 12, 30, 40)
arr = np.array([b, dt])
assert_equal(arr.dtype, np.dtype('O'))
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
np.timedelta64(1, 'W'))
assert_equal(np.timedelta64(120, 's'),
np.timedelta64(2, 'm'))
# Default construction means 0
assert_equal(np.timedelta64(), np.timedelta64(0))
# None gets constructed as NaT
assert_equal(np.timedelta64(None), np.timedelta64('NaT'))
# Some basic strings and repr
assert_equal(str(np.timedelta64('NaT')), 'NaT')
assert_equal(repr(np.timedelta64('NaT')),
"numpy.timedelta64('NaT')")
assert_equal(str(np.timedelta64(3, 's')), '3 seconds')
assert_equal(repr(np.timedelta64(-3, 's')),
"numpy.timedelta64(-3,'s')")
assert_equal(repr(np.timedelta64(12)),
"numpy.timedelta64(12)")
# Construction from an integer produces generic units
assert_equal(np.timedelta64(12).dtype, np.dtype('m8'))
# When constructing from a scalar or zero-dimensional array,
# it either keeps the units or you can override them.
a = np.timedelta64(2, 'h')
b = np.array(2, dtype='m8[h]')
assert_equal(a.dtype, np.dtype('m8[h]'))
assert_equal(b.dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a), a)
assert_equal(np.timedelta64(a).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(b), a)
assert_equal(np.timedelta64(b).dtype, np.dtype('m8[h]'))
assert_equal(np.timedelta64(a, 's'), a)
assert_equal(np.timedelta64(a, 's').dtype, np.dtype('m8[s]'))
assert_equal(np.timedelta64(b, 's'), a)
assert_equal(np.timedelta64(b, 's').dtype, np.dtype('m8[s]'))
# Construction from datetime.timedelta
assert_equal(np.timedelta64(5, 'D'),
np.timedelta64(datetime.timedelta(days=5)))
assert_equal(np.timedelta64(102347621, 's'),
np.timedelta64(datetime.timedelta(seconds=102347621)))
assert_equal(np.timedelta64(-10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=-10234760000)))
assert_equal(np.timedelta64(10234760000, 'us'),
np.timedelta64(datetime.timedelta(
microseconds=10234760000)))
assert_equal(np.timedelta64(1023476, 'ms'),
np.timedelta64(datetime.timedelta(milliseconds=1023476)))
assert_equal(np.timedelta64(10, 'm'),
np.timedelta64(datetime.timedelta(minutes=10)))
assert_equal(np.timedelta64(281, 'h'),
np.timedelta64(datetime.timedelta(hours=281)))
assert_equal(np.timedelta64(28, 'W'),
np.timedelta64(datetime.timedelta(weeks=28)))
# Cannot construct across nonlinear time unit boundaries
a = np.timedelta64(3, 's')
assert_raises(TypeError, np.timedelta64, a, 'M')
assert_raises(TypeError, np.timedelta64, a, 'Y')
a = np.timedelta64(6, 'M')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'h')
a = np.timedelta64(1, 'Y')
assert_raises(TypeError, np.timedelta64, a, 'D')
assert_raises(TypeError, np.timedelta64, a, 'm')
def test_timedelta_scalar_construction_units(self):
# String construction detecting units
assert_equal(np.datetime64('2010').dtype,
np.dtype('M8[Y]'))
assert_equal(np.datetime64('2010-03').dtype,
np.dtype('M8[M]'))
assert_equal(np.datetime64('2010-03-12').dtype,
np.dtype('M8[D]'))
assert_equal(np.datetime64('2010-03-12T17').dtype,
np.dtype('M8[h]'))
assert_equal(np.datetime64('2010-03-12T17:15').dtype,
np.dtype('M8[m]'))
assert_equal(np.datetime64('2010-03-12T17:15:08').dtype,
np.dtype('M8[s]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123').dtype,
np.dtype('M8[ms]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.1234').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.12345').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('2010-03-12T17:15:08.123456').dtype,
np.dtype('M8[us]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789').dtype,
np.dtype('M8[ns]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.1234567890').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.12345678901').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64('1970-01-01T00:00:02.123456789012').dtype,
np.dtype('M8[ps]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345').dtype,
np.dtype('M8[fs]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.1234567890123456').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.12345678901234567').dtype,
np.dtype('M8[as]'))
assert_equal(np.datetime64(
'1970-01-01T00:00:02.123456789012345678').dtype,
np.dtype('M8[as]'))
# Python date object
assert_equal(np.datetime64(datetime.date(2010, 4, 16)).dtype,
np.dtype('M8[D]'))
# Python datetime object
assert_equal(np.datetime64(
datetime.datetime(2010, 4, 16, 13, 45, 18)).dtype,
np.dtype('M8[us]'))
# 'today' special value
assert_equal(np.datetime64('today').dtype,
np.dtype('M8[D]'))
# 'now' special value
assert_equal(np.datetime64('now').dtype,
np.dtype('M8[s]'))
def test_datetime_nat_casting(self):
a = np.array('NaT', dtype='M8[D]')
b = np.datetime64('NaT', '[D]')
# Arrays
assert_equal(a.astype('M8[s]'), np.array('NaT', dtype='M8[s]'))
assert_equal(a.astype('M8[ms]'), np.array('NaT', dtype='M8[ms]'))
assert_equal(a.astype('M8[M]'), np.array('NaT', dtype='M8[M]'))
assert_equal(a.astype('M8[Y]'), np.array('NaT', dtype='M8[Y]'))
assert_equal(a.astype('M8[W]'), np.array('NaT', dtype='M8[W]'))
# Scalars -> Scalars
assert_equal(np.datetime64(b, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(b, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(b, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(b, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(b, '[W]'), np.datetime64('NaT', '[W]'))
# Arrays -> Scalars
assert_equal(np.datetime64(a, '[s]'), np.datetime64('NaT', '[s]'))
assert_equal(np.datetime64(a, '[ms]'), np.datetime64('NaT', '[ms]'))
assert_equal(np.datetime64(a, '[M]'), np.datetime64('NaT', '[M]'))
assert_equal(np.datetime64(a, '[Y]'), np.datetime64('NaT', '[Y]'))
assert_equal(np.datetime64(a, '[W]'), np.datetime64('NaT', '[W]'))
def test_days_creation(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 - 365)
assert_equal(np.array('1600', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3)
assert_equal(np.array('1601', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)/4 + 3 + 366)
assert_equal(np.array('1900', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4)
assert_equal(np.array('1901', dtype='M8[D]').astype('i8'),
(1900-1970)*365 - (1970-1900)//4 + 365)
assert_equal(np.array('1967', dtype='M8[D]').astype('i8'), -3*365 - 1)
assert_equal(np.array('1968', dtype='M8[D]').astype('i8'), -2*365 - 1)
assert_equal(np.array('1969', dtype='M8[D]').astype('i8'), -1*365)
assert_equal(np.array('1970', dtype='M8[D]').astype('i8'), 0*365)
assert_equal(np.array('1971', dtype='M8[D]').astype('i8'), 1*365)
assert_equal(np.array('1972', dtype='M8[D]').astype('i8'), 2*365)
assert_equal(np.array('1973', dtype='M8[D]').astype('i8'), 3*365 + 1)
assert_equal(np.array('1974', dtype='M8[D]').astype('i8'), 4*365 + 1)
assert_equal(np.array('2000', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4)
assert_equal(np.array('2001', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366)
assert_equal(np.array('2400', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3)
assert_equal(np.array('2401', dtype='M8[D]').astype('i8'),
(2400 - 1970)*365 + (2400 - 1972)//4 - 3 + 366)
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 28)
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('i8'),
(1600-1970)*365 - (1972-1600)//4 + 3 + 31 + 29)
assert_equal(np.array('2000-02-29', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 28)
assert_equal(np.array('2000-03-01', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 31 + 29)
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('i8'),
(2000 - 1970)*365 + (2000 - 1972)//4 + 366 + 31 + 28 + 21)
def test_days_to_pydate(self):
assert_equal(np.array('1599', dtype='M8[D]').astype('O'),
datetime.date(1599, 1, 1))
assert_equal(np.array('1600', dtype='M8[D]').astype('O'),
datetime.date(1600, 1, 1))
assert_equal(np.array('1601', dtype='M8[D]').astype('O'),
datetime.date(1601, 1, 1))
assert_equal(np.array('1900', dtype='M8[D]').astype('O'),
datetime.date(1900, 1, 1))
assert_equal(np.array('1901', dtype='M8[D]').astype('O'),
datetime.date(1901, 1, 1))
assert_equal(np.array('2000', dtype='M8[D]').astype('O'),
datetime.date(2000, 1, 1))
assert_equal(np.array('2001', dtype='M8[D]').astype('O'),
datetime.date(2001, 1, 1))
assert_equal(np.array('1600-02-29', dtype='M8[D]').astype('O'),
datetime.date(1600, 2, 29))
assert_equal(np.array('1600-03-01', dtype='M8[D]').astype('O'),
datetime.date(1600, 3, 1))
assert_equal(np.array('2001-03-22', dtype='M8[D]').astype('O'),
datetime.date(2001, 3, 22))
def test_dtype_comparison(self):
assert_(not (np.dtype('M8[us]') == np.dtype('M8[ms]')))
assert_(np.dtype('M8[us]') != np.dtype('M8[ms]'))
assert_(np.dtype('M8[2D]') != np.dtype('M8[D]'))
assert_(np.dtype('M8[D]') != np.dtype('M8[2D]'))
def test_pydatetime_creation(self):
a = np.array(['1960-03-12', datetime.date(1960, 3, 12)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['1999-12-31', datetime.date(1999, 12, 31)], dtype='M8[D]')
assert_equal(a[0], a[1])
a = np.array(['2000-01-01', datetime.date(2000, 1, 1)], dtype='M8[D]')
assert_equal(a[0], a[1])
# Will fail if the date changes during the exact right moment
a = np.array(['today', datetime.date.today()], dtype='M8[D]')
assert_equal(a[0], a[1])
# datetime.datetime.now() returns local time, not UTC
#a = np.array(['now', datetime.datetime.now()], dtype='M8[s]')
#assert_equal(a[0], a[1])
# we can give a datetime.date time units
assert_equal(np.array(datetime.date(1960, 3, 12), dtype='M8[s]'),
np.array(np.datetime64('1960-03-12T00:00:00')))
def test_datetime_string_conversion(self):
    """Round-trip datetime64 values through byte strings and unicode.

    Checks both astype()-based conversion and elementwise assignment
    (``[...] =``), first with 'S' (bytes), then with 'U' (unicode).
    """
    a = ['2011-03-16', '1920-01-01', '2013-05-19']
    str_a = np.array(a, dtype='S')
    dt_a = np.array(a, dtype='M')
    str_b = np.empty_like(str_a)
    dt_b = np.empty_like(dt_a)
    # String to datetime
    assert_equal(dt_a, str_a.astype('M'))
    assert_equal(dt_a.dtype, str_a.astype('M').dtype)
    dt_b[...] = str_a
    assert_equal(dt_a, dt_b)
    # Datetime to string ('S0' requests an automatically-sized bytes dtype)
    assert_equal(str_a, dt_a.astype('S0'))
    str_b[...] = dt_a
    assert_equal(str_a, str_b)
    # Convert the 'S' to 'U'
    str_a = str_a.astype('U')
    str_b = str_b.astype('U')
    # Unicode to datetime
    assert_equal(dt_a, str_a.astype('M'))
    assert_equal(dt_a.dtype, str_a.astype('M').dtype)
    dt_b[...] = str_a
    assert_equal(dt_a, dt_b)
    # Datetime to unicode
    assert_equal(str_a, dt_a.astype('U'))
    str_b[...] = dt_a
    assert_equal(str_a, str_b)
def test_datetime_array_str(self):
    """str() and array2string formatting of datetime64 arrays.

    NOTE(review): the expected strings pin exact array-printing output,
    which has changed across numpy releases — confirm against the
    targeted numpy version.
    """
    a = np.array(['2011-03-16', '1920-01-01', '2013-05-19'], dtype='M')
    assert_equal(str(a), "['2011-03-16' '1920-01-01' '2013-05-19']")
    a = np.array(['2011-03-16T13:55', '1920-01-01T03:12'], dtype='M')
    # A custom per-element 'datetime' formatter is honored by array2string.
    assert_equal(np.array2string(a, separator=', ',
                 formatter={'datetime': lambda x:
                            "'%s'" % np.datetime_as_string(x, timezone='UTC')}),
                 "['2011-03-16T13:55Z', '1920-01-01T03:12Z']")
    # Check that one NaT doesn't corrupt subsequent entries
    a = np.array(['2010', 'NaT', '2030']).astype('M')
    assert_equal(str(a), "['2010' 'NaT' '2030']")
def test_timedelta_array_str(self):
    """str() formatting of timedelta64 arrays, including NaT alignment.

    NOTE(review): expected strings pin exact array-printing output,
    which is numpy-version-sensitive.
    """
    a = np.array([-1, 0, 100], dtype='m')
    assert_equal(str(a), "[ -1   0 100]")
    a = np.array(['NaT', 'NaT'], dtype='m')
    assert_equal(str(a), "['NaT' 'NaT']")
    # Check right-alignment with NaTs
    a = np.array([-1, 'NaT', 0], dtype='m')
    assert_equal(str(a), "[   -1 'NaT'     0]")
    a = np.array([-1, 'NaT', 1234567], dtype='m')
    assert_equal(str(a), "[     -1   'NaT' 1234567]")
    # Test with other byteorder:
    a = np.array([-1, 'NaT', 1234567], dtype='>m')
    assert_equal(str(a), "[     -1   'NaT' 1234567]")
    a = np.array([-1, 'NaT', 1234567], dtype='<m')
    assert_equal(str(a), "[     -1   'NaT' 1234567]")
def test_pickle(self):
    """Pickle round-tripping of datetime dtypes, plus numpy-1.6 pickles.

    The string literals below are protocol-0 pickles captured from
    numpy 1.6; they must keep loading into the equivalent modern dtypes.
    """
    # Check that pickle roundtripping works
    dt = np.dtype('M8[7D]')
    assert_equal(pickle.loads(pickle.dumps(dt)), dt)
    dt = np.dtype('M8[W]')
    assert_equal(pickle.loads(pickle.dumps(dt)), dt)
    # Check that loading pickles from 1.6 works
    pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          "(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'D'\np6\n" + \
          "I7\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(asbytes(pkl)), np.dtype('<M8[7D]'))
    pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          "(I4\nS'<'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'W'\np6\n" + \
          "I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(asbytes(pkl)), np.dtype('<M8[W]'))
    pkl = "cnumpy\ndtype\np0\n(S'M8'\np1\nI0\nI1\ntp2\nRp3\n" + \
          "(I4\nS'>'\np4\nNNNI-1\nI-1\nI0\n((dp5\n(S'us'\np6\n" + \
          "I1\nI1\nI1\ntp7\ntp8\ntp9\nb."
    assert_equal(pickle.loads(asbytes(pkl)), np.dtype('>M8[us]'))
def test_setstate(self):
    """Verify that datetime dtype __setstate__ can handle bad arguments.

    After each failed __setstate__ call, the dtype's __reduce__ state
    must be unchanged (i.e. the failure left the dtype intact).
    """
    dt = np.dtype('>M8[us]')
    # Bad final element: metadata tuple expected, plain int given.
    assert_raises(ValueError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, 1))
    assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
    # Bad metadata contents: ({}, 'xxx') is not a valid datetime metadata pair.
    assert_raises(TypeError, dt.__setstate__, (4, '>', None, None, None, -1, -1, 0, ({}, 'xxx')))
    assert_(dt.__reduce__()[2] == np.dtype('>M8[us]').__reduce__()[2])
def test_dtype_promotion(self):
    """Promoting datetime/timedelta dtypes computes the metadata GCD.

    E.g. 12Y and 15Y promote to 3Y; 1W and 2D promote to 1D.  Promotion
    raises when no sensible common unit exists, or overflows when the
    unit range is too large to bridge.
    """
    # datetime <op> datetime computes the metadata gcd
    # timedelta <op> timedelta computes the metadata gcd
    for mM in ['m', 'M']:
        assert_equal(
            np.promote_types(np.dtype(mM+'8[2Y]'), np.dtype(mM+'8[2Y]')),
            np.dtype(mM+'8[2Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[12Y]'), np.dtype(mM+'8[15Y]')),
            np.dtype(mM+'8[3Y]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[62M]'), np.dtype(mM+'8[24M]')),
            np.dtype(mM+'8[2M]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[1W]'), np.dtype(mM+'8[2D]')),
            np.dtype(mM+'8[1D]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[W]'), np.dtype(mM+'8[13s]')),
            np.dtype(mM+'8[s]'))
        assert_equal(
            np.promote_types(np.dtype(mM+'8[13W]'), np.dtype(mM+'8[49s]')),
            np.dtype(mM+'8[7s]'))
    # timedelta <op> timedelta raises when there is no reasonable gcd
    # (years/months have no fixed length in days or finer units)
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[Y]'), np.dtype('m8[D]'))
    assert_raises(TypeError, np.promote_types,
                  np.dtype('m8[M]'), np.dtype('m8[W]'))
    # timedelta <op> timedelta may overflow with big unit ranges
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[W]'), np.dtype('m8[fs]'))
    assert_raises(OverflowError, np.promote_types,
                  np.dtype('m8[s]'), np.dtype('m8[as]'))
def test_cast_overflow(self):
# gh-4486
def cast():
numpy.datetime64("1971-01-01 00:00:00.000000000000000").astype("<M8[D]")
assert_raises(OverflowError, cast)
def cast2():
numpy.datetime64("2014").astype("<M8[fs]")
assert_raises(OverflowError, cast2)
def test_pyobject_roundtrip(self):
    """All datetime types should be able to roundtrip through object.

    Reuses one int64 buffer viewed as each datetime unit; the first
    nine slots are overwritten with edge-case dates (negative years,
    year 0, epoch boundary, 5-digit years, NaT), the rest keep their
    raw integer values.
    """
    a = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0,
                  -1020040340, -2942398, -1, 0, 1, 234523453, 1199164176],
                 dtype=np.int64)
    # With date units
    for unit in ['M8[D]', 'M8[W]', 'M8[M]', 'M8[Y]']:
        b = a.copy().view(dtype=unit)
        b[0] = '-0001-01-01'
        b[1] = '-0001-12-31'
        b[2] = '0000-01-01'
        b[3] = '0001-01-01'
        b[4] = '1969-12-31'
        b[5] = '1970-01-01'
        b[6] = '9999-12-31'
        b[7] = '10000-01-01'
        b[8] = 'NaT'
        assert_equal(b.astype(object).astype(unit), b,
                     "Error roundtripping unit %s" % unit)
    # With time units (including non-unit multiples like 16fs, 300as)
    for unit in ['M8[as]', 'M8[16fs]', 'M8[ps]', 'M8[us]',
                 'M8[300as]', 'M8[20us]']:
        b = a.copy().view(dtype=unit)
        b[0] = '-0001-01-01T00'
        b[1] = '-0001-12-31T00'
        b[2] = '0000-01-01T00'
        b[3] = '0001-01-01T00'
        b[4] = '1969-12-31T23:59:59.999999'
        b[5] = '1970-01-01T00'
        b[6] = '9999-12-31T23:59:59.999999'
        b[7] = '10000-01-01T00'
        b[8] = 'NaT'
        assert_equal(b.astype(object).astype(unit), b,
                     "Error roundtripping unit %s" % unit)
def test_month_truncation(self):
# Make sure that months are truncating correctly
assert_equal(np.array('1945-03-01', dtype='M8[M]'),
np.array('1945-03-31', dtype='M8[M]'))
assert_equal(np.array('1969-11-01', dtype='M8[M]'),
np.array('1969-11-30T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1969-12-01', dtype='M8[M]'),
np.array('1969-12-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1970-01-01', dtype='M8[M]'),
np.array('1970-01-31T23:59:59.99999', dtype='M').astype('M8[M]'))
assert_equal(np.array('1980-02-01', dtype='M8[M]'),
np.array('1980-02-29T23:59:59.99999', dtype='M').astype('M8[M]'))
def test_different_unit_comparison(self):
    """The same instant must compare equal across different units.

    Checked pairwise across date units, time units, and day/sub-day
    units, for both arrays and np.datetime64 scalars.
    """
    # Check some years with date units
    for unit1 in ['Y', 'M', 'D']:
        dt1 = np.dtype('M8[%s]' % unit1)
        for unit2 in ['Y', 'M', 'D']:
            dt2 = np.dtype('M8[%s]' % unit2)
            assert_equal(np.array('1945', dtype=dt1),
                         np.array('1945', dtype=dt2))
            assert_equal(np.array('1970', dtype=dt1),
                         np.array('1970', dtype=dt2))
            assert_equal(np.array('9999', dtype=dt1),
                         np.array('9999', dtype=dt2))
            assert_equal(np.array('10000', dtype=dt1),
                         np.array('10000-01-01', dtype=dt2))
            assert_equal(np.datetime64('1945', unit1),
                         np.datetime64('1945', unit2))
            assert_equal(np.datetime64('1970', unit1),
                         np.datetime64('1970', unit2))
            assert_equal(np.datetime64('9999', unit1),
                         np.datetime64('9999', unit2))
            assert_equal(np.datetime64('10000', unit1),
                         np.datetime64('10000-01-01', unit2))
    # Check some datetimes with time units
    for unit1 in ['6h', 'h', 'm', 's', '10ms', 'ms', 'us']:
        dt1 = np.dtype('M8[%s]' % unit1)
        for unit2 in ['h', 'm', 's', 'ms', 'us']:
            dt2 = np.dtype('M8[%s]' % unit2)
            assert_equal(np.array('1945-03-12T18', dtype=dt1),
                         np.array('1945-03-12T18', dtype=dt2))
            assert_equal(np.array('1970-03-12T18', dtype=dt1),
                         np.array('1970-03-12T18', dtype=dt2))
            assert_equal(np.array('9999-03-12T18', dtype=dt1),
                         np.array('9999-03-12T18', dtype=dt2))
            assert_equal(np.array('10000-01-01T00', dtype=dt1),
                         np.array('10000-01-01T00', dtype=dt2))
            assert_equal(np.datetime64('1945-03-12T18', unit1),
                         np.datetime64('1945-03-12T18', unit2))
            assert_equal(np.datetime64('1970-03-12T18', unit1),
                         np.datetime64('1970-03-12T18', unit2))
            assert_equal(np.datetime64('9999-03-12T18', unit1),
                         np.datetime64('9999-03-12T18', unit2))
            assert_equal(np.datetime64('10000-01-01T00', unit1),
                         np.datetime64('10000-01-01T00', unit2))
    # Check some days with units that won't overflow
    for unit1 in ['D', '12h', 'h', 'm', 's', '4s', 'ms', 'us']:
        dt1 = np.dtype('M8[%s]' % unit1)
        for unit2 in ['D', 'h', 'm', 's', 'ms', 'us']:
            dt2 = np.dtype('M8[%s]' % unit2)
            assert_(np.equal(np.array('1932-02-17', dtype='M').astype(dt1),
                             np.array('1932-02-17T00:00:00', dtype='M').astype(dt2),
                             casting='unsafe'))
            assert_(np.equal(np.array('10000-04-27', dtype='M').astype(dt1),
                             np.array('10000-04-27T00:00:00', dtype='M').astype(dt2),
                             casting='unsafe'))
    # Shouldn't be able to compare datetime and timedelta
    # TODO: Changing to 'same_kind' or 'safe' casting in the ufuncs by
    #       default is needed to properly catch this kind of thing...
    a = np.array('2012-12-21', dtype='M8[D]')
    b = np.array(3, dtype='m8[D]')
    #assert_raises(TypeError, np.less, a, b)
    assert_raises(TypeError, np.less, a, b, casting='same_kind')
def test_datetime_like(self):
a = np.array([3], dtype='m8[4D]')
b = np.array(['2012-12-21'], dtype='M8[D]')
assert_equal(np.ones_like(a).dtype, a.dtype)
assert_equal(np.zeros_like(a).dtype, a.dtype)
assert_equal(np.empty_like(a).dtype, a.dtype)
assert_equal(np.ones_like(b).dtype, b.dtype)
assert_equal(np.zeros_like(b).dtype, b.dtype)
assert_equal(np.empty_like(b).dtype, b.dtype)
def test_datetime_unary(self):
for tda, tdb, tdzero, tdone, tdmone in \
[
# One-dimensional arrays
(np.array([3], dtype='m8[D]'),
np.array([-3], dtype='m8[D]'),
np.array([0], dtype='m8[D]'),
np.array([1], dtype='m8[D]'),
np.array([-1], dtype='m8[D]')),
# NumPy scalars
(np.timedelta64(3, '[D]'),
np.timedelta64(-3, '[D]'),
np.timedelta64(0, '[D]'),
np.timedelta64(1, '[D]'),
np.timedelta64(-1, '[D]'))]:
# negative ufunc
assert_equal(-tdb, tda)
assert_equal((-tdb).dtype, tda.dtype)
assert_equal(np.negative(tdb), tda)
assert_equal(np.negative(tdb).dtype, tda.dtype)
# absolute ufunc
assert_equal(np.absolute(tdb), tda)
assert_equal(np.absolute(tdb).dtype, tda.dtype)
# sign ufunc
assert_equal(np.sign(tda), tdone)
assert_equal(np.sign(tdb), tdmone)
assert_equal(np.sign(tdzero), tdzero)
assert_equal(np.sign(tda).dtype, tda.dtype)
# The ufuncs always produce native-endian results
assert_
def test_datetime_add(self):
    """Addition rules for m8 and M8 operands.

    For each fixture set (arrays, then scalars): m8+m8, m8+int/bool,
    M8+int/bool/m8 (NaT propagates), mixed-unit addition promotes to
    the finer unit, and M8+M8 is rejected.
    """
    for dta, dtb, dtc, dtnat, tda, tdb, tdc in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array(['2012-12-24'], dtype='M8[D]'),
              np.array(['2012-12-21T11'], dtype='M8[h]'),
              np.array(['NaT'], dtype='M8[D]'),
              np.array([3], dtype='m8[D]'),
              np.array([11], dtype='m8[h]'),
              np.array([3*24 + 11], dtype='m8[h]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.datetime64('2012-12-24', '[D]'),
              np.datetime64('2012-12-21T11', '[h]'),
              np.datetime64('NaT', '[D]'),
              np.timedelta64(3, '[D]'),
              np.timedelta64(11, '[h]'),
              np.timedelta64(3*24 + 11, '[h]'))]:
        # m8 + m8
        assert_equal(tda + tdb, tdc)
        assert_equal((tda + tdb).dtype, np.dtype('m8[h]'))
        # m8 + bool
        assert_equal(tdb + True, tdb + 1)
        assert_equal((tdb + True).dtype, np.dtype('m8[h]'))
        # m8 + int
        assert_equal(tdb + 3*24, tdc)
        assert_equal((tdb + 3*24).dtype, np.dtype('m8[h]'))
        # bool + m8
        assert_equal(False + tdb, tdb)
        assert_equal((False + tdb).dtype, np.dtype('m8[h]'))
        # int + m8
        assert_equal(3*24 + tdb, tdc)
        assert_equal((3*24 + tdb).dtype, np.dtype('m8[h]'))
        # M8 + bool
        assert_equal(dta + True, dta + 1)
        assert_equal(dtnat + True, dtnat)
        assert_equal((dta + True).dtype, np.dtype('M8[D]'))
        # M8 + int
        assert_equal(dta + 3, dtb)
        assert_equal(dtnat + 3, dtnat)
        assert_equal((dta + 3).dtype, np.dtype('M8[D]'))
        # bool + M8
        assert_equal(False + dta, dta)
        assert_equal(False + dtnat, dtnat)
        assert_equal((False + dta).dtype, np.dtype('M8[D]'))
        # int + M8
        assert_equal(3 + dta, dtb)
        assert_equal(3 + dtnat, dtnat)
        assert_equal((3 + dta).dtype, np.dtype('M8[D]'))
        # M8 + m8
        assert_equal(dta + tda, dtb)
        assert_equal(dtnat + tda, dtnat)
        assert_equal((dta + tda).dtype, np.dtype('M8[D]'))
        # m8 + M8
        assert_equal(tda + dta, dtb)
        assert_equal(tda + dtnat, dtnat)
        assert_equal((tda + dta).dtype, np.dtype('M8[D]'))
        # In M8 + m8, the result goes to higher precision
        assert_equal(np.add(dta, tdb, casting='unsafe'), dtc)
        assert_equal(np.add(dta, tdb, casting='unsafe').dtype,
                     np.dtype('M8[h]'))
        assert_equal(np.add(tdb, dta, casting='unsafe'), dtc)
        assert_equal(np.add(tdb, dta, casting='unsafe').dtype,
                     np.dtype('M8[h]'))
        # M8 + M8 makes no sense and must raise
        assert_raises(TypeError, np.add, dta, dtb)
def test_datetime_subtract(self):
    """Subtraction rules for m8 and M8 operands.

    For each fixture set (arrays, then scalars): m8-m8, m8-int/bool,
    M8-int/bool/m8 (NaT propagates), mixed-unit subtraction promotes
    to the finer unit, M8-M8 yields m8, and m8-M8 / bool-M8 / int-M8
    are rejected.
    """
    for dta, dtb, dtc, dtd, dte, dtnat, tda, tdb, tdc in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array(['2012-12-24'], dtype='M8[D]'),
              np.array(['1940-12-24'], dtype='M8[D]'),
              np.array(['1940-12-24T00'], dtype='M8[h]'),
              np.array(['1940-12-23T13'], dtype='M8[h]'),
              np.array(['NaT'], dtype='M8[D]'),
              np.array([3], dtype='m8[D]'),
              np.array([11], dtype='m8[h]'),
              np.array([3*24 - 11], dtype='m8[h]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.datetime64('2012-12-24', '[D]'),
              np.datetime64('1940-12-24', '[D]'),
              np.datetime64('1940-12-24T00', '[h]'),
              np.datetime64('1940-12-23T13', '[h]'),
              np.datetime64('NaT', '[D]'),
              np.timedelta64(3, '[D]'),
              np.timedelta64(11, '[h]'),
              np.timedelta64(3*24 - 11, '[h]'))]:
        # m8 - m8
        assert_equal(tda - tdb, tdc)
        assert_equal((tda - tdb).dtype, np.dtype('m8[h]'))
        assert_equal(tdb - tda, -tdc)
        assert_equal((tdb - tda).dtype, np.dtype('m8[h]'))
        # m8 - bool
        assert_equal(tdc - True, tdc - 1)
        assert_equal((tdc - True).dtype, np.dtype('m8[h]'))
        # m8 - int
        assert_equal(tdc - 3*24, -tdb)
        assert_equal((tdc - 3*24).dtype, np.dtype('m8[h]'))
        # bool - m8
        assert_equal(False - tdb, -tdb)
        assert_equal((False - tdb).dtype, np.dtype('m8[h]'))
        # int - m8
        assert_equal(3*24 - tdb, tdc)
        assert_equal((3*24 - tdb).dtype, np.dtype('m8[h]'))
        # M8 - bool
        assert_equal(dtb - True, dtb - 1)
        assert_equal(dtnat - True, dtnat)
        assert_equal((dtb - True).dtype, np.dtype('M8[D]'))
        # M8 - int
        assert_equal(dtb - 3, dta)
        assert_equal(dtnat - 3, dtnat)
        assert_equal((dtb - 3).dtype, np.dtype('M8[D]'))
        # M8 - m8
        assert_equal(dtb - tda, dta)
        assert_equal(dtnat - tda, dtnat)
        assert_equal((dtb - tda).dtype, np.dtype('M8[D]'))
        # In M8 - m8, the result goes to higher precision
        assert_equal(np.subtract(dtc, tdb, casting='unsafe'), dte)
        assert_equal(np.subtract(dtc, tdb, casting='unsafe').dtype,
                     np.dtype('M8[h]'))
        # M8 - M8 with different goes to higher precision
        assert_equal(np.subtract(dtc, dtd, casting='unsafe'),
                     np.timedelta64(0, 'h'))
        assert_equal(np.subtract(dtc, dtd, casting='unsafe').dtype,
                     np.dtype('m8[h]'))
        assert_equal(np.subtract(dtd, dtc, casting='unsafe'),
                     np.timedelta64(0, 'h'))
        assert_equal(np.subtract(dtd, dtc, casting='unsafe').dtype,
                     np.dtype('m8[h]'))
        # m8 - M8
        assert_raises(TypeError, np.subtract, tda, dta)
        # bool - M8
        assert_raises(TypeError, np.subtract, False, dta)
        # int - M8
        assert_raises(TypeError, np.subtract, 3, dta)
def test_datetime_multiply(self):
    """Multiplication rules: m8*number is allowed; anything involving
    M8, and m8*m8, must raise.  NaT and inf/nan multiplications yield
    NaT (with the RuntimeWarning suppressed).
    """
    for dta, tda, tdb, tdc in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array([6], dtype='m8[h]'),
              np.array([9], dtype='m8[h]'),
              np.array([12], dtype='m8[h]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.timedelta64(6, '[h]'),
              np.timedelta64(9, '[h]'),
              np.timedelta64(12, '[h]'))]:
        # m8 * int
        assert_equal(tda * 2, tdc)
        assert_equal((tda * 2).dtype, np.dtype('m8[h]'))
        # int * m8
        assert_equal(2 * tda, tdc)
        assert_equal((2 * tda).dtype, np.dtype('m8[h]'))
        # m8 * float
        assert_equal(tda * 1.5, tdb)
        assert_equal((tda * 1.5).dtype, np.dtype('m8[h]'))
        # float * m8
        assert_equal(1.5 * tda, tdb)
        assert_equal((1.5 * tda).dtype, np.dtype('m8[h]'))
        # m8 * m8
        assert_raises(TypeError, np.multiply, tda, tdb)
        # m8 * M8
        assert_raises(TypeError, np.multiply, dta, tda)
        # M8 * m8
        assert_raises(TypeError, np.multiply, tda, dta)
        # M8 * int
        assert_raises(TypeError, np.multiply, dta, 2)
        # int * M8
        assert_raises(TypeError, np.multiply, 2, dta)
        # M8 * float
        assert_raises(TypeError, np.multiply, dta, 1.5)
        # float * M8
        assert_raises(TypeError, np.multiply, 1.5, dta)
    # NaTs
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, "invalid value encountered in multiply")
        nat = np.timedelta64('NaT')
        def check(a, b, res):
            # Multiplication must be commutative here.
            assert_equal(a * b, res)
            assert_equal(b * a, res)
        for tp in (int, float):
            check(nat, tp(2), nat)
            check(nat, tp(0), nat)
        for f in (float('inf'), float('nan')):
            check(np.timedelta64(1), f, nat)
            check(np.timedelta64(0), f, nat)
            check(nat, f, nat)
def test_datetime_divide(self):
    """Division rules: m8/number scales, m8/m8 gives an f8 ratio;
    floor division of m8/m8 and anything involving M8 must raise.
    Division by zero, inf and nan yields NaT or zero as appropriate.
    """
    for dta, tda, tdb, tdc, tdd in \
            [
             # One-dimensional arrays
             (np.array(['2012-12-21'], dtype='M8[D]'),
              np.array([6], dtype='m8[h]'),
              np.array([9], dtype='m8[h]'),
              np.array([12], dtype='m8[h]'),
              np.array([6], dtype='m8[m]')),
             # NumPy scalars
             (np.datetime64('2012-12-21', '[D]'),
              np.timedelta64(6, '[h]'),
              np.timedelta64(9, '[h]'),
              np.timedelta64(12, '[h]'),
              np.timedelta64(6, '[m]'))]:
        # m8 / int
        assert_equal(tdc / 2, tda)
        assert_equal((tdc / 2).dtype, np.dtype('m8[h]'))
        # m8 / float
        assert_equal(tda / 0.5, tdc)
        assert_equal((tda / 0.5).dtype, np.dtype('m8[h]'))
        # m8 / m8 -> plain float ratio, independent of the units
        assert_equal(tda / tdb, 6.0 / 9.0)
        assert_equal(np.divide(tda, tdb), 6.0 / 9.0)
        assert_equal(np.true_divide(tda, tdb), 6.0 / 9.0)
        assert_equal(tdb / tda, 9.0 / 6.0)
        assert_equal((tda / tdb).dtype, np.dtype('f8'))
        # 6h / 6m == 60; units are normalized before dividing
        assert_equal(tda / tdd, 60.0)
        assert_equal(tdd / tda, 1.0 / 60.0)
        # m8 // m8
        assert_raises(TypeError, np.floor_divide, tda, tdb)
        # int / m8
        assert_raises(TypeError, np.divide, 2, tdb)
        # float / m8
        assert_raises(TypeError, np.divide, 0.5, tdb)
        # m8 / M8
        assert_raises(TypeError, np.divide, dta, tda)
        # M8 / m8
        assert_raises(TypeError, np.divide, tda, dta)
        # M8 / int
        assert_raises(TypeError, np.divide, dta, 2)
        # int / M8
        assert_raises(TypeError, np.divide, 2, dta)
        # M8 / float
        assert_raises(TypeError, np.divide, dta, 1.5)
        # float / M8
        assert_raises(TypeError, np.divide, 1.5, dta)
    # NaTs
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning, r".*encountered in true\_divide")
        nat = np.timedelta64('NaT')
        for tp in (int, float):
            # Division by zero and NaT operands propagate NaT.
            assert_equal(np.timedelta64(1) / tp(0), nat)
            assert_equal(np.timedelta64(0) / tp(0), nat)
            assert_equal(nat / tp(0), nat)
            assert_equal(nat / tp(2), nat)
        # Division by inf
        assert_equal(np.timedelta64(1) / float('inf'), np.timedelta64(0))
        assert_equal(np.timedelta64(0) / float('inf'), np.timedelta64(0))
        assert_equal(nat / float('inf'), nat)
        # Division by nan
        assert_equal(np.timedelta64(1) / float('nan'), nat)
        assert_equal(np.timedelta64(0) / float('nan'), nat)
        assert_equal(nat / float('nan'), nat)
def test_datetime_compare(self):
# Test all the comparison operators
a = np.datetime64('2000-03-12T18:00:00.000000')
b = np.array(['2000-03-12T18:00:00.000000',
'2000-03-12T17:59:59.999999',
'2000-03-12T18:00:00.000001',
'1970-01-11T12:00:00.909090',
'2016-01-11T12:00:00.909090'],
dtype='datetime64[us]')
assert_equal(np.equal(a, b), [1, 0, 0, 0, 0])
assert_equal(np.not_equal(a, b), [0, 1, 1, 1, 1])
assert_equal(np.less(a, b), [0, 0, 1, 0, 1])
assert_equal(np.less_equal(a, b), [1, 0, 1, 0, 1])
assert_equal(np.greater(a, b), [0, 1, 0, 1, 0])
assert_equal(np.greater_equal(a, b), [1, 1, 0, 1, 0])
def test_datetime_compare_nat(self):
    """Comparisons involving NaT warn before changing semantics.

    NOTE(review): this pins the transitional behavior where NaT
    comparisons that evaluate truthy emit a FutureWarning — confirm the
    targeted numpy version, since later releases changed NaT
    comparisons to return False without warning.
    """
    dt_nat = np.datetime64('NaT', 'D')
    dt_other = np.datetime64('2000-01-01')
    td_nat = np.timedelta64('NaT', 'h')
    td_other = np.timedelta64(1, 'h')
    with suppress_warnings() as sup:
        # The assert warns contexts will again see the warning:
        sup.filter(FutureWarning, ".*NAT")
        for op in [np.equal, np.less, np.less_equal,
                   np.greater, np.greater_equal]:
            # Only check the warning for operand pairs that actually
            # evaluate truthy under the suppressed comparison.
            if op(dt_nat, dt_nat):
                assert_warns(FutureWarning, op, dt_nat, dt_nat)
            if op(dt_nat, dt_other):
                assert_warns(FutureWarning, op, dt_nat, dt_other)
            if op(dt_other, dt_nat):
                assert_warns(FutureWarning, op, dt_other, dt_nat)
            if op(td_nat, td_nat):
                assert_warns(FutureWarning, op, td_nat, td_nat)
            if op(td_nat, td_other):
                assert_warns(FutureWarning, op, td_nat, td_other)
            if op(td_other, td_nat):
                assert_warns(FutureWarning, op, td_other, td_nat)
        assert_warns(FutureWarning, np.not_equal, dt_nat, dt_nat)
        assert_warns(FutureWarning, np.not_equal, td_nat, td_nat)
    with suppress_warnings() as sup:
        sup.record(FutureWarning)
        # NaT != other must be True and must NOT warn (log stays empty).
        assert_(np.not_equal(dt_nat, dt_other))
        assert_(np.not_equal(dt_other, dt_nat))
        assert_(np.not_equal(td_nat, td_other))
        assert_(np.not_equal(td_other, td_nat))
        # NOTE(review): self.assertEqual is unittest-style; the rest of
        # this file uses numpy.testing helpers (assert_equal).
        self.assertEqual(len(sup.log), 0)
def test_datetime_minmax(self):
    """minimum/maximum/fmin/fmax on mixed-unit datetimes and timedeltas.

    NOTE(review): the NaT checks pin the behavior where minimum/maximum
    IGNORE NaT — later numpy versions propagate NaT instead; confirm
    the targeted version.
    """
    # The metadata of the result should become the GCD
    # of the operand metadata
    a = np.array('1999-03-12T13', dtype='M8[2m]')
    b = np.array('1999-03-12T12', dtype='M8[s]')
    assert_equal(np.minimum(a, b), b)
    assert_equal(np.minimum(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.fmin(a, b), b)
    assert_equal(np.fmin(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.maximum(a, b), a)
    assert_equal(np.maximum(a, b).dtype, np.dtype('M8[s]'))
    assert_equal(np.fmax(a, b), a)
    assert_equal(np.fmax(a, b).dtype, np.dtype('M8[s]'))
    # Viewed as integers, the comparison is opposite because
    # of the units chosen
    assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
    # Interaction with NaT
    a = np.array('1999-03-12T13', dtype='M8[2m]')
    dtnat = np.array('NaT', dtype='M8[h]')
    assert_equal(np.minimum(a, dtnat), a)
    assert_equal(np.minimum(dtnat, a), a)
    assert_equal(np.maximum(a, dtnat), a)
    assert_equal(np.maximum(dtnat, a), a)
    # Also do timedelta
    a = np.array(3, dtype='m8[h]')
    b = np.array(3*3600 - 3, dtype='m8[s]')
    assert_equal(np.minimum(a, b), b)
    assert_equal(np.minimum(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.fmin(a, b), b)
    assert_equal(np.fmin(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.maximum(a, b), a)
    assert_equal(np.maximum(a, b).dtype, np.dtype('m8[s]'))
    assert_equal(np.fmax(a, b), a)
    assert_equal(np.fmax(a, b).dtype, np.dtype('m8[s]'))
    # Viewed as integers, the comparison is opposite because
    # of the units chosen
    assert_equal(np.minimum(a.view('i8'), b.view('i8')), a.view('i8'))
    # should raise between datetime and timedelta
    #
    # TODO: Allowing unsafe casting by
    #       default in ufuncs strikes again... :(
    a = np.array(3, dtype='m8[h]')
    b = np.array('1999-03-12T12', dtype='M8[s]')
    #assert_raises(TypeError, np.minimum, a, b)
    #assert_raises(TypeError, np.maximum, a, b)
    #assert_raises(TypeError, np.fmin, a, b)
    #assert_raises(TypeError, np.fmax, a, b)
    assert_raises(TypeError, np.minimum, a, b, casting='same_kind')
    assert_raises(TypeError, np.maximum, a, b, casting='same_kind')
    assert_raises(TypeError, np.fmin, a, b, casting='same_kind')
    assert_raises(TypeError, np.fmax, a, b, casting='same_kind')
def test_hours(self):
t = np.ones(3, dtype='M8[s]')
t[0] = 60*60*24 + 60*60*10
assert_(t[0].item().hour == 10)
def test_divisor_conversion_year(self):
assert_(np.dtype('M8[Y/4]') == np.dtype('M8[3M]'))
assert_(np.dtype('M8[Y/13]') == np.dtype('M8[4W]'))
assert_(np.dtype('M8[3Y/73]') == np.dtype('M8[15D]'))
def test_divisor_conversion_month(self):
assert_(np.dtype('M8[M/2]') == np.dtype('M8[2W]'))
assert_(np.dtype('M8[M/15]') == np.dtype('M8[2D]'))
assert_(np.dtype('M8[3M/40]') == np.dtype('M8[54h]'))
def test_divisor_conversion_week(self):
assert_(np.dtype('m8[W/7]') == np.dtype('m8[D]'))
assert_(np.dtype('m8[3W/14]') == np.dtype('m8[36h]'))
assert_(np.dtype('m8[5W/140]') == np.dtype('m8[360m]'))
def test_divisor_conversion_day(self):
assert_(np.dtype('M8[D/12]') == np.dtype('M8[2h]'))
assert_(np.dtype('M8[D/120]') == np.dtype('M8[12m]'))
assert_(np.dtype('M8[3D/960]') == np.dtype('M8[270s]'))
def test_divisor_conversion_hour(self):
assert_(np.dtype('m8[h/30]') == np.dtype('m8[2m]'))
assert_(np.dtype('m8[3h/300]') == np.dtype('m8[36s]'))
def test_divisor_conversion_minute(self):
assert_(np.dtype('m8[m/30]') == np.dtype('m8[2s]'))
assert_(np.dtype('m8[3m/300]') == np.dtype('m8[600ms]'))
def test_divisor_conversion_second(self):
assert_(np.dtype('m8[s/100]') == np.dtype('m8[10ms]'))
assert_(np.dtype('m8[3s/10000]') == np.dtype('m8[300us]'))
def test_divisor_conversion_fs(self):
assert_(np.dtype('M8[fs/100]') == np.dtype('M8[10as]'))
self.assertRaises(ValueError, lambda: np.dtype('M8[3fs/10000]'))
def test_divisor_conversion_as(self):
self.assertRaises(ValueError, lambda: np.dtype('M8[as/10]'))
def test_string_parser_variants(self):
    """Accepted variants of the datetime string format.

    Timezone-qualified strings ('Z', numeric offsets) are still parsed
    but emit a DeprecationWarning.
    """
    # Allow space instead of 'T' between date and time
    assert_equal(np.array(['1980-02-29T01:02:03'], np.dtype('M8[s]')),
                 np.array(['1980-02-29 01:02:03'], np.dtype('M8[s]')))
    # Allow negative years
    assert_equal(np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
                 np.array(['-1980-02-29 01:02:03'], np.dtype('M8[s]')))
    # UTC specifier
    with assert_warns(DeprecationWarning):
        assert_equal(
            np.array(['-1980-02-29T01:02:03'], np.dtype('M8[s]')),
            np.array(['-1980-02-29 01:02:03Z'], np.dtype('M8[s]')))
    # Time zone offset
    with assert_warns(DeprecationWarning):
        assert_equal(
            np.array(['1980-02-29T02:02:03'], np.dtype('M8[s]')),
            np.array(['1980-02-29 00:32:03-0130'], np.dtype('M8[s]')))
    with assert_warns(DeprecationWarning):
        assert_equal(
            np.array(['1980-02-28T22:32:03'], np.dtype('M8[s]')),
            np.array(['1980-02-29 00:02:03+01:30'], np.dtype('M8[s]')))
    with assert_warns(DeprecationWarning):
        assert_equal(
            np.array(['1980-02-29T02:32:03.506'], np.dtype('M8[s]')),
            np.array(['1980-02-29 00:32:03.506-02'], np.dtype('M8[s]')))
    with assert_warns(DeprecationWarning):
        assert_equal(np.datetime64('1977-03-02T12:30-0230'),
                     np.datetime64('1977-03-02T15:00'))
def test_string_parser_error_check(self):
    """Malformed datetime strings must raise ValueError.

    Exercises each field of the ISO-like format in turn: year
    terminator, month/day/hour/minute/second ranges and digit counts,
    trailing separators and garbage, and out-of-range timezone offsets
    (which additionally pass through the deprecation warning path).
    """
    # Arbitrary bad string
    assert_raises(ValueError, np.array, ['badvalue'], np.dtype('M8[us]'))
    # Character after year must be '-'
    assert_raises(ValueError, np.array, ['1980X'], np.dtype('M8[us]'))
    # Cannot have trailing '-'
    assert_raises(ValueError, np.array, ['1980-'], np.dtype('M8[us]'))
    # Month must be in range [1,12]
    assert_raises(ValueError, np.array, ['1980-00'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-13'], np.dtype('M8[us]'))
    # Month must have two digits
    assert_raises(ValueError, np.array, ['1980-1'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-1-02'], np.dtype('M8[us]'))
    # 'Mor' is not a valid month
    assert_raises(ValueError, np.array, ['1980-Mor'], np.dtype('M8[us]'))
    # Cannot have trailing '-'
    assert_raises(ValueError, np.array, ['1980-01-'], np.dtype('M8[us]'))
    # Day must be in range [1,len(month)]
    assert_raises(ValueError, np.array, ['1980-01-0'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-01-00'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-01-32'], np.dtype('M8[us]'))
    # 1979 is not a leap year, so Feb 29 is invalid
    assert_raises(ValueError, np.array, ['1979-02-29'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-30'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-03-32'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-04-31'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-05-32'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-06-31'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-07-32'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-08-32'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-09-31'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-10-32'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-11-31'], np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-12-32'], np.dtype('M8[us]'))
    # Cannot have trailing characters
    assert_raises(ValueError, np.array, ['1980-02-03%'],
                  np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-03 q'],
                  np.dtype('M8[us]'))
    # Hours must be in range [0, 23]
    assert_raises(ValueError, np.array, ['1980-02-03 25'],
                  np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-03T25'],
                  np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-03 24:01'],
                  np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-03T24:01'],
                  np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-03 -1'],
                  np.dtype('M8[us]'))
    # No trailing ':'
    assert_raises(ValueError, np.array, ['1980-02-03 01:'],
                  np.dtype('M8[us]'))
    # Minutes must be in range [0, 59]
    assert_raises(ValueError, np.array, ['1980-02-03 01:-1'],
                  np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-03 01:60'],
                  np.dtype('M8[us]'))
    # No trailing ':'
    assert_raises(ValueError, np.array, ['1980-02-03 01:60:'],
                  np.dtype('M8[us]'))
    # Seconds must be in range [0, 59]
    assert_raises(ValueError, np.array, ['1980-02-03 01:10:-1'],
                  np.dtype('M8[us]'))
    assert_raises(ValueError, np.array, ['1980-02-03 01:01:60'],
                  np.dtype('M8[us]'))
    # Timezone offset must within a reasonable range
    with assert_warns(DeprecationWarning):
        assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+0661'],
                      np.dtype('M8[us]'))
    with assert_warns(DeprecationWarning):
        assert_raises(ValueError, np.array, ['1980-02-03 01:01:00+2500'],
                      np.dtype('M8[us]'))
    with assert_warns(DeprecationWarning):
        assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-0070'],
                      np.dtype('M8[us]'))
    with assert_warns(DeprecationWarning):
        assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-3000'],
                      np.dtype('M8[us]'))
    with assert_warns(DeprecationWarning):
        assert_raises(ValueError, np.array, ['1980-02-03 01:01:00-25:00'],
                      np.dtype('M8[us]'))
def test_creation_overflow(self):
date = '1980-03-23 20:00:00'
timesteps = np.array([date], dtype='datetime64[s]')[0].astype(np.int64)
for unit in ['ms', 'us', 'ns']:
timesteps *= 1000
x = np.array([date], dtype='datetime64[%s]' % unit)
assert_equal(timesteps, x[0].astype(np.int64),
err_msg='Datetime conversion error for unit %s' % unit)
assert_equal(x[0].astype(np.int64), 322689600000000000)
def test_datetime_as_string(self):
    """String conversion of datetime64 values across all time units,
    with and without an explicit unit= parameter."""
    # Check all the units with default string conversion
    date = '1959-10-13'
    datetime = '1959-10-13T12:34:56.789012345678901234'

    assert_equal(np.datetime_as_string(np.datetime64(date, 'Y')),
                 '1959')
    assert_equal(np.datetime_as_string(np.datetime64(date, 'M')),
                 '1959-10')
    assert_equal(np.datetime_as_string(np.datetime64(date, 'D')),
                 '1959-10-13')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'h')),
                 '1959-10-13T12')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'm')),
                 '1959-10-13T12:34')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 's')),
                 '1959-10-13T12:34:56')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ms')),
                 '1959-10-13T12:34:56.789')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'us')),
                 '1959-10-13T12:34:56.789012')

    # Sub-nanosecond units near the epoch (where they still fit in int64).
    datetime = '1969-12-31T23:34:56.789012345678901234'

    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                 '1969-12-31T23:34:56.789012345')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                 '1969-12-31T23:34:56.789012345678')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                 '1969-12-31T23:34:56.789012345678901')

    datetime = '1969-12-31T23:59:57.789012345678901234'
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                 datetime)

    datetime = '1970-01-01T00:34:56.789012345678901234'
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ns')),
                 '1970-01-01T00:34:56.789012345')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'ps')),
                 '1970-01-01T00:34:56.789012345678')
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'fs')),
                 '1970-01-01T00:34:56.789012345678901')

    datetime = '1970-01-01T00:00:05.789012345678901234'
    assert_equal(np.datetime_as_string(np.datetime64(datetime, 'as')),
                 datetime)

    # String conversion with the unit= parameter
    a = np.datetime64('2032-07-18T12:23:34.123456', 'us')
    assert_equal(np.datetime_as_string(a, unit='Y', casting='unsafe'),
                 '2032')
    assert_equal(np.datetime_as_string(a, unit='M', casting='unsafe'),
                 '2032-07')
    assert_equal(np.datetime_as_string(a, unit='W', casting='unsafe'),
                 '2032-07-18')
    assert_equal(np.datetime_as_string(a, unit='D', casting='unsafe'),
                 '2032-07-18')
    assert_equal(np.datetime_as_string(a, unit='h'), '2032-07-18T12')
    assert_equal(np.datetime_as_string(a, unit='m'),
                 '2032-07-18T12:23')
    assert_equal(np.datetime_as_string(a, unit='s'),
                 '2032-07-18T12:23:34')
    assert_equal(np.datetime_as_string(a, unit='ms'),
                 '2032-07-18T12:23:34.123')
    assert_equal(np.datetime_as_string(a, unit='us'),
                 '2032-07-18T12:23:34.123456')
    # Units finer than the value's own are zero-padded, not invented.
    assert_equal(np.datetime_as_string(a, unit='ns'),
                 '2032-07-18T12:23:34.123456000')
    assert_equal(np.datetime_as_string(a, unit='ps'),
                 '2032-07-18T12:23:34.123456000000')
    assert_equal(np.datetime_as_string(a, unit='fs'),
                 '2032-07-18T12:23:34.123456000000000')
    assert_equal(np.datetime_as_string(a, unit='as'),
                 '2032-07-18T12:23:34.123456000000000000')

    # unit='auto' parameter: picks the coarsest unit with no data loss
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-07-18T12:23:34.123456', 'us'), unit='auto'),
        '2032-07-18T12:23:34.123456')
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-07-18T12:23:34.12', 'us'), unit='auto'),
        '2032-07-18T12:23:34.120')
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-07-18T12:23:34', 'us'), unit='auto'),
        '2032-07-18T12:23:34')
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-07-18T12:23:00', 'us'), unit='auto'),
        '2032-07-18T12:23')
    # 'auto' doesn't split up hour and minute
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-07-18T12:00:00', 'us'), unit='auto'),
        '2032-07-18T12:00')
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-07-18T00:00:00', 'us'), unit='auto'),
        '2032-07-18')
    # 'auto' doesn't split up the date
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-07-01T00:00:00', 'us'), unit='auto'),
        '2032-07-01')
    assert_equal(np.datetime_as_string(
        np.datetime64('2032-01-01T00:00:00', 'us'), unit='auto'),
        '2032-01-01')
@dec.skipif(not _has_pytz, "The pytz module is not available.")
def test_datetime_as_string_timezone(self):
    """datetime_as_string with naive/UTC/local and pytz timezones."""
    # timezone='local' vs 'UTC'
    a = np.datetime64('2010-03-15T06:30', 'm')
    assert_equal(np.datetime_as_string(a),
                 '2010-03-15T06:30')
    assert_equal(np.datetime_as_string(a, timezone='naive'),
                 '2010-03-15T06:30')
    assert_equal(np.datetime_as_string(a, timezone='UTC'),
                 '2010-03-15T06:30Z')
    # 'local' shifts by the machine's zone, so it can only be checked
    # for inequality with the naive rendering.
    assert_(np.datetime_as_string(a, timezone='local') !=
            '2010-03-15T06:30')

    # The expected offsets below differ between the two dates because
    # March falls inside US daylight-saving time and February does not.
    b = np.datetime64('2010-02-15T06:30', 'm')

    assert_equal(np.datetime_as_string(a, timezone=tz('US/Central')),
                 '2010-03-15T01:30-0500')
    assert_equal(np.datetime_as_string(a, timezone=tz('US/Eastern')),
                 '2010-03-15T02:30-0400')
    assert_equal(np.datetime_as_string(a, timezone=tz('US/Pacific')),
                 '2010-03-14T23:30-0700')

    assert_equal(np.datetime_as_string(b, timezone=tz('US/Central')),
                 '2010-02-15T00:30-0600')
    assert_equal(np.datetime_as_string(b, timezone=tz('US/Eastern')),
                 '2010-02-15T01:30-0500')
    assert_equal(np.datetime_as_string(b, timezone=tz('US/Pacific')),
                 '2010-02-14T22:30-0800')

    # Dates to strings with a timezone attached is disabled by default
    assert_raises(TypeError, np.datetime_as_string, a, unit='D',
                  timezone=tz('US/Pacific'))
    # Check that we can print out the date in the specified time zone
    assert_equal(np.datetime_as_string(a, unit='D',
                 timezone=tz('US/Pacific'), casting='unsafe'),
                 '2010-03-14')
    assert_equal(np.datetime_as_string(b, unit='D',
                 timezone=tz('US/Central'), casting='unsafe'),
                 '2010-02-15')
def test_datetime_arange(self):
    """np.arange over datetime64: unit inference and error cases."""
    # With two datetimes provided as strings
    a = np.arange('2010-01-05', '2010-01-10', dtype='M8[D]')
    assert_equal(a.dtype, np.dtype('M8[D]'))
    assert_equal(a,
                 np.array(['2010-01-05', '2010-01-06', '2010-01-07',
                           '2010-01-08', '2010-01-09'], dtype='M8[D]'))

    # Negative step counts backwards, stop exclusive.
    a = np.arange('1950-02-10', '1950-02-06', -1, dtype='M8[D]')
    assert_equal(a.dtype, np.dtype('M8[D]'))
    assert_equal(a,
                 np.array(['1950-02-10', '1950-02-09', '1950-02-08',
                           '1950-02-07'], dtype='M8[D]'))

    # Unit should be detected as months here
    a = np.arange('1969-05', '1970-05', 2, dtype='M8')
    assert_equal(a.dtype, np.dtype('M8[M]'))
    assert_equal(a,
                 np.datetime64('1969-05') + np.arange(12, step=2))

    # datetime, integer|timedelta works as well
    # produces arange (start, start + stop) in this case
    a = np.arange('1969', 18, 3, dtype='M8')
    assert_equal(a.dtype, np.dtype('M8[Y]'))
    assert_equal(a,
                 np.datetime64('1969') + np.arange(18, step=3))
    a = np.arange('1969-12-19', 22, np.timedelta64(2), dtype='M8')
    assert_equal(a.dtype, np.dtype('M8[D]'))
    assert_equal(a,
                 np.datetime64('1969-12-19') + np.arange(22, step=2))

    # Step of 0 is disallowed
    assert_raises(ValueError, np.arange, np.datetime64('today'),
                  np.datetime64('today') + 3, 0)

    # Promotion across nonlinear unit boundaries is disallowed
    # (months/years have no fixed length in days/seconds).
    assert_raises(TypeError, np.arange, np.datetime64('2011-03-01', 'D'),
                  np.timedelta64(5, 'M'))
    assert_raises(TypeError, np.arange,
                  np.datetime64('2012-02-03T14', 's'),
                  np.timedelta64(5, 'Y'))
def test_datetime_arange_no_dtype(self):
d = np.array('2010-01-04', dtype="M8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_timedelta_arange(self):
a = np.arange(3, 10, dtype='m8')
assert_equal(a.dtype, np.dtype('m8'))
assert_equal(a, np.timedelta64(0) + np.arange(3, 10))
a = np.arange(np.timedelta64(3, 's'), 10, 2, dtype='m8')
assert_equal(a.dtype, np.dtype('m8[s]'))
assert_equal(a, np.timedelta64(0, 's') + np.arange(3, 10, 2))
# Step of 0 is disallowed
assert_raises(ValueError, np.arange, np.timedelta64(0),
np.timedelta64(5), 0)
# Promotion across nonlinear unit boundaries is disallowed
assert_raises(TypeError, np.arange, np.timedelta64(0, 'D'),
np.timedelta64(5, 'M'))
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
assert_raises(ValueError, np.arange, d)
def test_datetime_maximum_reduce(self):
a = np.array(['2010-01-02', '1999-03-14', '1833-03'], dtype='M8[D]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('M8[D]'))
assert_equal(np.maximum.reduce(a),
np.datetime64('2010-01-02'))
a = np.array([1, 4, 0, 7, 2], dtype='m8[s]')
assert_equal(np.maximum.reduce(a).dtype, np.dtype('m8[s]'))
assert_equal(np.maximum.reduce(a),
np.timedelta64(7, 's'))
def test_datetime_busday_offset(self):
    """np.busday_offset with weekmasks and the various roll modes."""
    # First Monday in June
    assert_equal(
        np.busday_offset('2011-06', 0, roll='forward', weekmask='Mon'),
        np.datetime64('2011-06-06'))
    # Last Monday in June
    assert_equal(
        np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
        np.datetime64('2011-06-27'))
    # NOTE(review): exact duplicate of the previous assertion --
    # presumably a copy/paste remnant; confirm and remove.
    assert_equal(
        np.busday_offset('2011-07', -1, roll='forward', weekmask='Mon'),
        np.datetime64('2011-06-27'))

    # Default M-F business days, different roll modes
    assert_equal(np.busday_offset('2010-08', 0, roll='backward'),
                 np.datetime64('2010-07-30'))
    assert_equal(np.busday_offset('2010-08', 0, roll='preceding'),
                 np.datetime64('2010-07-30'))
    assert_equal(np.busday_offset('2010-08', 0, roll='modifiedpreceding'),
                 np.datetime64('2010-08-02'))
    assert_equal(np.busday_offset('2010-08', 0, roll='modifiedfollowing'),
                 np.datetime64('2010-08-02'))
    assert_equal(np.busday_offset('2010-08', 0, roll='forward'),
                 np.datetime64('2010-08-02'))
    assert_equal(np.busday_offset('2010-08', 0, roll='following'),
                 np.datetime64('2010-08-02'))
    assert_equal(np.busday_offset('2010-10-30', 0, roll='following'),
                 np.datetime64('2010-11-01'))
    # 'modified*' rolls reverse direction rather than cross a month edge.
    assert_equal(
        np.busday_offset('2010-10-30', 0, roll='modifiedfollowing'),
        np.datetime64('2010-10-29'))
    assert_equal(
        np.busday_offset('2010-10-30', 0, roll='modifiedpreceding'),
        np.datetime64('2010-10-29'))
    assert_equal(
        np.busday_offset('2010-10-16', 0, roll='modifiedfollowing'),
        np.datetime64('2010-10-18'))
    assert_equal(
        np.busday_offset('2010-10-16', 0, roll='modifiedpreceding'),
        np.datetime64('2010-10-15'))
    # roll='raise' by default
    assert_raises(ValueError, np.busday_offset, '2011-06-04', 0)

    # Bigger offset values
    assert_equal(np.busday_offset('2006-02-01', 25),
                 np.datetime64('2006-03-08'))
    assert_equal(np.busday_offset('2006-03-08', -25),
                 np.datetime64('2006-02-01'))
    assert_equal(np.busday_offset('2007-02-25', 11, weekmask='SatSun'),
                 np.datetime64('2007-04-07'))
    assert_equal(np.busday_offset('2007-04-07', -11, weekmask='SatSun'),
                 np.datetime64('2007-02-25'))

    # NaT values when roll is not raise
    assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='nat'),
                 np.datetime64('NaT'))
    assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='following'),
                 np.datetime64('NaT'))
    assert_equal(np.busday_offset(np.datetime64('NaT'), 1, roll='preceding'),
                 np.datetime64('NaT'))
def test_datetime_busdaycalendar(self):
    """Construction and normalization of np.busdaycalendar objects."""
    # Check that it removes NaT, duplicates, and weekends
    # and sorts the result.
    bdd = np.busdaycalendar(
        holidays=['NaT', '2011-01-17', '2011-03-06', 'NaT',
                  '2011-12-26', '2011-05-30', '2011-01-17'])
    # 2011-03-06 dropped: it falls on a weekend under the default mask.
    assert_equal(bdd.holidays,
                 np.array(['2011-01-17', '2011-05-30', '2011-12-26'], dtype='M8'))
    # Default M-F weekmask
    assert_equal(bdd.weekmask, np.array([1, 1, 1, 1, 1, 0, 0], dtype='?'))

    # Check string weekmask with varying whitespace.
    bdd = np.busdaycalendar(weekmask="Sun TueWed Thu\tFri")
    assert_equal(bdd.weekmask, np.array([0, 1, 1, 1, 1, 0, 1], dtype='?'))

    # Check length 7 0/1 string
    bdd = np.busdaycalendar(weekmask="0011001")
    assert_equal(bdd.weekmask, np.array([0, 0, 1, 1, 0, 0, 1], dtype='?'))

    # Check abbreviated weekday-name weekmask.
    bdd = np.busdaycalendar(weekmask="Mon Tue")
    assert_equal(bdd.weekmask, np.array([1, 1, 0, 0, 0, 0, 0], dtype='?'))

    # All-zeros weekmask should raise
    assert_raises(ValueError, np.busdaycalendar, weekmask=[0, 0, 0, 0, 0, 0, 0])
    # weekday names must be correct case
    assert_raises(ValueError, np.busdaycalendar, weekmask="satsun")
    # Empty weekmask string should raise
    assert_raises(ValueError, np.busdaycalendar, weekmask="")
    # Invalid weekday name codes should raise
    assert_raises(ValueError, np.busdaycalendar, weekmask="Mon Tue We")
    assert_raises(ValueError, np.busdaycalendar, weekmask="Max")
    assert_raises(ValueError, np.busdaycalendar, weekmask="Monday Tue")
def test_datetime_busday_holidays_offset(self):
    """np.busday_offset honoring holiday lists and busdaycal objects."""
    # With exactly one holiday
    assert_equal(
        np.busday_offset('2011-11-10', 1, holidays=['2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-04', 5, holidays=['2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-10', 5, holidays=['2011-11-11']),
        np.datetime64('2011-11-18'))
    assert_equal(
        np.busday_offset('2011-11-14', -1, holidays=['2011-11-11']),
        np.datetime64('2011-11-10'))
    assert_equal(
        np.busday_offset('2011-11-18', -5, holidays=['2011-11-11']),
        np.datetime64('2011-11-10'))
    assert_equal(
        np.busday_offset('2011-11-14', -5, holidays=['2011-11-11']),
        np.datetime64('2011-11-04'))
    # With the holiday appearing twice
    assert_equal(
        np.busday_offset('2011-11-10', 1,
                         holidays=['2011-11-11', '2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
                         holidays=['2011-11-11', '2011-11-11']),
        np.datetime64('2011-11-10'))
    # With a NaT holiday
    assert_equal(
        np.busday_offset('2011-11-10', 1,
                         holidays=['2011-11-11', 'NaT']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
                         holidays=['NaT', '2011-11-11']),
        np.datetime64('2011-11-10'))
    # With another holiday after
    assert_equal(
        np.busday_offset('2011-11-10', 1,
                         holidays=['2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
                         holidays=['2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-10'))
    # With another holiday before
    assert_equal(
        np.busday_offset('2011-11-10', 1,
                         holidays=['2011-10-10', '2011-11-11']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
                         holidays=['2011-10-10', '2011-11-11']),
        np.datetime64('2011-11-10'))
    # With another holiday before and after
    assert_equal(
        np.busday_offset('2011-11-10', 1,
                         holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-14'))
    assert_equal(
        np.busday_offset('2011-11-14', -1,
                         holidays=['2011-10-10', '2011-11-11', '2011-11-24']),
        np.datetime64('2011-11-10'))

    # A bigger forward jump across more than one week/holiday
    # (each '+ k' on the right accounts for k skipped holidays).
    holidays = ['2011-10-10', '2011-11-11', '2011-11-24',
                '2011-12-25', '2011-05-30', '2011-02-21',
                '2011-12-26', '2012-01-02']
    bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
    assert_equal(
        np.busday_offset('2011-10-03', 4, holidays=holidays),
        np.busday_offset('2011-10-03', 4))
    assert_equal(
        np.busday_offset('2011-10-03', 5, holidays=holidays),
        np.busday_offset('2011-10-03', 5 + 1))
    assert_equal(
        np.busday_offset('2011-10-03', 27, holidays=holidays),
        np.busday_offset('2011-10-03', 27 + 1))
    assert_equal(
        np.busday_offset('2011-10-03', 28, holidays=holidays),
        np.busday_offset('2011-10-03', 28 + 2))
    assert_equal(
        np.busday_offset('2011-10-03', 35, holidays=holidays),
        np.busday_offset('2011-10-03', 35 + 2))
    assert_equal(
        np.busday_offset('2011-10-03', 36, holidays=holidays),
        np.busday_offset('2011-10-03', 36 + 3))
    assert_equal(
        np.busday_offset('2011-10-03', 56, holidays=holidays),
        np.busday_offset('2011-10-03', 56 + 3))
    assert_equal(
        np.busday_offset('2011-10-03', 57, holidays=holidays),
        np.busday_offset('2011-10-03', 57 + 4))
    assert_equal(
        np.busday_offset('2011-10-03', 60, holidays=holidays),
        np.busday_offset('2011-10-03', 60 + 4))
    assert_equal(
        np.busday_offset('2011-10-03', 61, holidays=holidays),
        np.busday_offset('2011-10-03', 61 + 5))
    assert_equal(
        np.busday_offset('2011-10-03', 61, busdaycal=bdd),
        np.busday_offset('2011-10-03', 61 + 5))
    # A bigger backward jump across more than one week/holiday
    assert_equal(
        np.busday_offset('2012-01-03', -1, holidays=holidays),
        np.busday_offset('2012-01-03', -1 - 1))
    assert_equal(
        np.busday_offset('2012-01-03', -4, holidays=holidays),
        np.busday_offset('2012-01-03', -4 - 1))
    assert_equal(
        np.busday_offset('2012-01-03', -5, holidays=holidays),
        np.busday_offset('2012-01-03', -5 - 2))
    assert_equal(
        np.busday_offset('2012-01-03', -25, holidays=holidays),
        np.busday_offset('2012-01-03', -25 - 2))
    assert_equal(
        np.busday_offset('2012-01-03', -26, holidays=holidays),
        np.busday_offset('2012-01-03', -26 - 3))
    assert_equal(
        np.busday_offset('2012-01-03', -33, holidays=holidays),
        np.busday_offset('2012-01-03', -33 - 3))
    assert_equal(
        np.busday_offset('2012-01-03', -34, holidays=holidays),
        np.busday_offset('2012-01-03', -34 - 4))
    assert_equal(
        np.busday_offset('2012-01-03', -56, holidays=holidays),
        np.busday_offset('2012-01-03', -56 - 4))
    assert_equal(
        np.busday_offset('2012-01-03', -57, holidays=holidays),
        np.busday_offset('2012-01-03', -57 - 5))
    assert_equal(
        np.busday_offset('2012-01-03', -57, busdaycal=bdd),
        np.busday_offset('2012-01-03', -57 - 5))
    # Can't supply both a weekmask/holidays and busdaycal
    assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                  weekmask='1111100', busdaycal=bdd)
    assert_raises(ValueError, np.busday_offset, '2012-01-03', -15,
                  holidays=holidays, busdaycal=bdd)
    # Roll with the holidays
    assert_equal(
        np.busday_offset('2011-12-25', 0,
                         roll='forward', holidays=holidays),
        np.datetime64('2011-12-27'))
    assert_equal(
        np.busday_offset('2011-12-26', 0,
                         roll='forward', holidays=holidays),
        np.datetime64('2011-12-27'))
    assert_equal(
        np.busday_offset('2011-12-26', 0,
                         roll='backward', holidays=holidays),
        np.datetime64('2011-12-23'))
    assert_equal(
        np.busday_offset('2012-02-27', 0,
                         roll='modifiedfollowing',
                         holidays=['2012-02-27', '2012-02-26', '2012-02-28',
                                   '2012-03-01', '2012-02-29']),
        np.datetime64('2012-02-24'))
    assert_equal(
        np.busday_offset('2012-03-06', 0,
                         roll='modifiedpreceding',
                         holidays=['2012-03-02', '2012-03-03', '2012-03-01',
                                   '2012-03-05', '2012-03-07', '2012-03-06']),
        np.datetime64('2012-03-08'))
def test_datetime_busday_holidays_count(self):
    """np.busday_count validated against busday_offset over a year."""
    holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
                '2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
                '2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
                '2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10']
    bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)

    # Validate against busday_offset broadcast against
    # a range of offsets
    dates = np.busday_offset('2011-01-01', np.arange(366),
                             roll='forward', busdaycal=bdd)
    assert_equal(np.busday_count('2011-01-01', dates, busdaycal=bdd),
                 np.arange(366))
    # Returns negative value when reversed
    assert_equal(np.busday_count(dates, '2011-01-01', busdaycal=bdd),
                 -np.arange(366))

    dates = np.busday_offset('2011-12-31', -np.arange(366),
                             roll='forward', busdaycal=bdd)
    assert_equal(np.busday_count(dates, '2011-12-31', busdaycal=bdd),
                 np.arange(366))
    # Returns negative value when reversed
    assert_equal(np.busday_count('2011-12-31', dates, busdaycal=bdd),
                 -np.arange(366))

    # Can't supply both a weekmask/holidays and busdaycal
    # NOTE(review): these two assertions call np.busday_offset, not
    # np.busday_count, in a busday_count test -- presumably a
    # copy/paste slip; confirm which function was meant.
    assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                  weekmask='1111100', busdaycal=bdd)
    assert_raises(ValueError, np.busday_offset, '2012-01-03', '2012-02-03',
                  holidays=holidays, busdaycal=bdd)

    # Number of Mondays in March 2011
    assert_equal(np.busday_count('2011-03', '2011-04', weekmask='Mon'), 4)
    # Returns negative value when reversed
    assert_equal(np.busday_count('2011-04', '2011-03', weekmask='Mon'), -4)
def test_datetime_is_busday(self):
holidays = ['2011-01-01', '2011-10-10', '2011-11-11', '2011-11-24',
'2011-12-25', '2011-05-30', '2011-02-21', '2011-01-17',
'2011-12-26', '2012-01-02', '2011-02-21', '2011-05-30',
'2011-07-01', '2011-07-04', '2011-09-05', '2011-10-10',
'NaT']
bdd = np.busdaycalendar(weekmask='1111100', holidays=holidays)
# Weekend/weekday tests
assert_equal(np.is_busday('2011-01-01'), False)
assert_equal(np.is_busday('2011-01-02'), False)
assert_equal(np.is_busday('2011-01-03'), True)
# All the holidays are not business days
assert_equal(np.is_busday(holidays, busdaycal=bdd),
np.zeros(len(holidays), dtype='?'))
def test_datetime_y2038(self):
# Test parsing on either side of the Y2038 boundary
a = np.datetime64('2038-01-19T03:14:07')
assert_equal(a.view(np.int64), 2**31 - 1)
a = np.datetime64('2038-01-19T03:14:08')
assert_equal(a.view(np.int64), 2**31)
# Test parsing on either side of the Y2038 boundary with
# a manually specified timezone offset
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:07+0100')
assert_equal(a.view(np.int64), 2**31 - 1)
with assert_warns(DeprecationWarning):
a = np.datetime64('2038-01-19T04:14:08+0100')
assert_equal(a.view(np.int64), 2**31)
# Test parsing a date after Y2038
a = np.datetime64('2038-01-20T13:21:14')
assert_equal(str(a), '2038-01-20T13:21:14')
class TestDateTimeData(TestCase):
    """Tests for np.datetime_data, which exposes a dtype's unit metadata."""

    def test_basic(self):
        arr = np.array(['1980-03-23'], dtype=np.datetime64)
        # A plain YYYY-MM-DD string is detected as day units with the
        # default unit multiplier of 1.
        assert_equal(np.datetime_data(arr.dtype), ('D', 1))
# Allow the test file to be executed directly, outside a test runner.
if __name__ == "__main__":
    run_module_suite()
| mit |
damirda/ansible-modules-core | network/dellos10/dellos10_facts.py | 11 | 13932 | #!/usr/bin/python
#
# (c) 2015 Peter Sprygada, <psprygada@ansible.com>
#
# Copyright (c) 2016 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module documentation strings consumed by the ansible-doc tooling.
DOCUMENTATION = """
---
module: dellos10_facts
version_added: "2.2"
author: "Senthil Kumar Ganesan (@skg-net)"
short_description: Collect facts from remote devices running Dell OS10
description:
  - Collects a base set of device facts from a remote device that
    is running Dell OS10. This module prepends all of the
    base network fact keys with C(ansible_net_<fact>). The facts
    module will always collect a base set of facts from the device
    and can enable or disable collection of additional facts.
extends_documentation_fragment: dellos10
options:
  gather_subset:
    description:
      - When supplied, this argument will restrict the facts collected
        to a given subset. Possible values for this argument include
        all, hardware, config, and interfaces. Can specify a list of
        values to include a larger subset. Values can also be used
        with an initial M(!) to specify that a specific subset should
        not be collected.
    required: false
    default: '!config'
"""

EXAMPLES = """
# Collect all facts from the device
- dellos10_facts:
    gather_subset: all

# Collect only the config and default facts
- dellos10_facts:
    gather_subset:
      - config

# Do not collect hardware facts
- dellos10_facts:
    gather_subset:
      - "!hardware"
"""

RETURN = """
ansible_net_gather_subset:
  description: The list of fact subsets collected from the device
  returned: always
  type: list

# default
ansible_net_name:
  description: The name of the OS which is running
  returned: always
  type: str
ansible_net_version:
  description: The operating system version running on the remote device
  returned: always
  type: str
ansible_net_servicetag:
  description: The service tag number of the remote device
  returned: always
  type: str
ansible_net_model:
  description: The model name returned from the device
  returned: always
  type: str
ansible_net_hostname:
  description: The configured hostname of the device
  returned: always
  type: str

# hardware
ansible_net_cpu_arch:
  description: Cpu Architecture of the remote device
  returned: when hardware is configured
  type: str
ansible_net_memfree_mb:
  description: The available free memory on the remote device in Mb
  returned: when hardware is configured
  type: int
ansible_net_memtotal_mb:
  description: The total memory on the remote device in Mb
  returned: when hardware is configured
  type: int

# config
ansible_net_config:
  description: The current active config from the device
  returned: when config is configured
  type: str

# interfaces
ansible_net_all_ipv4_addresses:
  description: All IPv4 addresses configured on the device
  returned: when interfaces is configured
  type: list
ansible_net_all_ipv6_addresses:
  description: All IPv6 addresses configured on the device
  returned: when interfaces is configured
  type: list
ansible_net_interfaces:
  description: A hash of all interfaces running on the system
  returned: when interfaces is configured
  type: dict
ansible_net_neighbors:
  description: The list of LLDP neighbors from the remote device
  returned: when interfaces is configured
  type: dict
"""
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcli import CommandRunner
from ansible.module_utils.network import NetworkModule
import ansible.module_utils.dellos10
try:
from lxml import etree as ET
except ImportError:
import xml.etree.ElementTree as ET
class FactsBase(object):
    """Base class for fact collectors.

    Subclasses implement commands() to queue the CLI commands they need
    and populate() to turn command output into fact entries.
    """

    def __init__(self, runner):
        # Shared CommandRunner used to execute and fetch CLI output.
        self.runner = runner
        # Fact name -> value mapping, filled in by populate().
        self.facts = {}
        # Let the subclass register its commands up front.
        self.commands()
class Default(FactsBase):
    """Collects the always-on facts: OS name/version, service tag,
    model and hostname."""

    def commands(self):
        self.runner.add_command('show version | display-xml')
        self.runner.add_command('show system | display-xml')
        self.runner.add_command('show running-configuration | grep hostname')

    def populate(self):
        version_xml = ET.fromstring(
            self.runner.get_command('show version | display-xml'))
        self.facts['name'] = self.parse_name(version_xml)
        self.facts['version'] = self.parse_version(version_xml)

        system_xml = ET.fromstring(
            self.runner.get_command('show system | display-xml'))
        self.facts['servicetag'] = self.parse_serialnum(system_xml)
        self.facts['model'] = self.parse_model(system_xml)

        hostname_output = self.runner.get_command(
            'show running-configuration | grep hostname')
        self.facts['hostname'] = self.parse_hostname(hostname_output)

    def parse_name(self, data):
        node = data.find('./data/system-sw-state/sw-version/sw-name')
        return node.text if node is not None else ""

    def parse_version(self, data):
        node = data.find('./data/system-sw-state/sw-version/sw-version')
        return node.text if node is not None else ""

    def parse_hostname(self, data):
        # NOTE: returns None (not "") when no hostname line is present,
        # matching the original behavior.
        match = re.search(r'hostname\s+(\S+)', data, re.M)
        if match:
            return match.group(1)

    def parse_model(self, data):
        node = data.find('./data/system/node/mfg-info/product-name')
        return node.text if node is not None else ""

    def parse_serialnum(self, data):
        node = data.find('./data/system/node/unit/mfg-info/service-tag')
        return node.text if node is not None else ""
class Hardware(FactsBase):
    """Collects CPU architecture and memory statistics."""

    def commands(self):
        self.runner.add_command('show processes memory | grep Total')

    def populate(self):
        # NOTE(review): this reads 'show version | display-xml' output but
        # never registers that command itself -- it relies on the Default
        # collector (always run by main()) having added it.  Confirm this
        # coupling is intentional.
        data = self.runner.get_command('show version | display-xml')
        xml_data = ET.fromstring(data)

        self.facts['cpu_arch'] = self.parse_cpu_arch(xml_data)

        data = self.runner.get_command('show processes memory | grep Total')
        match = self.parse_memory(data)
        if match:
            # match holds the numeric fields of the 'Total' line;
            # presumably KiB values with index 0 = total and index 2 =
            # free -- verify against actual device output.
            self.facts['memtotal_mb'] = int(match[0]) / 1024
            self.facts['memfree_mb'] = int(match[2]) / 1024

    def parse_cpu_arch(self, data):
        # Returns "" when the node is missing from the XML.
        cpu_arch = data.find('./data/system-sw-state/sw-version/cpu-arch')
        if cpu_arch is not None:
            return cpu_arch.text
        else:
            return ""

    def parse_memory(self, data):
        # All integers that follow a ':' in the memory summary output.
        return re.findall(r'\:\s*(\d+)', data, re.M)
class Config(FactsBase):
    """Collects the device's full running configuration."""

    def commands(self):
        self.runner.add_command('show running-config')

    def populate(self):
        # Store the raw configuration text as a single fact.
        self.facts['config'] = self.runner.get_command('show running-config')
class Interfaces(FactsBase):
    """Collects per-interface facts, IP address lists and LLDP neighbors
    from the 'show interface | display-xml' output."""

    def commands(self):
        self.runner.add_command('show interface | display-xml')

    def populate(self):
        self.facts['all_ipv4_addresses'] = list()
        self.facts['all_ipv6_addresses'] = list()

        data = self.runner.get_command('show interface | display-xml')
        xml_data = ET.fromstring(data)

        self.facts['interfaces'] = self.populate_interfaces(xml_data)
        self.facts['neighbors'] = self.populate_neighbors(xml_data)

    def populate_interfaces(self, interfaces):
        """Build a dict of interface name -> attribute dict."""
        int_facts = dict()

        # Configuration data for each interface.
        for interface in interfaces.findall('./data/interfaces/interface'):
            intf = dict()
            name = self.parse_item(interface, 'name')

            intf['description'] = self.parse_item(interface, 'description')
            intf['duplex'] = self.parse_item(interface, 'duplex')
            intf['primary_ipv4'] = self.parse_primary_ipv4(interface)
            intf['secondary_ipv4'] = self.parse_secondary_ipv4(interface)
            intf['ipv6'] = self.parse_ipv6_address(interface)
            intf['mtu'] = self.parse_item(interface, 'mtu')
            intf['type'] = self.parse_item(interface, 'type')

            int_facts[name] = intf

        # Operational state, merged into the same entries.
        for interface in interfaces.findall('./data/interfaces-state/interface'):
            name = self.parse_item(interface, 'name')
            intf = int_facts[name]
            intf['bandwidth'] = self.parse_item(interface, 'speed')
            intf['adminstatus'] = self.parse_item(interface, 'admin-status')
            intf['operstatus'] = self.parse_item(interface, 'oper-status')
            intf['macaddress'] = self.parse_item(interface, 'phys-address')

        # Media type, accounting for fanout (breakout) ports.
        for interface in interfaces.findall('./data/ports/ports-state/port'):
            name = self.parse_item(interface, 'name')
            fanout = self.parse_item(interface, 'fanout-state')
            mediatype = self.parse_item(interface, 'media-type')

            typ, sname = name.split('-eth')
            if fanout == "BREAKOUT_1x1":
                name = "ethernet" + sname
                intf = int_facts[name]
                intf['mediatype'] = mediatype
            else:
                # TODO: Loop for the exact subport instead of assuming
                # four subports per fanned-out port.
                # range() replaces the Python-2-only xrange(): identical
                # iteration on Py2, and works on Py3 as well.
                for subport in range(1, 5):
                    name = "ethernet" + sname + ":" + str(subport)
                    intf = int_facts[name]
                    intf['mediatype'] = mediatype

        return int_facts

    def add_ip_address(self, address, family):
        """Record an address in the flat all_ipv4/ipv6 fact lists."""
        if family == 'ipv4':
            self.facts['all_ipv4_addresses'].append(address)
        else:
            self.facts['all_ipv6_addresses'].append(address)

    def parse_item(self, interface, item):
        """Return the text of child element *item*, or "" if absent."""
        elem = interface.find(item)
        if elem is not None:
            return elem.text
        else:
            return ""

    def parse_primary_ipv4(self, interface):
        """Return the primary IPv4 address ("" if none) and record it."""
        ipv4 = interface.find('ipv4')
        ip_address = ""
        if ipv4 is not None:
            prim_ipaddr = ipv4.find('./address/primary-addr')
            if prim_ipaddr is not None:
                ip_address = prim_ipaddr.text
                self.add_ip_address(ip_address, 'ipv4')
        return ip_address

    def parse_secondary_ipv4(self, interface):
        """Return the secondary IPv4 address ("" if none) and record it."""
        ipv4 = interface.find('ipv4')
        ip_address = ""
        if ipv4 is not None:
            sec_ipaddr = ipv4.find('./address/secondary-addr')
            if sec_ipaddr is not None:
                ip_address = sec_ipaddr.text
                self.add_ip_address(ip_address, 'ipv4')
        return ip_address

    def parse_ipv6_address(self, interface):
        """Return the IPv6 address ("" if none) and record it."""
        ipv6 = interface.find('ipv6')
        ip_address = ""
        if ipv6 is not None:
            ipv6_addr = ipv6.find('./address/ipv6-address')
            if ipv6_addr is not None:
                ip_address = ipv6_addr.text
                self.add_ip_address(ip_address, 'ipv6')
        return ip_address

    def populate_neighbors(self, interfaces):
        """Map interface name -> list of LLDP neighbor dicts."""
        lldp_facts = dict()
        for interface in interfaces.findall('./data/interfaces-state/interface'):
            name = interface.find('name').text

            rem_sys_name = interface.find('./lldp-rem-neighbor-info/info/rem-system-name')
            if rem_sys_name is not None:
                lldp_facts[name] = list()
                fact = dict()
                fact['host'] = rem_sys_name.text

                rem_sys_port = interface.find('./lldp-rem-neighbor-info/info/rem-lldp-port-id')
                fact['port'] = rem_sys_port.text
                lldp_facts[name].append(fact)
        return lldp_facts
# Map of gather_subset keyword -> collector class.
FACT_SUBSETS = dict(
    default=Default,
    hardware=Hardware,
    interfaces=Interfaces,
    config=Config,
)

# The set of subset names accepted by the gather_subset option.
VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
def main():
    """Module entry point: resolve the requested fact subsets, run the
    collectors and return the facts as ansible_net_* keys."""
    spec = dict(
        gather_subset=dict(default=['!config'], type='list')
    )

    module = NetworkModule(argument_spec=spec, supports_check_mode=True)

    gather_subset = module.params['gather_subset']

    runable_subsets = set()
    exclude_subsets = set()

    # Expand 'all' and '!<subset>' negations into include/exclude sets.
    for subset in gather_subset:
        if subset == 'all':
            runable_subsets.update(VALID_SUBSETS)
            continue

        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude_subsets.update(VALID_SUBSETS)
                continue
            exclude = True
        else:
            exclude = False

        if subset not in VALID_SUBSETS:
            module.fail_json(msg='Bad subset')

        if exclude:
            exclude_subsets.add(subset)
        else:
            runable_subsets.add(subset)

    if not runable_subsets:
        runable_subsets.update(VALID_SUBSETS)

    runable_subsets.difference_update(exclude_subsets)
    # The default facts are always collected; the Hardware collector
    # also relies on commands the Default collector registers.
    runable_subsets.add('default')

    facts = dict()
    facts['gather_subset'] = list(runable_subsets)

    runner = CommandRunner(module)

    # Instantiating each collector registers its commands on the runner.
    instances = list()
    for key in runable_subsets:
        runs = FACT_SUBSETS[key](runner)
        instances.append(runs)

    runner.run()

    try:
        for inst in instances:
            inst.populate()
            facts.update(inst.facts)
    except Exception:
        # NOTE(review): any populate() failure silently exits with the
        # raw command output instead of failing the task -- confirm this
        # best-effort behavior is intentional.
        module.exit_json(out=module.from_json(runner.items))

    ansible_facts = dict()
    # dict.items() works on both Python 2 and 3; the original
    # iteritems() is Python-2-only.
    for key, value in facts.items():
        key = 'ansible_net_%s' % key
        ansible_facts[key] = value

    module.exit_json(ansible_facts=ansible_facts)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
davy39/eric | Helpviewer/SpeedDial/PageThumbnailer.py | 1 | 4110 | # -*- coding: utf-8 -*-
# Copyright (c) 2012 - 2014 Detlev Offenbach <detlev@die-offenbachs.de>
#
"""
Module implementing an object to create a thumbnail image of a web site.
"""
from __future__ import unicode_literals
from PyQt5.QtCore import pyqtSignal, QObject, QSize, Qt, QUrl
from PyQt5.QtGui import QPixmap, QImage, QPainter
from PyQt5.QtWebKitWidgets import QWebPage
from ..Network.NetworkAccessManagerProxy import NetworkAccessManagerProxy
class PageThumbnailer(QObject):
    """
    Thumbnail creator for web sites.
    
    Loads a page off-screen into a QWebPage with a Full HD viewport,
    renders it to an image and emits a scaled-down pixmap.
    
    @signal thumbnailCreated(QPixmap) emitted after the thumbnail has been
        created
    """
    thumbnailCreated = pyqtSignal(QPixmap)
    
    def __init__(self, parent=None):
        """
        Constructor
        
        @param parent reference to the parent object (QObject)
        """
        super(PageThumbnailer, self).__init__(parent)
        
        self.__title = ""
        self.__loadTitle = False
        self.__url = QUrl()
        self.__size = QSize(231, 130)
        
        self.__page = QWebPage(self)
        self.__proxy = NetworkAccessManagerProxy(self)
        import Helpviewer.HelpWindow
        self.__proxy.setPrimaryNetworkAccessManager(
            Helpviewer.HelpWindow.HelpWindow.networkAccessManager())
        self.__page.setNetworkAccessManager(self.__proxy)
        
        mainFrame = self.__page.mainFrame()
        mainFrame.setScrollBarPolicy(Qt.Horizontal, Qt.ScrollBarAlwaysOff)
        mainFrame.setScrollBarPolicy(Qt.Vertical, Qt.ScrollBarAlwaysOff)
        
        # Render into a Full HD viewport so that every page fits.
        self.__page.setViewportSize(QSize(1920, 1080))
    
    def setSize(self, size):
        """
        Public method to set the size of the image.
        
        @param size size of the image (QSize)
        """
        if not size.isValid():
            return
        self.__size = QSize(size)
    
    def setUrl(self, url):
        """
        Public method to set the URL of the site to be thumbnailed.
        
        @param url URL of the web site (QUrl)
        """
        if not url.isValid():
            return
        self.__url = QUrl(url)
    
    def url(self):
        """
        Public method to get the URL of the thumbnail.
        
        @return URL of the thumbnail (QUrl)
        """
        return QUrl(self.__url)
    
    def loadTitle(self):
        """
        Public method to check, if the title is loaded from the web site.
        
        @return flag indicating, that the title is loaded (boolean)
        """
        return self.__loadTitle
    
    def setLoadTitle(self, load):
        """
        Public method to set a flag indicating to load the title from
        the web site.
        
        @param load flag indicating to load the title (boolean)
        """
        self.__loadTitle = load
    
    def title(self):
        """
        Public method to get the title of the thumbnail.
        
        @return title of the thumbnail (string)
        """
        return self.__title
    
    def start(self):
        """
        Public method to start the thumbnailing action.
        """
        self.__page.loadFinished.connect(self.__createThumbnail)
        self.__page.mainFrame().load(self.__url)
    
    def __createThumbnail(self, status):
        """
        Private slot creating the thumbnail of the web site.
        
        @param status flag indicating a successful load of the web site
            (boolean)
        """
        if not status:
            self.thumbnailCreated.emit(QPixmap())
            return
        
        self.__title = self.__page.mainFrame().title()
        
        # Render the full viewport, then scale down to the target size.
        fullImage = QImage(self.__page.viewportSize(), QImage.Format_ARGB32)
        painter = QPainter(fullImage)
        self.__page.mainFrame().render(painter)
        painter.end()
        
        thumbnail = fullImage.scaled(self.__size,
                                     Qt.KeepAspectRatioByExpanding,
                                     Qt.SmoothTransformation)
        self.thumbnailCreated.emit(QPixmap.fromImage(thumbnail))
| gpl-3.0 |
awolfe76/hmda-viz-processing | msa_indexing.py | 3 | 2458 | class MSA_info(object): #contains functions for setting aggregate information for the MSA
def __init__(self):
pass
def app_income_to_MSA(self, inputs): #set income bracket index
if inputs['income'] == 'NA ' or inputs['income'] == ' ':
return 5 #applicant income unavailable, feeds to 'income not available'
elif inputs['MSA median income'] == 'NA ' or inputs['MSA median income'] == ' ' :
return 6 #placeholder for MSA median income unavailable, feeds to 'income not available'
else:
inputs['percent MSA income'] = (float(inputs['income']) / (float(inputs['MSA median income'] )/1000)) *100 #common size median income and create ##.##% format ratio
#determine income bracket for use as an index in the JSON object
if inputs['percent MSA income'] < 50:
return 0
elif inputs['percent MSA income'] <= 80:
return 1
elif inputs['percent MSA income'] <= 100:
return 2
elif inputs['percent MSA income'] <= 120:
return 3
elif inputs['percent MSA income'] >= 120:
return 4
else:
print 'error setting percent MSA income bracket for index'
def minority_percent(self, inputs): #set index codes for minority population percent
if inputs['minority percent'] == ' ':#if no information is available use an out of bounds index
return 5
elif inputs['minority percent'] == 'NA ': #if tract minority percent is NA then it is aggregated as <10%
return 0
elif float(inputs['minority percent']) < 10.0: #less than 10%
return 0
elif float(inputs['minority percent']) <20.0: # 10-19%
return 1
elif float(inputs['minority percent']) < 50.0: # 20-49%
return 2
elif float(inputs['minority percent']) < 80.0: # 50-79
return 3
elif float(inputs['minority percent']) <= 100.0: # 80-100
return 4
else:
print "minority percent index not set"
def tract_to_MSA_income(self, inputs): #set census MSA income level: low, moderate, middle, upper
if inputs['tract to MSA income'] == ' ' or inputs['tract to MSA income'] == 'NA ': #if no information is available use an out of bounds index
return 4 #not stored in report 3-1
elif float(inputs['tract to MSA income']) < 50.0:
return 0
elif float(inputs['tract to MSA income']) <= 79.0:
return 1
elif float(inputs['tract to MSA income']) <= 119.0:
return 2
elif float(inputs['tract to MSA income']) >= 119.0:
return 3
else:
print "error setting tract to MSA income index"
| cc0-1.0 |
showp1984/bricked-pyramid-3.0 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
# Per-event-name counters for events with no dedicated handler
# (filled in by trace_unhandled, reported by print_unhandled).
unhandled = autodict()
def trace_begin():
    # Called by perf once before the first event is delivered.
    print "trace_begin"
    pass
def trace_end():
    # Called by perf once after the last event; report unhandled events.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for the irq:softirq_entry tracepoint: print the common
    # header and the uncommon context fields, then the softirq vector
    # rendered as a symbolic name (trailing comma suppresses the newline).
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for the kmem:kmalloc tracepoint: print the common header
    # and context fields, then the allocation details with gfp_flags
    # rendered as symbolic flag names.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Fallback for events without a dedicated handler: count per name.
    # `unhandled` is an autodict, so the first access for a new name
    # yields a non-int (presumably a nested autodict); += then raises
    # TypeError and we seed the count with 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Print the fields common to every event (trailing comma: no newline,
    # so the event-specific fields continue on the same line).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These common fields are not passed as handler arguments, so they
    # are pulled straight from the opaque event context via perf helpers.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print a name/count table of events that had no dedicated handler;
    # silent when every event was handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
er432/TASSELpy | TASSELpy/toolkit/filetransform_singlefile.py | 1 | 58109 | # Help ur copy and paste process from .java file to .py file
# import plyj.parser
__helper__ = "edit: inputfile and outputfile at the beginning"
import os
import os.path
import sys
import getopt
# file_transform input is the target .java file and the output is the root dir of the TASSELpy
# for example:
# python /address/filetransform_singlefile.py --input=root_address\targetfile.java --output=root\TASSELpy\
# get java_import from enxisting .py file and build dictionary "JAVA_IMPORT_KEY"
# for example:
# python C:\TASSELpy\toolkit\filetransform_singlefile.py -j C:\Users\bb576\Dropbox\TASSELpy\net\maizegenetics\
#
# tip: since there are some typo in java_import the "JAVA_IMPORT_KEY" have to be modified by hand and be copied to
# this file in this developing version
# only for test
# get java functions' root dir
# python C:\TASSELpy\toolkit\filetransform_singlefile.py -i D:\TASSEL\src\net\maizegenetics\
#
# for extraction of function title in different file, help develop "java function title parser"
#
# In development, the dictionaries below are waiting for modification
#
# for some input arguments
# Maps primitive Java parameter types to the TASSELpy meta-type name
# emitted into @javaOverload signatures (used for some input arguments).
JAVA_TYPE_KEY = {'int': "metaInteger",
                 'long': "metaInteger",
                 "short": "metaInteger",
                 'String': "metaString",
                 'boolean': "metaBoolean"}
# Maps primitive Java return types to the converter expression applied
# to the returned value ('None' means no conversion is emitted).
JAVA_TYPE_OUT_KEY = {'int': 'None',
                     'long': 'None',
                     'String': 'None',
                     'double': 'np.float64',
                     'boolean': 'None',
                     'Byte': 'np.int8',
                     "short": 'np.int16'}
# "from package import *"-style import lines emitted for the meta-types
# referenced by JAVA_TYPE_KEY.
JAVA_IMPORT_STRING = {'int': "from TASSELpy.java.lang.Integer import metaInteger\n",
                      'long': "from TASSELpy.java.lang.Integer import metaInteger\n",
                      "short": "from TASSELpy.java.lang.Integer import metaInteger\n",
                      'String': "from TASSELpy.java.lang.String import metaString\n",
                      'boolean': "from TASSELpy.java.lang.Boolean import metaBoolean\n"}
# Java array types -> TASSELpy array-wrapper constructor expressions
# (used for array arguments; transformed again when used as a return).
# NOTE(review): 'int[]' maps to a 'byte' array type -- looks like a
# copy/paste slip in the table; confirm before relying on it.
JAVA_ARR_KEY = {'int[]': 'javaPrimativeArray.get_array_type(\'byte\')',
                'byte[]': 'javaPrimativeArray.get_array_type(\'byte\')',
                'long[]': 'javaPrimativeArray.get_array_type(\'long\')',
                'double[]': 'javaPrimativeArray.make_array_from_obj(\'double\')',
                'String[]': 'javaPrimativeArray.make_array_from_obj(\'string\')',
                'double[][]': 'javaArray.get_array_type(javaPrimativeArray.get_array_type(\'double\'))',
                'long[][]': 'javaArray.get_array_type(javaPrimativeArray.get_array_type(\'long\'))',
                'int[][]': 'javaArray.get_array_type(javaPrimativeArray.get_array_type(\'int\'))',
                'Object[][]': 'javaArray.get_array_type(javaPrimativeArray.get_array_type(\'Object\'))'
                }
#
# 7/22: Bokan: This part is generated by the "-j" command and the output dir is "TASSELpy/toolkit/javaimportkey".
# Since I don't know how to import it from the original file, I just copy it here by hand.
# In this development version, typos in the java_import data mean "JAVA_IMPORT_KEY" has to be
# modified by hand before being copied into this file.
#
# Maps a Java class name to its slash-separated classpath, harvested from
# existing TASSELpy .py files (see the "-j" command above).
# NOTE(review): several entries look like harvesting typos carried over
# from the source data (e.g. 'TableReport' ends in '.java',
# 'SuperByteMatrixBuilder' -> '...Bulider', 'ByteGenotypeCallTable' uses
# 'dna/dnp', and two paths start with a leading '/'); confirm before use.
JAVA_IMPORT_KEY = {'Phenotype': 'net/maizegenetics/trait/Phenotype',
                   'Tags': 'net/maizegenetics/dna/tag/Tags',
                   'Map': 'java/util/Map',
                   'Chromosome': 'net/maizegenetics/dna/map/Chromosome',
                   'TableReport': 'net/maizegenetics/util/TableReport.java',
                   'PositionList': 'net/maizegenetics/dna/map/PositionList',
                   'DoubleMatrix2D': 'cern/colt/matrix/DoubleMatrix2D',
                   'UnmodifiableBitSet': 'net/maizegenetics/util/UnmodifiableBitSet',
                   'TagsByTaxa': 'net/maizegenetics/dna/tag/TagsByTaxa',
                   'BitStorage': 'net/maizegenetics/dna/snp/bit/BitStorage',
                   'FilterGenotypeTable': 'net/maizegenetics/dna/snp/FilterGenotypeTable',
                   'IBSDistanceMatrix': 'net/maizegenetics/analysis/distance/IBSDistanceMatrix',
                   'BitSet': 'net/maizegenetics/util/BitSet',
                   'Set': 'java/util/Set',
                   'WHICH_ALLELE': 'net/maizegenetics/dna/WHICH_ALLELE',
                   'TagsByTaxaByte': 'net/maizegenetics/dna/tag/TagsByTaxaByte',
                   'Kinship': 'net/maizegenetics/analysis/distance/Kinship',
                   'TaxaListMatrix': 'net/maizegenetics/taxa/distance/TaxaListMatrix',
                   'AbstractPhenotype': 'net/maizegenetics/trait/AbstractPhenotype',
                   'MaxNumAlleles': 'net/maizegenetics/dna/snp/NucleotideAlignmentConstants/NUMBER_NUCLEOTIDE_ALLELES',
                   'TOPMInterfacelols': '/net/maizegenetics/dna/map/TOPMInterfacelols',
                   'TaxaListBuilder': 'net/maizegenetics/taxa/TaxaListBuilder',
                   'DoubleMatrixFactory': 'net/maizegenetics/matrixalgebra/Matrix/DoubleMatrixFactory',
                   'SimplePhenotype': 'net/maizegenetics/trait/SimplePhenotype',
                   'GenotypeCallTable': 'net/maizegenetics/dna/snp/genotypecall/GenotypeCallTable',
                   'FisherExact': 'net/maizegenetics/stats/statistics/FisherExact',
                   'ByteGenotypeCallTable': 'net/maizegenetics/dna/dnp/genotypecall/ByteGenotypeCallTable',
                   'TOPMInterface': 'net/maizegenetics/dna/map/TOPMInterface',
                   'ImportUtils': 'net/maizegenetics/dna/snp/ImportUtils',
                   'AbstractGenotypeCallTable': 'net/maizegenetics/dna/snp/genotypecall/AbstractGenotypeCallTable',
                   'List': 'java/util/List',
                   'TagsOnGeneticMap': '/net/maizegenetics/dna/map/TagsOnGeneticMap',
                   'ProgressListener': 'net/maizegenetics/util/ProgressListener',
                   'LDResult': 'net/maizegenetics/analysis/popgen/LDResult',
                   'SuperByteMatrixBuilder': 'net/maizegenetics/util/SuperByteMatrixBulider',
                   'DoubleMatrix': 'net/maizegenetics/matrixalgebra/Matrix/DoubleMatrix',
                   'ArrayList': 'java/util/ArrayList',
                   'Boolean': 'java/lang/Boolean',
                   'Byte': 'java/lang/Byte',
                   'Taxon': 'net/maizegenetics/taxa/Taxon',
                   'Object': 'java/lang/Object',
                   'TaxaList': 'net/maizegenetics/taxa/TaxaList',
                   'String': 'java/lang/String',
                   'CoreGenotypeTable': 'net/maizegenetics/dna/snp/CoreGenotypeTable',
                   'genotypecall': 'net/maizegenetics/dna/snp/genotypecall',
                   'SetMultimap': 'com/google/common/collect/SetMultimap',
                   'AbstractPETags': 'net/maizegenetics/dna/tag/AbstractPETags',
                   'Trait': 'net/maizegenetics/trait/Trait',
                   'SuperByteMatrix': 'net/maizegenetics/util/SuperByteMatrix',
                   'DistanceMatrix': 'net/maizegenetics/taxa/distance/DistanceMatrix',
                   'EigenvalueDecomposition': 'net/maizegenetics/matrixalgebra/decomposition/EigenvalueDecomposition',
                   'OpenBitSet': 'net/maizegenetics/util/OpenBitSet',
                   'Position': 'net/maizegenetics/dna/map/Position',
                   'SingularValueDecomposition': 'net/maizegenetics/matrixalgebra/decomposition/SingularValueDecomposition',
                   'GeneralAnnotation': 'net/maizegenetics/util/GeneralAnnotation',
                   'LinkageDisequilibrium': 'net/maizegenetics/analysis/popgen/LinkageDisequilibrium',
                   'SiteScore': 'net/maizegenetics/dna/snp/score/SiteScore',
                   'AlleleDepth': 'net/maizegenetics/dna/snp/depth/AlleleDepth',
                   'GenotypeTable': 'net/maizegenetics/dna/snp/GenotypeTable'}
# Java modifier keywords stripped from a signature by _remove_sys_char
# ("final" is listed twice -- harmless, membership tests only).
SYS_KEYWORDS = ["private", "public", "static", "abstract", "final", "synchronized", "enum", "final"]
# Keywords used when detecting a Java function/class title line.
TITLE_KEYWORDS = ['public', 'interface', 'private', 'extend', "static", "interface", "class"]
# Formatting constants used when emitting the generated Python source.
SPACE = " "
DOUBLE_SPACE = SPACE * 2
TRIPLE_SPACE = SPACE * 3
TRI_QUOTE = "\"\"\""
ENTER = "\n"
TAB = "\t"
key = []
def _if_main(string):
"""
this is a help function mainly used in title_processing tell whether this function is main function or not
Args:
string -- function name's raw string
for example:
"public class TagsByTaxaByteHDF5TagGroups extends AbstractTagsByTaxa {"
" public boolean isInCurrentBlock(int queryIndex) {"
Returns:
Boolean
"""
if string.find(SPACE) == 0:
is_main = False
string = string[4:]
elif string[0] == "\t":
is_main = False
string = string[1:]
else:
is_main = True
return is_main, string
def _prepare_name_string(string):
    """
    Help function mainly used in title_processing.

    Drops everything from the first '{' or ';' onwards, removes a
    trailing newline and collapses redundant spaces.

    Args:
        string -- raw signature line, e.g.
            "public class TagsByTaxaByteHDF5TagGroups extends AbstractTagsByTaxa {"

    Returns:
        cleaned string, e.g.
            "public class TagsByTaxaByteHDF5TagGroups extends AbstractTagsByTaxa"
    """
    # Truncate at '{' first, then at ';' (mirrors Java body / statement
    # terminators on a declaration line).
    for terminator in ("{", ";"):
        cut = string.find(terminator)
        if cut != -1:
            string = string[:cut]
    if string[-1] == '\n':
        string = string[:-1]
    return _remove_redun_space(string)
def _remove_redun_space(string):
"""
this is a help function mainly used in title_processing
remove redundant space
Args:
string -- function name's raw string
for example:
" public class TagsByTaxaByteHDF5TagGroups extends AbstractTagsByTaxa "
" public boolean isInCurrentBlock(int queryIndex) "
Returns:
string -- string
for example:
"public class TagsByTaxaByteHDF5TagGroups extends AbstractTagsByTaxa"
"public boolean isInCurrentBlock(int queryIndex)"
"""
string = string.split(" ")
string[:] = [x for x in string if x != ""]
return " ".join(string)
def _remove_sys_char(string):
    """
    Help function mainly used in title_processing.

    Removes Java modifier keywords (SYS_KEYWORDS: "private", "public",
    "static", ...) from a signature, preparing it for parsing.

    Args:
        string -- e.g. "public boolean isInCurrentBlock(int queryIndex)"

    Returns:
        (cleaned, removed) -- the signature without modifiers, and the
        list of modifiers in their order of appearance, e.g.
        ("boolean isInCurrentBlock(int queryIndex)", ["public"])
    """
    kept = []
    removed = []
    for token in string.split(" "):
        if token in SYS_KEYWORDS:
            removed.append(token)
        else:
            kept.append(token)
    return _remove_redun_space(" ".join(kept)), removed
def _title_parser(string):
"""
this is a help function mainly used in title_processing
parser the words; simply seperate the title into name, input and output
Args:
string -- function name's modified string
for example:
"class TagsByTaxaByteHDF5TagGroups extends AbstractTagsByTaxa"
"boolean isInCurrentBlock(int queryIndex)"
Returns:
list of string -- string
for example:
["class", "TagsByTaxaByteHDF5TagGroups", "extends", "AbstractTagsByTaxa"]
["boolean", "isInCurrentBlock(int queryIndex)"]
"""
in_bra = 0
word = ""
add = ("(", "<")
min = (")", ">")
word_list = []
for i in string:
if i in add:
if i == "(":
# something(
if word != "":
word_list.append(word)
word = "("
else:
word += i
else:
# something<
word += i
in_bra += 1
elif i in min:
in_bra -= 1
word += i
if in_bra == 0:
word_list.append(word)
word = ""
elif i == " ":
if word != "":
if word[-1] == ",":
word += i
else:
if in_bra == 0:
word_list.append(word)
word = ""
else:
word += i if word[-1] != " " else ""
else:
word += i
if word != "":
word_list.append(word)
return word_list
def class_interf_detec(string_list):
    """
    Help function mainly used in title_processing.

    Classifies a tokenized title as class / interface / normal and
    extracts its return type, name and input (or inherited types).

    Args:
        string_list -- tokens from _title_parser, e.g.
            ["class", "TagsByTaxaByteHDF5TagGroups", "extends", "AbstractTagsByTaxa"]
            ["boolean", "isInCurrentBlock", "(int queryIndex)"]

    Returns:
        (output, name, input, fun_type) where
            output -- None for class/interface, otherwise the return
                type string (e.g. "void", "boolean")
            name -- declaration name
            input -- argument-list string for a normal function, or the
                list of superclass/interface names for class/interface
            fun_type -- "class", "interface" or "normal"
    """
    if "class" in string_list or "interface" in string_list:
        kind = "class" if "class" in string_list else "interface"
        name = string_list[string_list.index(kind) + 1]
        # Collect the supertype after 'extends' and/or 'implements'.
        parents = []
        for keyword in ("extends", "implements"):
            if keyword in string_list:
                parents.append(string_list[string_list.index(keyword) + 1])
        return None, name, parents, kind

    # Normal function: pad/truncate to (return_type, name, input).
    count = len(string_list)
    if count == 1:
        return None, string_list[0], [], "normal"
    if count == 2:
        return None, string_list[0], string_list[1], "normal"
    # Three or more tokens: the last three are the interesting ones.
    return string_list[-3], string_list[-2], string_list[-1], "normal"
# use recursion to solve?
def _input_parser(string, input_list=None):
    """
    Help function mainly used in title_processing.

    Splits the argument portion of a signature into a list of
    "type name" strings, splitting only on commas at bracket depth 0 so
    '<...>' generics stay attached to their parameter.

    Args:
        string -- "(byte[] source, int chunkSize)" (or a list of such
            strings, handled recursively)
        input_list -- accumulator threaded through recursive calls

    Returns:
        list of strings, e.g. ['byte[] source', 'int chunkSize']
    """
    # NOTE: `if not input_list` also replaces an explicitly passed empty
    # list; harmless here since [] is the only falsy value callers use.
    if not input_list: input_list = []
    if string == "()":
        return []
    elif type(string) == list:
        # Recurse over each element, threading the accumulator through.
        for i in string:
            input_list = _input_parser(i, input_list)
        return input_list
    elif type(string) == str:
        if string[0] == "(":
            assert string[-1] == ")"
            string = string[1:-1]
        # One-character-lookahead scan: i is the current character,
        # i_next the one after it; i_pre lags one character behind the
        # loop variable.
        in_bra = 0
        word = ""
        i_pre = ''
        for i in string:
            if i_pre == '':
                i_pre = i
                continue
            i_next = i
            i = i_pre
            if i == "<":
                # something<
                assert word != ""
                word += i
                in_bra += 1
            elif i == ">":
                in_bra -= 1
                word += i
                if in_bra == 0:
                    if i_next == ' ':
                        # generic type followed by the parameter name:
                        # keep accumulating into the same token
                        pass
                    else:
                        input_list.append(word)
                        word = ""
            elif i == ",":
                # comma at depth 0 terminates the current argument
                if in_bra == 0 and word != "":
                    input_list.append(word)
                    word = ""
            elif i == " ":
                # keep the space between type and name within a token
                if word != "":
                    word += i
            else:
                word += i
            i_pre = i_next
        # Flush the final buffered character (the scan above always lags
        # one character behind); same branches, minus the lookahead.
        i = i_pre
        if i == "<":
            # something<
            assert word != ""
            word += i
            in_bra += 1
        elif i == ">":
            in_bra -= 1
            word += i
            if in_bra == 0:
                input_list.append(word)
                word = ""
        elif i == ",":
            if in_bra == 0 and word != "":
                input_list.append(word)
                word = ""
        elif i == " ":
            if word != "":
                word += i
        else:
            word += i
        if word != "":
            input_list.append(word)
        return input_list
def input_parser(string):
    """Public wrapper around _input_parser with a fresh accumulator."""
    return _input_parser(string)
def _input_list_parser(string):
"""
this is a help function mainly used in title_processing
parser one argument string and return;
simply seperate the type and name into list,
detect the type if exists in import_prepare_list and java_imports_list
Args:
string -- string
for example:
'byte[] source'
'String directoryName'
'int arraySize'
Returns:
input_list -- list ofstring
for example:
['byte[]', 'source']
['String', 'directoryName']
['int','arraySize']
"""
global import_prepare_list, java_imports_list
try:
#detect if exists, used in Test
a = import_prepare_list
b = java_imports_list
except:
import_prepare_list = []
java_imports_list = []
if string.find('<') == -1:
aa = string.split(" ")
import_prepare_list.append(aa[0])
java_imports_list.append(aa[0])
return aa
else:
assert string.find(">") != -1, "format error: " + string
if string.find(">") == len(string) - 1:
return [string, ""]
string1 = string[:string.find(">") + 1]
string2 = string[string.find(">") + 2:]
import_prepare_list.append(string1)
java_imports_list.append(string1)
return [string1, string2]
def title_processing(string, main_name):
"""
transform title string into java_function class
Args:
string -- string
for example:
"public class TagsByTaxaByte extends AbstractTagsByTaxa {"
Returns:
class -- MainFunction: if this title represent a main function
ConFunction: if this title represent a constructor of class
SubFunction: if this title represent a subfunction
EnumFunction: if this title represent a enumerate format
"""
is_main, string = _if_main(string)
# delete redundant space
name_string = string = _prepare_name_string(string)
#remove_system_character
string, sys_char = _remove_sys_char(string)
#parser into list
string_list = _title_parser(string)
return_type, name, input_list, type_fun = class_interf_detec(string_list)
#detcet it's whether a construtor, enumerate or not
if not is_main and type_fun == 'normal' and name == main_name:
type_fun = 'constructor'
if 'enum' in sys_char:
type_fun = 'enum'
# parser input string into list
if input_list:
input_list = _input_parser(input_list)
input_arg = []
for i in input_list:
input_arg.append(_input_list_parser(i))
# constructor args prepare
args = (name_string, name, return_type, input_arg, is_main, type_fun, sys_char)
print args
# yield str(java_function(*args))
if type_fun in ['class', ' interface'] or is_main:
return MainFunction(*args)
elif name == main_name:
assert type_fun == 'constructor'
return ConFunction(*args)
elif type_fun == 'normal':
return SubFunction(*args)
elif type_fun == 'enum':
return EnumFunction(*args)
class JavaFunction():
    """
    Base representation of a parsed Java declaration.

    Attributes:
        sys_char: modifier keywords removed from the signature (subset
            of SYS_KEYWORDS)
        type_fun: one of 'interface', 'class', 'normal', 'constructor',
            'enum'
        return_type: return type string, e.g. 'string[]' (None for
            class/interface/constructor/enum)
        input_list: argument pairs, e.g. [['String', 'taxon']]
        namestring: full cleaned signature,
            e.g. "public String[] getTaxaNames()"
        name: declaration name, e.g. "getTaxaNames"
        ismain: True if this is the main (first, top-level) declaration
        implement: comment/javadoc text found above the signature
    """
    def __init__(self, name_string, name, return_type, input_list, ismain, type_fun, sys_char):
        assert type(name_string) == str, name_string
        assert type(return_type) == str or return_type is None, return_type
        assert type(name) == str, name
        assert type(input_list) == list, "Wrong type of inputlist"
        assert type(ismain) == bool
        assert type_fun in ['interface', 'class', 'normal', 'constructor', 'enum'], "error in type_fun"
        if type_fun in ['class', 'interface']:
            assert return_type is None, "format error " + name_string
        else:
            # BUG FIX: the original `assert X if return_type is None else
            # "normal"` reduced to `assert "normal"` (always true) when a
            # return type was present; check the intended condition.
            if return_type is None:
                # only constructors/enums may lack a return type
                assert type_fun in ["constructor", "enum"], (type_fun, name_string)
            else:
                assert type_fun == "normal", (type_fun, name_string)
        self.sys_char = sys_char
        self.type_fun = type_fun
        self.return_type = return_type
        self.input_list = input_list
        self.namestring = name_string
        self.name = name
        self.ismain = ismain
        self.implement = ''

    def implement_output(self):
        """Hook overridden by subclasses to emit the implement text."""
        pass

    def implement_input(self, string):
        """Attach the comment text found above the signature."""
        assert type(string) == str
        self.implement = string

    def __str__(self):
        return self.namestring

    def overload(self):
        """Hook overridden by subclasses to emit an overload tuple."""
        pass

    def overload_output(self):
        """Hook overridden by subclasses to emit the overload decorator."""
        pass

    # NOTE: misspelled name ("imple_ouput") kept for compatibility with
    # existing callers.
    def imple_ouput(self):
        pass
class MainFunction(JavaFunction):
    """
    A top-level declaration (class/interface or first function).

    Attributes:
        fun: list of functions found below the main function (the first
            function in the file)
    """
    def __init__(self, *args):
        JavaFunction.__init__(self, *args)
        self.fun = []

    # Build the @javaOverload signature tuple for this declaration.
    # Return:
    #  e.g. (make_sig(['int','int','int'],'void'),(metaInteger,metaInteger,metaInteger))
    def overload(self):
        # print self.type_fun
        global import_prepare_list, java_imports_list
        if self.type_fun not in ['normal', 'constructor']:
            return ""
        else:
            inp_tp_list = []
            inp_nm_list = []
            if self.input_list:
                # Map each argument type to its signature string (t1) and
                # converter name (n1) via the module lookup tables,
                # recording the type for later import generation.
                for i in self.input_list:
                    if i[0] in JAVA_TYPE_KEY:
                        import_prepare_list.append(i[0])
                        t1 = "\'" + i[0] + "\'"
                        n1 = JAVA_TYPE_KEY[i[0]]
                    elif i[0] in JAVA_IMPORT_KEY:
                        t1 = "java_imports[\'" + i[0] + "\']"
                        import_prepare_list.append(i[0])
                        java_imports_list.append(i[0])
                        n1 = 'object'
                    elif i[0] in JAVA_ARR_KEY:
                        import_prepare_list.append(i[0])
                        t1 = "\'" + i[0] + "\'"
                        n1 = JAVA_ARR_KEY[i[0]]
                    else:
                        # unknown type: leave a marker for manual review
                        t1 = "\'Please Check\'"
                        n1 = "\'Please Check\'"
                    inp_tp_list.append(t1)
                    inp_nm_list.append(n1)
            if self.return_type == 'void' or self.return_type is None:
                # void signature: no return-converter element
                out_tp = '\'void\''
                output_string = "(make_sig([" + ",".join(inp_tp_list) + "]," + out_tp + "),(" + \
                    ",".join(inp_nm_list) + "))"
            else:
                if self.return_type in JAVA_TYPE_OUT_KEY:
                    out_tp = '\'' + self.return_type + '\''
                    out_nm = JAVA_TYPE_OUT_KEY[self.return_type]
                elif self.return_type in JAVA_IMPORT_KEY:
                    out_tp = "java_imports[\'" + self.return_type + "\']"
                    out_nm = "\'Please Check\'"
                elif self.return_type in JAVA_ARR_KEY:
                    # wrap the array constructor into a lambda converter:
                    # "f(args)" -> "lambda x: f(args, x)"
                    out_tp = '\'' + self.return_type + '\''
                    tip = JAVA_ARR_KEY[self.return_type]
                    out_nm = "lambda x: " + tip[:tip.find(")")] + ", x" + tip[tip.find(")"):]
                else:
                    out_tp = '\'' + self.return_type + '\''
                    out_nm = "\'Please Check\'"
                output_string = "(make_sig([" + ",".join(inp_tp_list) + "]," + out_tp + "),(" + \
                    ",".join(inp_nm_list) + ")," + out_nm + ")"
            return output_string

    # Emit the _java_name binding line for the generated class body.
    def overload_output(self):
        return SPACE + "_java_name = java_imports[\'" + self.name + "\']\n"

    def check_exists(self, fun_list):
        self.overload()

    def name_output(self):
        pass

    def infun_imple_putput(self):
        pass

    # Emit the generated class header line.
    # Return:
    #  e.g. "class truncateTaxonNames(String taxon):\n"
    def format_output(self):
        assert type(self.input_list) == list
        # print 'name', self.name
        # print self.input_list
        ol = []
        for i in self.input_list:
            ol.append(' '.join(i))
        otstring = 'class ' + self.name + "(" + ", ".join(ol) + "):\n"
        return otstring
class SubFunction(MainFunction):
def __init__(self, *args):
MainFunction.__init__(self, *args)
assert self.type_fun == 'normal' or "constructor", "subfunction should be normal or constructor" + str(self)
# check_return the function(s) sharing the same name
# and new combined fun itself
def check_exists(self, fun_list):
ex = 0
for i in fun_list:
if self.name == i.name:
ex += 1
assert ex >= 1, "fun not exists in fun_ist??! :<"
self._combination(fun_list)
return self
# Extration useful information from other functions in the group
# Args:
# sm_list -- list of instances of SubFunction
# Attributes:
# name_string_list: list of string: all signatures which have same functions' name
# overload_list: list of string for all overload of functions' have the same name
# return_type_list: list of string: all return_type of functions which have same name
# input_list_list_bef_fun_output: used for implementation before the function
# input_list_list_in_fun_output: used for implementation in the function
def _combination(self, sm_list):
self.name_string_list = []
self.overload_list = []
self.implement_list = []
self.return_type_list = []
self.input_list_list_bef_fun_output = []
self.input_list_list_in_fun_output = []
for i in sm_list:
self.name_string_list.append(i.namestring)
self.overload_list.append(i.overload())
self.implement_list.append(i.implement_ouput())
self.return_type_list.append(i.return_type)
self.input_list_list_bef_fun_output.append(i.input_list_output_bef_fun())
self.input_list_list_in_fun_output.append(i.input_list_output_in_fun())
# for i in sm_list:
# if isinstance(i, ConFunction):
# print i.overload(), i.type_fun, "\nhaha"
# used in format output
def implement_ouput(self):
# print self.implement
otstring = SPACE + ("\n" + SPACE + "#").join(self.implement.split("\n"))
return otstring
# Return:
# String e.g. @javaOverload('addReadsToTagTaxon',
# (make_sig(['int','int','int'],'void'),(metaInteger,metaInteger,metaInteger)))
def overload_output(self):
if 'static' in self.sys_char:
output_string_title = SPACE + "@javaStaticOverload(\'" + self.name + "\'"
else:
output_string_title = SPACE + "@javaOverload(\'" + self.name + "\'"
assert type(self.overload_list) == list
output_list_iter = list(self.overload_list)
for i in output_list_iter:
# print i
if i == '':
# print self.overload_list
#print "haha"
self.overload_list.remove('')
#print self.overload_list
output_list = [output_string_title]
output_list.extend(self.overload_list)
return (",\n" + DOUBLE_SPACE).join(output_list) + ")"
# Return:
# e.g. #@paramint: tagIndex
# #@paramint: taxaIndex
# #@paramint: addValue
def input_list_output_bef_fun(self):
if not self.input_list:
return ""
else:
sl = []
for i in self.input_list:
sl.append(": ".join(i))
for i in range(len(sl)):
sl[i] = "\n" + SPACE + "#@param" + sl[i]
return "".join(sl)
#Return:
# string
# e.g.int -- tagIndex
# int -- taxaIndex
# int -- addValue
def input_list_output_in_fun(self):
if not self.input_list:
return ""
else:
sl = []
for i in self.input_list:
sl.append(" -- ".join(i))
return ("\n" + DOUBLE_SPACE).join(sl)
# Select the longest implementation as the group's implementation
# Return:
# String -- string
def input_list_output_bef_fun_output(self):
if self.input_list_list_bef_fun_output[0] is None:
return None
else:
a = len(self.input_list_list_bef_fun_output[0])
string = self.input_list_list_bef_fun_output[0]
for i in self.input_list_list_bef_fun_output:
assert type(i) == str
if len(i) > a:
string = i
return string
# Return:
# string -- string
def return_type_list_output(self):
if self.return_type_list[0] is None:
return None
else:
a = len(self.return_type_list[0])
string = self.return_type_list[0]
for i in self.return_type_list:
assert type(i) == str
if len(i) > a:
string = i
return string
def title_fun_output(self):
return SPACE + "def " + self.name + "(self, *args):" + ENTER
    def format_output(self):
        """Assemble the generated Python text for this function group.

        Return:
            (bef_fun, title_fun, imple_fun, appendix) -- the decorator block
            preceding the def, the "def ..." line, the docstring+pass body,
            and any trailing appendix text.
        """
        # NOTE: "implement_ouput" (sic) is the method name as defined
        # elsewhere in this module -- do not "fix" the spelling here alone.
        bef_fun = self.implement_ouput() + \
            SPACE + self.input_list_output_bef_fun_output() + ENTER
        # The conditional expression binds looser than '+', so the whole
        # concatenation is the 'if' arm; '' is appended when there is no
        # meaningful return type. Same pattern is used twice below.
        bef_fun += SPACE + "#@Return" + self.return_type_list_output() + ENTER if self.return_type_list_output() != 'void' and self.return_type_list_output() is not None else ''
        bef_fun += self.overload_output() + ENTER
        title_fun = self.title_fun_output()
        if len(self.name_string_list) == 1:
            # Single overload: emit one Signature/Arguments/Return docstring.
            imple_fun = DOUBLE_SPACE + TRI_QUOTE + (SPACE).join(
                self.implement_ouput().split("#")) + "\n"
            imple_fun += DOUBLE_SPACE + "Signature:" + ENTER + ENTER + DOUBLE_SPACE + self.namestring + ENTER
            imple_fun += ENTER + DOUBLE_SPACE + "Arguments: " + ENTER + ENTER + \
                DOUBLE_SPACE + self.input_list_output_in_fun() + ENTER \
                if self.input_list_output_in_fun() != '' else ''
            imple_fun += ENTER + DOUBLE_SPACE + "Return:" + ENTER + ENTER + \
                DOUBLE_SPACE + self.return_type + ENTER \
                if self.return_type != 'void' and self.return_type is not None else ''
            imple_fun += DOUBLE_SPACE + TRI_QUOTE + ENTER + DOUBLE_SPACE + "pass" + ENTER + ENTER + ENTER
        else:
            # Multiple overloads: list every signature, then per-overload
            # arguments and return types.
            imple_fun = DOUBLE_SPACE + TRI_QUOTE
            length = len(self.implement_list)
            for a in range(length):
                imple_fun += ENTER + DOUBLE_SPACE + self.name_string_list[a] + \
                    (DOUBLE_SPACE).join(self.implement_list[a].split("#"))
            imple_fun += ENTER + DOUBLE_SPACE + "Signature:" + ENTER
            for a in self.name_string_list:
                imple_fun += TRIPLE_SPACE + a + ENTER
            imple_fun += ENTER + DOUBLE_SPACE + "Arguments: " + ENTER + ENTER
            for a in range(length):
                imple_fun += DOUBLE_SPACE + self.name_string_list[a]
                # Re-flow the "#@param" comment block into "name -- type" form.
                ii = (DOUBLE_SPACE).join(self.input_list_list_bef_fun_output[a].split("#@param"))
                imple_fun += TRIPLE_SPACE + " --".join(ii.split(":")) + ENTER
            imple_fun += ENTER + DOUBLE_SPACE + "Return: " + ENTER + ENTER
            for a in range(length):
                if self.return_type_list[a] is None:
                    continue
                imple_fun += DOUBLE_SPACE + self.name_string_list[a] + ENTER
                imple_fun += TRIPLE_SPACE + self.return_type_list[a] + ENTER + ENTER
            imple_fun += DOUBLE_SPACE + TRI_QUOTE + ENTER + DOUBLE_SPACE + "pass" + ENTER + ENTER
        return bef_fun, title_fun, imple_fun, self.appendix
class ConFunction(SubFunction):
    """SubFunction specialised for Java constructors.

    Renders a @javaConstructorOverload decorator instead of @javaOverload,
    and maps the constructor onto Python's __init__.
    """

    def __init__(self, *args):
        SubFunction.__init__(self, *args)

    def overload_output(self):
        """Render the @javaConstructorOverload decorator block.

        Return:
            string -- e.g. "@javaConstructorOverload(java_imports['X'], ...)"
        """
        title = (SPACE + "@javaConstructorOverload(java_imports[\'" +
                 self.name + "\']")
        assert isinstance(self.overload_list, list)
        # Drop empty entries in place (other code may hold a reference to
        # this list), matching the original copy-and-remove behavior.
        while '' in self.overload_list:
            self.overload_list.remove('')
        output_list = [title] + self.overload_list
        return (",\n" + DOUBLE_SPACE).join(output_list) + ")"

    def title_fun_output(self):
        """Constructors become Python's __init__."""
        return SPACE + "def __init__(self, *args):" + ENTER
class EnumFunction(SubFunction):
    """SubFunction specialised for Java enums: emits a java_enum(...) line."""
    def __init__(self, *args):
        SubFunction.__init__(self, *args)
    def format_output(self):
        # NOTE(review): there is no " = " between self.name and "java_enum(",
        # so the emitted line reads "<Name>java_enum(...)". It looks like an
        # assignment was intended -- confirm the expected output (the
        # "#Need more edit" marker suggests this is known-incomplete).
        return ENTER + SPACE + "#Need more edit" + \
               ENTER + SPACE + self.name + "java_enum(java_imports[\'main_name\']+\'$" + self.name + '\')' + ENTER
def function_signature_practice(program_dit, inputroot):
    """
    take the directory and output a list of functions' signatures found in the .java files
    helper function used for debugging title_processing(string)

    Args:
        program_dit -- directory where the debug output file is written
                       (note: parameter name is a typo for "program_dir",
                       kept for interface compatibility)
        inputroot -- root directory scanned for .java files
    """
    title_string = []
    # Collect raw signature lines from every .java file under inputroot.
    for file_name in get_files_indir_java(inputroot):
        # try:
        print "it is: " + file_name
        # for i in get_function_title(file_name):
        #     print i
        title_string.extend(get_function_title(file_name))
        # except Exception:
        #     print "here is some error"
        #     print file_name
        #     sys.exit()
    title_string = list(title_string)
    title_edit_string = []
    # Run each raw signature through title_processing to see what it yields.
    for title in title_string:
        title_edit_string.append(str(title_processing(title)))
    title_string = "\n".join(title_string)
    title_edit_string = "\n".join(title_edit_string)
    toolkit_dir = os.path.join(program_dit, "java_title_keyword_kyky.py")
    # NOTE(review): 'key' is a module-level global presumably populated by
    # title_processing; confirm it is set before this function runs.
    global key
    print set(key)
    # Dump raw and processed signatures for manual comparison.
    with open(toolkit_dir, "w")as text_file:
        text_file.write(title_string)
        text_file.write("\n\n\n")
        text_file.write(title_edit_string)
def get_function_title(file_name):
    """Extract candidate function-signature lines from a .java file.

    A line is kept when it contains one of TITLE_KEYWORDS, has no ';'
    (i.e. is not a plain statement); signatures whose ')' falls on a later
    line are accumulated across lines.

    Args:
        file_name -- path of the .java file
    Return:
        list of signature strings (newlines stripped)
    """
    print(file_name)
    string_list = []
    notfinished = False
    # Fix: use a context manager so the file handle is closed (the original
    # left it open).
    with open(file_name) as input_line:
        for i in input_line:
            if notfinished:
                # Continuation of a signature started earlier: always append
                # this line; the signature ends once ')' appears on a
                # non-statement line.
                string_list[-1] += i[:i.find("\n")]
                if i.find(")") != -1 and i.find(";") == -1:
                    notfinished = False
            else:
                for key in TITLE_KEYWORDS:
                    if i.find(key) != -1 and i.find(";") == -1:
                        string_list.append(i[:i.find("\n")])
                        # '(' without ')' means the signature spills over to
                        # the next line(s).
                        if i.find("(") != -1 and i.find(")") == -1:
                            notfinished = True
                        break
    return string_list
def get_files_indir_java(address):
    """Recursively collect every .java file under *address*.

    helper function used for debugging title_processing(string)

    Args:
        address -- root directory to walk
    Return:
        list of file paths (each also printed, for debugging)
    """
    name_list = []
    for root, _dirs, files in os.walk(address):
        for file_name in files:
            # endswith() is clearer than the old slice comparison
            # (file_name[-5:] == ".java") and behaves identically.
            if file_name.endswith(".java"):
                name_list.append(os.path.join(root, file_name))
    for path in name_list:
        print(path)
    return name_list
def _java_import_combo(name, index):
"""
helper function used for format output of java_imports
"""
return "\'" + name + "\':\'" + index + "\'"
def _initiate(inputfile, outputroot):
    """
    initiate the transform process

    Args:
        inputfile -- address of input
        outputroot -- root address of TASSELpy
    Return:
        main_name -- string
        import_java_list -- list containing the list of key words
        import_list -- list containing the list of key words
        outputfile -- string: directory of the output
    """
    print '\n\nstart init'
    # Boilerplate imports emitted at the top of every generated module.
    # NOTE(review): "from utils.helper import make_sig" lacks the TASSELpy.
    # prefix used elsewhere -- confirm it is intentional.
    import_list = "from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload,javaStaticOverload\n" + \
                  "from utils.helper import make_sig\nimport numpy as np\n"
    sub_output = get_output_filename(inputfile)
    # net\maizegenetics\dna\Chromosome.java
    input_string_list = list(os.path.split(sub_output))
    # yield input_string_list
    # ('net\\maizegenetics\\dna', 'Chromosome.java')
    # get main_name of this .java file from name list
    main_name = input_string_list[-1].split('.')[0]
    # yield type(main_name)
    # get target file name in list
    input_string_list[-1] = main_name + '.py'
    # yield input_string_list
    # ['net\\maizegenetics\\dna', 'Chromosome.py']
    # get whole dir of target file
    outputfile = os.path.join(outputroot, *input_string_list)
    if not os.path.exists(os.path.split(outputfile)[0]):
        # NOTE: 0754 is a Python-2 octal literal (0o754 in Python 3).
        # os.mkdir creates only the last path component; intermediate
        # directories must already exist (os.makedirs would not).
        os.mkdir(os.path.split(outputfile)[0], 0754)
    # yield outputfile
    #C:\Users\bb576\Dropbox\TASSElpy\net\maizegenetics\dna\Chromosome.py
    input_string_list[-1] = main_name
    # Build the java_imports entry for this class itself.
    index = _java_import_polish(_java_import_combo(main_name, os.path.join(*input_string_list)))
    #yield index
    #'Chromosome': 'net/maizegenetics/dna/Chromosome'
    #java_imports = {'Chromosome': 'net/maizegenetics/dna/Chromosome',} <-- here should add something
    import_java_list = ['java_imports = {' + index]
    print "\nEnd init\n"
    print 'Fun name is: ', main_name
    print 'Output addrs is: ', outputfile, "\n\n"
    return main_name, import_java_list, import_list, outputfile
def get_function_title_line(i, notfinished, string):
    """Accumulate one raw line into a (possibly multi-line) Java signature.

    Args:
        i -- current raw line (with trailing newline)
        notfinished -- True when a signature started on an earlier line is
            still being collected
        string -- the signature text collected so far ('' when starting)
    Return:
        (notfinished, string) -- updated continuation flag and the signature
        text with this line folded in (unchanged when no keyword matched).
    """
    if notfinished:
        # Continuation line: always fold it in; the signature is complete
        # once a ')' shows up on a line that is not a statement (no ';').
        string += i[:i.find("\n")]
        if ")" in i and ";" not in i:
            notfinished = False
    else:
        for keyword in TITLE_KEYWORDS:
            if keyword not in i or ";" in i:
                continue
            # Keyword line starts a signature; it spills onto the next line
            # when '(' is present but ')' is not.
            string = i[:i.find("\n")]
            notfinished = "(" in i and ")" not in i
            break
    return notfinished, string
def into_doc_pro(i, main_fun, in_fun, in_doc):
    """
    helper function, if i have "/*", which means a new implementation is started in this line

    Args:
        i -- string: the raw line containing "/*"
        main_fun -- boolean: True once the main class has been seen
        in_fun -- boolean: True while inside a sub-function
        in_doc -- boolean: must be False (a doc block cannot nest)
    Return:
        ram -- list of string
            storing the current line's information
            NOTE(review): ram stays EMPTY when the line is a bare "/*"/"/**"
            and main_fun is True but in_fun is False; the caller
            (file_transform1) indexes ram[0] when '*/' is on the same line --
            confirm that combination cannot occur.
    """
    assert not in_doc
    ram = []
    if i == "/*\n" or i == "/**\n":
        # Bare comment opener: emit the docstring opener at the right indent.
        if not main_fun:
            ram.append(TRI_QUOTE + ENTER)
        elif in_fun:
            ram.append(DOUBLE_SPACE + TRI_QUOTE)
    else:
        # Opener with trailing text on the same line: keep the text after
        # "/*" (dropping a leading '*' from "/**").
        readline = i[i.find('/*') + 2:]
        if not main_fun:
            if readline[0] == "*":
                readline = readline[1:]
            ram.append("\"\"\"" + readline)
        else:
            if readline[0] == "*":
                readline = readline[1:]
            ram.append(readline)
    return ram
def in_doc_pro(i, main_fun, in_fun):
    """Strip the leading '* ' decoration from one Javadoc body line.

    Args:
        i -- raw line from inside a /* ... */ block
        main_fun -- True once the main class has been seen
        in_fun -- True while inside a sub-function
    Return:
        the line's text content; '' for a bare '*' line; indented with
        DOUBLE_SPACE when inside a sub-function of the main class.
    """
    if i[i.find("*") + 1] == "\n":
        # A lone '*' carries no text.
        return ''
    text = i.split("* ")[-1]
    if main_fun and in_fun:
        # Method-level docstring content needs an extra indent.
        return DOUBLE_SPACE + text
    return text
def file_transform1(inputfile, outputroot):
    """
    Main function
    build the file which has name in TASSELpy

    Line-by-line state machine over the .java source: tracks whether we are
    inside a /* ... */ block (in_doc), inside a function (in_fun), past the
    main class declaration (main_fun), and whether a signature is still being
    collected across lines (fun_name_notfinish).

    Args:
        inputfile -- string: path of the .java source
        outputroot -- string: root of the TASSELpy output tree
    """
    if not os.path.isfile(inputfile): raise Exception("input file is not file in transform", inputfile)
    # input file
    # These globals are appended to by title_processing and consumed below.
    global import_prepare_list, java_imports_list
    import_prepare_list = []
    java_imports_list = []
    main_name, import_java_list, import_list, outputfile = _initiate(inputfile, outputroot)
    # NOTE(review): this handle is never closed explicitly.
    input_line = open(inputfile)
    # first get import list
    # get sub dir of this file
    ram = []
    in_doc = False
    in_fun = False
    main_fun = False
    not_fun_ram = ''
    fun_name_notfinish = False
    java_function_list = []
    hit = False
    with open(outputfile, 'w')as text_file:
        for i in input_line:
            # print i
            if i.find("//") != -1:
                # Line comments are dropped entirely.
                pass
            elif i.find('/*') != -1:
                #print "path 1"
                # A doc block opens on this line.
                assert not in_doc, (inputfile, i)
                ram = into_doc_pro(i, main_fun, in_fun, in_doc)
                in_doc = True
                if i.find('*/') != -1:
                    # Opened and closed on the same line.
                    i = ram[0]
                    in_doc = False
                    readline = i[:i.find('*/')]
                    if not main_fun:
                        readline += "\"\"\"\n"
                    text_file.write(readline)
                    #else:
                    #    not_fun_ram = DOUBLE_SPACE.join(ram)
            elif i.find('*/') != -1:
                # The doc block closes here; flush what was accumulated.
                assert in_doc, ("Error happens in path2", inputfile, i)
                if not main_fun:
                    ram.append("\"\"\"\n")
                else:
                    # Held back to become the next function's implement text.
                    not_fun_ram = DOUBLE_SPACE.join(ram)
                #print "path 2"
                #print ram
                ram = []
                in_doc = False
            elif i.find('*') != -1 and in_doc:
                # Body line of the doc block.
                ram.append(in_doc_pro(i, main_fun, in_fun))
            elif not in_doc:
                #print "path not indoc"
                if fun_name_notfinish:
                    # Still collecting a multi-line signature.
                    if i.find(")") != -1 and i.find(";") == -1:
                        fun_name_notfinish = False
                        name_string += i[:i.find("\n")]
                    else:
                        name_string += i[:i.find("\n")]
                else:
                    # Look for the start of a function signature.
                    for key in TITLE_KEYWORDS:
                        if i.find(key) != -1 and i.find("new") == -1 and i.find("throw") == -1:
                            if i.find(";") != -1:
                                if i.find("(") == -1 or i.find(")") == -1:
                                    break
                            #print i
                            hit = True
                            if i.find("(") != -1:
                                if i.find(")") != -1:
                                    name_string = i[:i.find("\n")]
                                    break
                                else:
                                    name_string = i[:i.find("\n")]
                                    fun_name_notfinish = True
                                    break
                            else:
                                name_string = i[:i.find("\n")]
                                break
                if hit and not fun_name_notfinish:
                    # A complete signature has been seen: build the function
                    # object and attach the doc text collected beforehand.
                    fun = title_processing(i, main_name)
                    java_function_list.append(fun)
                    #pr int i[:4], "i[:4]"
                    if not fun.ismain and main_fun:
                        #print "this is sub class"
                        in_fun = True
                        #print "path3"
                        #print string
                        #print "not_fun_ram\n", not_fun_ram
                        k = not_fun_ram.split("@")
                        not_fun_ram = ''
                        #print k
                        if i_previous.find(" @Override") != -1:
                            fun.implement_input('#@Override')
                        else:
                            fun.implement_input(_remove_redun_space(k[0]))
                        #print "implement", fun.implement
                        #print "end"
                        in_fun = False
                    else:
                        # First signature must be the main class itself.
                        assert not main_fun, ("error in path 4", i + str(main_fun))
                        main_fun = True
                        #print "path4"
                if not hit and main_fun:
                    pass
                    #print "path5"
                    #java_function_list[-1].appendix += i
                elif not fun_name_notfinish:
                    hit = False
            i_previous = i
        # Group overloads, deduplicate import keywords, then emit the module.
        java_function_list = _fun_group(java_function_list)
        import_prepare_list = list(set(list(import_prepare_list)))
        java_imports_list = list(set(list(java_imports_list)))
        java_import_string_list = []
        for i in import_prepare_list:
            if i in JAVA_TYPE_KEY:
                java_import_string_list.append(JAVA_IMPORT_STRING[i])
            elif i in JAVA_IMPORT_KEY:
                java_import_string_list.append(_package_import_key_modifier(i, JAVA_IMPORT_KEY[i]))
            elif i in JAVA_ARR_KEY:
                java_import_string_list.append(
                    "from TASSELpy.utils.primativeArray import javaPrimativeArray\nfrom TASSELpy.javaObj import javaArray")
        for i in java_imports_list:
            if i in JAVA_IMPORT_KEY:
                import_java_list.append(_java_imports_list_modifier(i, JAVA_IMPORT_KEY[i]))
        text_file.write(import_list)
        text_file.write('\n'.join(java_import_string_list) + ENTER)
        text_file.write((",\n" + DOUBLE_SPACE).join(import_java_list) + "}" + ENTER)
        for fun in java_function_list:
            # NOTE(review): the bare except hides all failures for a
            # function; consider logging the exception itself.
            try:
                print str(fun)
                for i in fun.format_output():
                    text_file.write(i)
            except:
                print str(fun), " has some error"
def _java_imports_list_modifier(i, string):
"""
as name tells
Args:
i -- string,
name of keyword 'Tags'
string -- string,
keyword found in dic: 'net/maizegenetics/dna/tag/Tags'
Return:
string -- string
for example: "'Tags': 'net/maizegenetics/dna/tag/Tags'"
"""
assert type(string) == str
return "\'" + i + "\': \'" + string + "\'"
def _package_import_key_modifier(i, string):
"""
as name tells
Args:
i -- string,
name of keyword 'Tags'
string -- string,
keyword found in dic: 'net/maizegenetics/dna/tag/Tags'
Return:
string -- string
for example: "from TASSELpy.net.maizegenetics.dna.tag.Tags import Tags"
"""
assert type(string) == str
k = ".".join(string.split("/"))
return "from TASSELpy." + k + " import " + i
def _fun_group(java_function_list):
    """
    Group up the functions have the same name. In each group, extract all information into first object at that group.

    The first element of the input must be the main-class entry; it is
    removed from the input list (the caller's list is mutated) and re-inserted
    as its own group at the front. When no constructor was found, a synthetic
    ConFunction group is inserted right after it.

    Args:
        list -- list of function
    Return:
        list -- list of object
            first object in each group
    """
    assert java_function_list[0].ismain
    main = java_function_list[0]
    # NOTE: this mutates the caller's list.
    java_function_list.remove(java_function_list[0])
    fun_grp_list = []
    matched = False
    con_exists = False
    for i in java_function_list:
        if isinstance(i, ConFunction):
            con_exists = True
        if not fun_grp_list:
            # First function seeds the first group; matched=True keeps the
            # scan below from re-matching it against itself.
            fun_grp_list.append([i])
            matched = True
        for liter in fun_grp_list:
            if not matched and i.name == liter[0].name:
                liter.append(i)
                matched = True
        # matched doubles as "already placed"; reset it for the next item,
        # or start a new group when nothing matched.
        if matched:
            matched = False
        else:
            fun_grp_list.append([i])
    fun_grp_list.insert(0, [main])
    if not con_exists:
        # Synthesize a default constructor group from the main entry.
        fun_grp_list.insert(1, [ConFunction(main.namestring, main.name, main.return_type,
                                            main.input_list, False, 'constructor', main.sys_char)])
    cb_fun_list = []
    for flist in fun_grp_list:
        # Fold each group's members into its first object.
        flist[0].check_exists(flist)
        cb_fun_list.append(flist[0])
    return cb_fun_list
def check_files_exist(file_list):
    """Assert that *file_list* is a non-empty list of strings and that every
    entry names an existing file."""
    looks_valid = (type(file_list) is list and file_list != []
                   and type(file_list[0]) is str)
    assert looks_valid, \
        ("error in file_list, which should be a list of string", file_list)
    for file_name in file_list:
        assert os.path.isfile(file_name), (file_name, "file not exists")
def check_dirs_exist(file_list):
    """Assert that *file_list* is a non-empty list of strings and that every
    entry names an existing directory."""
    looks_valid = (type(file_list) is list and file_list != []
                   and type(file_list[0]) is str)
    assert looks_valid, \
        ("error in file_list, which should be a list of string", file_list)
    for dir_name in file_list:
        assert os.path.isdir(dir_name), (dir_name, "file not exists")
def get_files_indir_py(address):
    """Recursively collect every .py source file under *address*, skipping
    compiled files and package __init__.py files.

    Args:
        address -- root directory to walk
    Return:
        list of file paths (each also printed, for debugging)
    """
    name_list = []
    for root, _dirs, files in os.walk(address):
        for file_name in files:
            # Bug fix: the old substring test (.find(".py") != -1) also
            # matched names like "x.pyx" or "a.py.bak"; match the extension
            # exactly instead.
            if file_name.endswith(".py") and file_name != "__init__.py":
                name_list.append(os.path.join(root, file_name))
    for path in name_list:
        print(path)
    return name_list
def get_output_filename(address):
    """Trim *address* down to the path relative to (and including) the
    top-level "net" package directory.

    Args:
        address -- string, e.g.
            "C:\\Users\\bb576\\Dropbox\\TASSELpy\\net\\maizegenetics\\util\\UnmodifiableBitSet.py"
    Return:
        address -- string, e.g. "net\\maizegenetics\\util\\UnmodifiableBitSet.py"
    Raises:
        ValueError -- when no "net" component is present. (The original
        code would loop forever in that case.)
    """
    assert isinstance(address, str), "Type error"
    head, tail = os.path.split(address)
    parts = [tail]
    while parts[0] != "net":
        head, tail = os.path.split(head)
        if not tail:
            # Path exhausted without finding "net": fail loudly instead of
            # spinning forever on os.path.split("") == ("", "").
            raise ValueError("no 'net' component in path: " + address)
        parts.insert(0, tail)
    return os.path.join(*parts)
def get_java_keyword(program_dir, inputroot):
    """Harvest java_imports entries from every generated .py file under
    *inputroot* and write them as a JAVA_IMPORT_KEY dict literal into
    java_imports_keyword.py inside *program_dir*.

    Args:
        program_dir -- directory where the output file is written
        inputroot -- root directory scanned for .py files
    """
    keywords = []
    for file_name in get_files_indir_py(inputroot):
        print "it is: " + file_name
        keywords.extend(get_java_imports(file_name))
        #except Exception:
        #    print "here is some error during java_imports"
        #    print file_name
        #    sys.exit()
    # Deduplicate, then join into the body of a dict literal.
    keywords = list(set(keywords))
    keywords = (",\n" + SPACE).join(keywords)
    toolkit_dir = os.path.join(program_dir, "java_imports_keyword.py")
    with open(toolkit_dir, "w")as text_file:
        text_file.write("JAVA_IMPORT_KEY = {")
        text_file.write(keywords)
        text_file.write("}")
def get_java_imports(file_name):
    """Scan a generated .py file for its "java_imports = {...}" block and
    return the polished entry strings found inside it.

    Args:
        file_name -- path of a generated TASSELpy .py file
    Return:
        list of strings like "'Tags': 'net/maizegenetics/dna/tag/Tags'";
        empty when the file has no java_imports block.
    """
    # Robustness fix: the old code left java_imports unbound -- raising
    # UnboundLocalError -- when the file contained no java_imports line,
    # and it never closed the file handle.
    java_imports = []
    in_string = False
    with open(file_name) as file_string:
        for i in file_string:
            if in_string:
                # Continuation lines of a multi-line dict literal.
                if i.find("}") == -1:
                    java_imports.append(i[i.find("\'"):i.find("\n")])
                else:
                    java_imports.append(i[i.find("\'"):i.find("}")])
                    break
            if not in_string and i.find("java_imports") != -1 and i.find("{") != -1:
                # Opening line: grab whatever follows '{' (closing the block
                # immediately when '}' is on the same line).
                in_string = True
                i = i[i.find("{") + 1:i.find("\n")]
                if i.find("}") != -1:
                    i = i[:i.find("}")]
                    in_string = False
                java_imports = [i]
    print("java_imports")
    print(java_imports)
    for order in range(len(java_imports)):
        java_imports[order] = _java_import_polish(java_imports[order])
    for string_query in java_imports:
        print(string_query)
    return java_imports
# for different format in java_import
def _java_import_polish(dict_query):
assert type(dict_query) is str, "not string"
# print "dict_query in import_polish: ", dict_query
dict_query = str(dict_query.replace(",", ""))
dict_query = str(dict_query.replace(" ", ""))
assert dict_query.find(":") != -1, "Wrong query, no : exists : " + dict_query
split_dict_query = dict_query.split(":")
assert len(split_dict_query) == 2
# print split_dict_query
if "/" in split_dict_query[1]:
split_dict_query[1] = "/".join(split_dict_query[1].split("/"))
# print split_dict_query
else:
if "." in split_dict_query[1]:
split_dict_query[1] = "/".join(split_dict_query[1].split("."))
else:
assert "\\" in split_dict_query[1], dict_query
split_dict_query[1] = "/".join(split_dict_query[1].split("\\"))
print ": ".join(split_dict_query)
return ": ".join(split_dict_query)
# output is like this 'FisherExact': 'net/maizegenetics/stats/statistics/FisherExact'
if __name__ == '__main__':
    # filetransform -i address1 -o -address2, if not exists, raise errors
    # CLI modes: -i/-o run the main transform; -j harvests java_imports
    # keywords; -t dumps function signatures. -j/-t abort option parsing by
    # raising AssertionError, which the handler below turns into a dispatch
    # (exception-as-goto -- fragile, but that is the existing design).
    input_address = ""
    output_root = ""
    input_java = ""
    program_dir = os.path.split(sys.argv[0])[0]
    try:
        # NOTE(review): getopt long options are normally given WITHOUT the
        # leading "--" (and with a trailing "=" when they take a value);
        # "--javaimport"/"--gettitle" as written will never match. Confirm.
        opts, args = getopt.getopt(sys.argv[1:], "i:o:j:t:", ["input=", "output=", "--javaimport", "--gettitle"])
        for o, a in opts:
            if o in ("-i", "--input"):
                input_address = a
                # NOTE(review): the same isfile() condition is tested twice;
                # one side was probably meant to be a different check.
                if not os.path.isfile(input_address) or not os.path.isfile(input_address):
                    raise Exception("input file address doesn't exist or is wrong", input_address)
                print "input is file: " + input_address
                name = input_address
            if o in ("-o", "--output"):
                output_root = a
                # The output root must already contain the net/ package tree.
                if os.path.isdir(output_root):
                    if os.path.exists(os.path.join(output_root, "net")):
                        output = output_root
                        print "The output addres is: " + output_root
                    else:
                        raise Exception("no net/maizegenetics:", output_root)
                else:
                    raise Exception("output:", output_root)
            if o in ("-j", "--javaimport"):
                # should have net and maizegenetics
                input_java = a
                case = "j"
                raise AssertionError
            if o in ("-t", "--gettitle"):
                # should have net and maizegenetics
                input_java = a
                case = "t"
                raise AssertionError
    except AssertionError:
        # Dispatch the -j / -t side modes, then exit without transforming.
        if case == "j":
            get_java_keyword(program_dir, input_java)
            sys.exit()
        if case == "t":
            function_signature_practice(program_dir, input_java)
            sys.exit()
    except Exception as inst:
        x = inst.args[0]
        y = inst.args[1]
        print x, y, "not such directory found"
    # NOTE(review): 'name' is only bound when -i was given; running without
    # -i reaches here and raises NameError.
    file_transform1(name, output_root)
| bsd-3-clause |
pidydx/grr | grr/lib/flows/general/processes_test.py | 1 | 8502 | #!/usr/bin/env python
"""Test the process list module."""
import os
from grr.lib import action_mocks
from grr.lib import flags
from grr.lib import flow
from grr.lib import test_lib
# pylint: disable=unused-import
from grr.lib.flows.general import processes as _
# pylint: enable=unused-import
from grr.lib.rdfvalues import client as rdf_client
class ListProcessesMock(action_mocks.FileFinderClientMock):
  """Client with real file actions and mocked-out ListProcesses."""
  def __init__(self, processes_list):
    super(ListProcessesMock, self).__init__()
    # Canned rdf_client.Process list returned by every ListProcesses call.
    self.processes_list = processes_list
  def ListProcesses(self, _):
    # The request argument is ignored; always return the canned list.
    return self.processes_list
class ListProcessesTest(test_lib.FlowTestsBaseclass):
  """Test the process listing flow."""
  def testProcessListingOnly(self):
    """Test that the ListProcesses flow works."""
    # ctime is stored in microseconds, hence the * 1e6 conversion.
    client_mock = ListProcessesMock([
        rdf_client.Process(
            pid=2,
            ppid=1,
            cmdline=["cmd.exe"],
            exe="c:\\windows\\cmd.exe",
            ctime=long(1333718907.167083 * 1e6))
    ])
    flow_urn = flow.GRRFlow.StartFlow(
        client_id=self.client_id, flow_name="ListProcesses", token=self.token)
    for s in test_lib.TestFlowHelper(
        flow_urn, client_mock, client_id=self.client_id, token=self.token):
      session_id = s
    # Check the output collection
    processes = flow.GRRFlow.ResultCollectionForFID(
        session_id, token=self.token)
    self.assertEqual(len(processes), 1)
    self.assertEqual(processes[0].ctime, 1333718907167083L)
    self.assertEqual(processes[0].cmdline, ["cmd.exe"])
  def testProcessListingWithFilter(self):
    """Test that the ListProcesses flow works with filter."""
    # Four processes: only pid 3 matches the filename regex; pids 4 and 5
    # have no exe at all and should be skipped (and counted in the logs).
    client_mock = ListProcessesMock([
        rdf_client.Process(
            pid=2,
            ppid=1,
            cmdline=["cmd.exe"],
            exe="c:\\windows\\cmd.exe",
            ctime=long(1333718907.167083 * 1e6)), rdf_client.Process(
                pid=3,
                ppid=1,
                cmdline=["cmd2.exe"],
                exe="c:\\windows\\cmd2.exe",
                ctime=long(1333718907.167083 * 1e6)), rdf_client.Process(
                    pid=4,
                    ppid=1,
                    cmdline=["missing_exe.exe"],
                    ctime=long(1333718907.167083 * 1e6)), rdf_client.Process(
                        pid=5,
                        ppid=1,
                        cmdline=["missing2_exe.exe"],
                        ctime=long(1333718907.167083 * 1e6))
    ])
    flow_urn = flow.GRRFlow.StartFlow(
        client_id=self.client_id,
        flow_name="ListProcesses",
        filename_regex=r".*cmd2.exe",
        token=self.token)
    for s in test_lib.TestFlowHelper(
        flow_urn, client_mock, client_id=self.client_id, token=self.token):
      session_id = s
    # Expect one result that matches regex
    processes = flow.GRRFlow.ResultCollectionForFID(
        session_id, token=self.token)
    self.assertEqual(len(processes), 1)
    self.assertEqual(processes[0].ctime, 1333718907167083L)
    self.assertEqual(processes[0].cmdline, ["cmd2.exe"])
    # Expect two skipped results
    logs = flow.GRRFlow.LogCollectionForFID(flow_urn, token=self.token)
    for log in logs:
      if "Skipped 2" in log.log_message:
        return
    raise RuntimeError("Skipped process not mentioned in logs")
  def testProcessListingFilterConnectionState(self):
    # Only processes whose connection state is in connection_states should
    # be returned (p2 LISTEN and p3 ESTABLISHED; p1 CLOSED is filtered out).
    p1 = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["cmd.exe"],
        exe="c:\\windows\\cmd.exe",
        ctime=long(1333718907.167083 * 1e6),
        connections=rdf_client.NetworkConnection(family="INET", state="CLOSED"))
    p2 = rdf_client.Process(
        pid=3,
        ppid=1,
        cmdline=["cmd2.exe"],
        exe="c:\\windows\\cmd2.exe",
        ctime=long(1333718907.167083 * 1e6),
        connections=rdf_client.NetworkConnection(family="INET", state="LISTEN"))
    p3 = rdf_client.Process(
        pid=4,
        ppid=1,
        cmdline=["missing_exe.exe"],
        ctime=long(1333718907.167083 * 1e6),
        connections=rdf_client.NetworkConnection(
            family="INET", state="ESTABLISHED"))
    client_mock = ListProcessesMock([p1, p2, p3])
    flow_urn = flow.GRRFlow.StartFlow(
        client_id=self.client_id,
        flow_name="ListProcesses",
        connection_states=["ESTABLISHED", "LISTEN"],
        token=self.token)
    for s in test_lib.TestFlowHelper(
        flow_urn, client_mock, client_id=self.client_id, token=self.token):
      session_id = s
    processes = flow.GRRFlow.ResultCollectionForFID(
        session_id, token=self.token)
    self.assertEqual(len(processes), 2)
    states = set()
    for process in processes:
      states.add(str(process.connections[0].state))
    self.assertItemsEqual(states, ["ESTABLISHED", "LISTEN"])
  def testWhenFetchingFiltersOutProcessesWithoutExeAndConnectionState(self):
    # With fetch_binaries=True, processes lacking an exe are dropped, and the
    # remaining one fails the LISTEN filter -- so no results at all.
    p1 = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["test_img.dd"],
        ctime=long(1333718907.167083 * 1e6))
    p2 = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["cmd.exe"],
        exe="c:\\windows\\cmd.exe",
        ctime=long(1333718907.167083 * 1e6),
        connections=rdf_client.NetworkConnection(
            family="INET", state="ESTABLISHED"))
    client_mock = ListProcessesMock([p1, p2])
    for s in test_lib.TestFlowHelper(
        "ListProcesses",
        client_mock,
        fetch_binaries=True,
        client_id=self.client_id,
        connection_states=["LISTEN"],
        token=self.token):
      session_id = s
    # No output matched.
    processes = flow.GRRFlow.ResultCollectionForFID(
        session_id, token=self.token)
    self.assertEqual(len(processes), 0)
  def testFetchesAndStoresBinary(self):
    # The exe points at a real fixture file so the flow can fetch it.
    process = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["test_img.dd"],
        exe=os.path.join(self.base_path, "test_img.dd"),
        ctime=long(1333718907.167083 * 1e6))
    client_mock = ListProcessesMock([process])
    for s in test_lib.TestFlowHelper(
        "ListProcesses",
        client_mock,
        client_id=self.client_id,
        fetch_binaries=True,
        token=self.token):
      session_id = s
    results = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
    binaries = list(results)
    self.assertEqual(len(binaries), 1)
    self.assertEqual(binaries[0].pathspec.path, process.exe)
    self.assertEqual(binaries[0].st_size, os.stat(process.exe).st_size)
  def testDoesNotFetchDuplicates(self):
    # Two processes share the same exe path; only one fetch result expected.
    process1 = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["test_img.dd"],
        exe=os.path.join(self.base_path, "test_img.dd"),
        ctime=long(1333718907.167083 * 1e6))
    process2 = rdf_client.Process(
        pid=3,
        ppid=1,
        cmdline=["test_img.dd", "--arg"],
        exe=os.path.join(self.base_path, "test_img.dd"),
        ctime=long(1333718907.167083 * 1e6))
    client_mock = ListProcessesMock([process1, process2])
    for s in test_lib.TestFlowHelper(
        "ListProcesses",
        client_mock,
        client_id=self.client_id,
        fetch_binaries=True,
        token=self.token):
      session_id = s
    processes = flow.GRRFlow.ResultCollectionForFID(
        session_id, token=self.token)
    self.assertEqual(len(processes), 1)
  def testWhenFetchingIgnoresMissingFiles(self):
    # One exe exists, the other does not; the flow should not error out
    # (check_flow_errors=False) and should return only the existing binary.
    process1 = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["test_img.dd"],
        exe=os.path.join(self.base_path, "test_img.dd"),
        ctime=long(1333718907.167083 * 1e6))
    process2 = rdf_client.Process(
        pid=2,
        ppid=1,
        cmdline=["file_that_does_not_exist"],
        exe=os.path.join(self.base_path, "file_that_does_not_exist"),
        ctime=long(1333718907.167083 * 1e6))
    client_mock = ListProcessesMock([process1, process2])
    for s in test_lib.TestFlowHelper(
        "ListProcesses",
        client_mock,
        client_id=self.client_id,
        fetch_binaries=True,
        token=self.token,
        check_flow_errors=False):
      session_id = s
    results = flow.GRRFlow.ResultCollectionForFID(session_id, token=self.token)
    binaries = list(results)
    self.assertEqual(len(binaries), 1)
    self.assertEqual(binaries[0].pathspec.path, process1.exe)
def main(argv):
  """Entry point: run the full test suite via GRR's test program wrapper."""
  # Run the full test suite
  test_lib.GrrTestProgram(argv=argv)
if __name__ == "__main__":
  flags.StartMain(main)
| apache-2.0 |
mrrrgn/AutobahnPython | examples/twisted/wamp1/rpc/symmetric/server.py | 17 | 1646 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
import sys
from twisted.python import log
from twisted.internet import reactor
from autobahn.twisted.websocket import listenWS
from autobahn.wamp1.protocol import WampServerFactory, \
WampServerProtocol
class MyServerProtocol(WampServerProtocol):
   """WAMP server protocol that polls the connected client's getTime RPC."""

   def onSessionOpen(self):
      """Kick off the polling loop as soon as the WAMP session opens."""
      self.callClient()

   def callClient(self):
      """Invoke the client-registered getTime procedure once."""
      deferred = self.call("http://example.com/client#getTime")
      deferred.addCallback(self.onClientResult)

   def onClientResult(self, result):
      """Print the client's answer and schedule the next call in 1 second."""
      print(result)
      reactor.callLater(1, self.callClient)
if __name__ == '__main__':
   # Passing "debug" as the first CLI argument enables Twisted logging to
   # stdout and WAMP-level debug output in the factory.
   if len(sys.argv) > 1 and sys.argv[1] == 'debug':
      log.startLogging(sys.stdout)
      debug = True
   else:
      debug = False
   # Listen for WebSocket/WAMP connections on localhost:9000.
   factory = WampServerFactory("ws://localhost:9000", debugWamp = debug)
   factory.protocol = MyServerProtocol
   listenWS(factory)
   reactor.run()
| apache-2.0 |
google-research/language | language/orqa/utils/eval_utils.py | 1 | 3649 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Evaluation utilities."""
import json
import re
import string
import unicodedata
import tensorflow.compat.v1 as tf
def normalize_answer(s):
  """Normalize answer: NFD-normalize, lower-case, strip punctuation and
  English articles (a/an/the), and squeeze whitespace."""
  text = unicodedata.normalize("NFD", s).lower()
  text = "".join(ch for ch in text if ch not in string.punctuation)
  text = re.sub(r"\b(a|an|the)\b", " ", text)
  return " ".join(text.split())
def exact_match_score(prediction, ground_truth):
  """Return True iff the two strings are equal after normalize_answer()."""
  return normalize_answer(prediction) == normalize_answer(ground_truth)
def regex_match_score(prediction, ground_truth):
  """Return True when ground_truth, read as a regex, matches the start of
  prediction; False on no match or an invalid pattern."""
  pattern_flags = re.IGNORECASE | re.UNICODE | re.MULTILINE
  try:
    return re.match(ground_truth, prediction, pattern_flags) is not None
  except re.error:
    return False
def metric_max_over_ground_truths(metric_fn, prediction,
                                  ground_truths):
  """Return the best metric_fn score of prediction against any ground truth.

  Args:
    metric_fn: callable (prediction, ground_truth) -> comparable score.
    prediction: predicted answer string.
    ground_truths: iterable of reference answers (must be non-empty;
      raises ValueError otherwise, matching the original behavior).

  Returns:
    The maximum score over all ground truths.
  """
  # max over a generator replaces the old accumulate-into-a-list loop.
  return max(
      metric_fn(prediction, ground_truth) for ground_truth in ground_truths)
def is_correct(answers, prediction,
               is_regex):
  """Return True if prediction matches any reference answer.

  Args:
    answers: list of acceptable answers (regex patterns when is_regex).
    prediction: predicted answer string.
    is_regex: selects regex matching instead of normalized exact match.
  """
  metric_fn = regex_match_score if is_regex else exact_match_score
  return metric_max_over_ground_truths(
      metric_fn=metric_fn, prediction=prediction, ground_truths=answers)
def evaluate_predictions_impl(references,
                              predictions,
                              is_regex):
  """Score predictions against references.

  Args:
    references: dict mapping question -> list of acceptable answers.
    predictions: dict mapping question -> predicted answer string.
    is_regex: if True, reference answers are regular expressions.

  Returns:
    dict with missing_predictions, num_correct, num_total and accuracy.
  """
  num_missing = 0
  num_correct = 0
  for question, answers in references.items():
    if question not in predictions:
      num_missing += 1
      continue
    num_correct += int(
        is_correct(
            answers=answers, prediction=predictions[question],
            is_regex=is_regex))
  total = len(references)
  return {
      "missing_predictions": num_missing,
      "num_correct": num_correct,
      "num_total": total,
      "accuracy": num_correct / float(total),
  }
def evaluate_predictions(references_path, predictions_path,
                         is_regex):
  """Calculates and returns metrics.

  Reads JSON-lines files of references ({"question", "answer"}) and
  predictions ({"question", "prediction"}) and delegates scoring to
  evaluate_predictions_impl. Duplicate questions keep the last entry seen.
  """
  # Regex scoring is intended only for the CuratedTrec dataset.
  if is_regex != ("CuratedTrec" in references_path):
    print("Warning: regex evaluation should (only) be applied to CuratedTrec.")
  references = {}
  with tf.io.gfile.GFile(references_path) as f:
    for line in f:
      example = json.loads(line)
      references[example["question"]] = example["answer"]
  print("Found {} references in {}".format(len(references), references_path))
  predictions = {}
  with tf.io.gfile.GFile(predictions_path) as f:
    for line in f:
      example = json.loads(line)
      predictions[example["question"]] = example["prediction"]
  print("Found {} predictions in {}".format(len(predictions), predictions_path))
  return evaluate_predictions_impl(
      references=references, predictions=predictions, is_regex=is_regex)
| apache-2.0 |
amarjeetkapoor1/Sim | Sim_site/Sim/models.py | 1 | 62204 | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class Indianlegacysectionsangle(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsAngle'.

    Columns look like angle-section dimensions/properties keyed by section
    name (StaadName suggests a STAAD.Pro section database) -- confirm
    against the source DB.
    """
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    b = models.FloatField(db_column='B', blank=True, null=True) # Field name made lowercase.
    t = models.FloatField(db_column='T', blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    rz = models.FloatField(db_column='Rz', blank=True, null=True) # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsAngle'
class Indianlegacysectionschannel(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsChannel'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True) # Field name made lowercase.
    r2 = models.FloatField(db_column='R2', blank=True, null=True) # Field name made lowercase.
    rivet_dia = models.FloatField(db_column='Rivet Dia', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    g = models.FloatField(db_column='G', blank=True, null=True) # Field name made lowercase.
    g1 = models.IntegerField(db_column='G1', blank=True, null=True) # Field name made lowercase.
    h1 = models.FloatField(blank=True, null=True)
    class Meta:
        db_table = 'IndianLegacySectionsChannel'
class IndianlegacysectionsconversionErrors(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsConversion Errors'."""
    object_type = models.CharField(db_column='Object Type', max_length=510, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    object_name = models.CharField(db_column='Object Name', max_length=510, blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    error_description = models.TextField(db_column='Error Description', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    class Meta:
        db_table = 'IndianLegacySectionsConversion Errors'
class Indianlegacysectionsdbinfo(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsDBInfo'."""
    country = models.CharField(db_column='Country', max_length=100, blank=True, null=True) # Field name made lowercase.
    countryadj = models.CharField(db_column='CountryAdj', max_length=100, blank=True, null=True) # Field name made lowercase.
    materialtype = models.CharField(db_column='MaterialType', max_length=100, blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=100, blank=True, null=True) # Field name made lowercase.
    version = models.CharField(db_column='Version', max_length=100, blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsDBInfo'
class IndianlegacysectionsfieldUnits(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsField Units'.

    One row per table name; Field0..Field21 presumably hold per-column unit
    strings -- confirm against the source DB.
    """
    tablename = models.CharField(db_column='TableName', max_length=100, blank=True, null=True) # Field name made lowercase.
    field0 = models.CharField(db_column='Field0', max_length=100, blank=True, null=True) # Field name made lowercase.
    field1 = models.CharField(db_column='Field1', max_length=100, blank=True, null=True) # Field name made lowercase.
    field2 = models.CharField(db_column='Field2', max_length=100, blank=True, null=True) # Field name made lowercase.
    field3 = models.CharField(db_column='Field3', max_length=100, blank=True, null=True) # Field name made lowercase.
    field4 = models.CharField(db_column='Field4', max_length=100, blank=True, null=True) # Field name made lowercase.
    field5 = models.CharField(db_column='Field5', max_length=100, blank=True, null=True) # Field name made lowercase.
    field6 = models.CharField(db_column='Field6', max_length=100, blank=True, null=True) # Field name made lowercase.
    field7 = models.CharField(db_column='Field7', max_length=100, blank=True, null=True) # Field name made lowercase.
    field8 = models.CharField(db_column='Field8', max_length=100, blank=True, null=True) # Field name made lowercase.
    field9 = models.CharField(db_column='Field9', max_length=100, blank=True, null=True) # Field name made lowercase.
    field10 = models.CharField(db_column='Field10', max_length=100, blank=True, null=True) # Field name made lowercase.
    field11 = models.CharField(db_column='Field11', max_length=100, blank=True, null=True) # Field name made lowercase.
    field12 = models.CharField(db_column='Field12', max_length=100, blank=True, null=True) # Field name made lowercase.
    field13 = models.CharField(db_column='Field13', max_length=100, blank=True, null=True) # Field name made lowercase.
    field14 = models.CharField(db_column='Field14', max_length=100, blank=True, null=True) # Field name made lowercase.
    field15 = models.CharField(db_column='Field15', max_length=100, blank=True, null=True) # Field name made lowercase.
    field16 = models.CharField(db_column='Field16', max_length=100, blank=True, null=True) # Field name made lowercase.
    field17 = models.CharField(db_column='Field17', max_length=100, blank=True, null=True) # Field name made lowercase.
    field18 = models.CharField(db_column='Field18', max_length=100, blank=True, null=True) # Field name made lowercase.
    field19 = models.CharField(db_column='Field19', max_length=100, blank=True, null=True) # Field name made lowercase.
    field20 = models.CharField(db_column='Field20', max_length=100, blank=True, null=True) # Field name made lowercase.
    field21 = models.CharField(db_column='Field21', max_length=100, blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsField Units'
class IndianlegacysectionsiShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsI Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsI Shape'
class IndianlegacysectionsmShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsM Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsM Shape'
class Indianlegacysectionspipe(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsPipe'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    od = models.FloatField(db_column='OD', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    i = models.FloatField(db_column='I', blank=True, null=True) # Field name made lowercase.
    z = models.FloatField(db_column='Z', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsPipe'
class IndianlegacysectionssShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsS Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True) # Field name made lowercase.
    r2 = models.FloatField(db_column='R2', blank=True, null=True) # Field name made lowercase.
    rivet_dia1 = models.FloatField(db_column='Rivet Dia1', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    rivet_dia2 = models.FloatField(db_column='Rivet Dia2', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    g1 = models.FloatField(db_column='G1', blank=True, null=True) # Field name made lowercase.
    g2 = models.FloatField(db_column='G2', blank=True, null=True) # Field name made lowercase.
    g3 = models.IntegerField(db_column='G3', blank=True, null=True) # Field name made lowercase.
    h1 = models.FloatField(blank=True, null=True)
    class Meta:
        db_table = 'IndianLegacySectionsS Shape'
class IndianlegacysectionstShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsT Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsT Shape'
class Indianlegacysectionstube(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsTube'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    b = models.FloatField(db_column='B', blank=True, null=True) # Field name made lowercase.
    t = models.FloatField(db_column='T', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsTube'
class IndianlegacysectionswShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianLegacySectionsW Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianLegacySectionsW Shape'
class Indiansectionsangle(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsAngle'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    b = models.FloatField(db_column='B', blank=True, null=True) # Field name made lowercase.
    t = models.FloatField(db_column='T', blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    rz = models.FloatField(db_column='Rz', blank=True, null=True) # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsAngle'
class Indiansectionschannel(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsChannel'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True) # Field name made lowercase.
    r2 = models.FloatField(db_column='R2', blank=True, null=True) # Field name made lowercase.
    rivet_dia = models.FloatField(db_column='Rivet Dia', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    g = models.FloatField(db_column='G', blank=True, null=True) # Field name made lowercase.
    g1 = models.IntegerField(db_column='G1', blank=True, null=True) # Field name made lowercase.
    h1 = models.FloatField(blank=True, null=True)
    class Meta:
        db_table = 'IndianSectionsChannel'
class Indiansectionsdbinfo(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsDBInfo'."""
    country = models.CharField(db_column='Country', max_length=100, blank=True, null=True) # Field name made lowercase.
    countryadj = models.CharField(db_column='CountryAdj', max_length=100, blank=True, null=True) # Field name made lowercase.
    materialtype = models.CharField(db_column='MaterialType', max_length=100, blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=100, blank=True, null=True) # Field name made lowercase.
    version = models.CharField(db_column='Version', max_length=100, blank=True, null=True) # Field name made lowercase.
    description = models.CharField(db_column='Description', max_length=510, blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsDBInfo'
class IndiansectionsfieldUnits(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsField Units'.

    One row per table name; Field0..Field21 presumably hold per-column unit
    strings -- confirm against the source DB.
    """
    tablename = models.CharField(db_column='TableName', max_length=100, blank=True, null=True) # Field name made lowercase.
    field0 = models.CharField(db_column='Field0', max_length=100, blank=True, null=True) # Field name made lowercase.
    field1 = models.CharField(db_column='Field1', max_length=100, blank=True, null=True) # Field name made lowercase.
    field2 = models.CharField(db_column='Field2', max_length=100, blank=True, null=True) # Field name made lowercase.
    field3 = models.CharField(db_column='Field3', max_length=100, blank=True, null=True) # Field name made lowercase.
    field4 = models.CharField(db_column='Field4', max_length=100, blank=True, null=True) # Field name made lowercase.
    field5 = models.CharField(db_column='Field5', max_length=100, blank=True, null=True) # Field name made lowercase.
    field6 = models.CharField(db_column='Field6', max_length=100, blank=True, null=True) # Field name made lowercase.
    field7 = models.CharField(db_column='Field7', max_length=100, blank=True, null=True) # Field name made lowercase.
    field8 = models.CharField(db_column='Field8', max_length=100, blank=True, null=True) # Field name made lowercase.
    field9 = models.CharField(db_column='Field9', max_length=100, blank=True, null=True) # Field name made lowercase.
    field10 = models.CharField(db_column='Field10', max_length=100, blank=True, null=True) # Field name made lowercase.
    field11 = models.CharField(db_column='Field11', max_length=100, blank=True, null=True) # Field name made lowercase.
    field12 = models.CharField(db_column='Field12', max_length=100, blank=True, null=True) # Field name made lowercase.
    field13 = models.CharField(db_column='Field13', max_length=100, blank=True, null=True) # Field name made lowercase.
    field14 = models.CharField(db_column='Field14', max_length=100, blank=True, null=True) # Field name made lowercase.
    field15 = models.CharField(db_column='Field15', max_length=100, blank=True, null=True) # Field name made lowercase.
    field16 = models.CharField(db_column='Field16', max_length=100, blank=True, null=True) # Field name made lowercase.
    field17 = models.CharField(db_column='Field17', max_length=100, blank=True, null=True) # Field name made lowercase.
    field18 = models.CharField(db_column='Field18', max_length=100, blank=True, null=True) # Field name made lowercase.
    field19 = models.CharField(db_column='Field19', max_length=100, blank=True, null=True) # Field name made lowercase.
    field20 = models.CharField(db_column='Field20', max_length=100, blank=True, null=True) # Field name made lowercase.
    field21 = models.CharField(db_column='Field21', max_length=100, blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsField Units'
class IndiansectionsiShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsI Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsI Shape'
class IndiansectionsmShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsM Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsM Shape'
class Indiansectionspipe(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsPipe'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    od = models.FloatField(db_column='OD', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    i = models.FloatField(db_column='I', blank=True, null=True) # Field name made lowercase.
    z = models.FloatField(db_column='Z', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsPipe'
class IndiansectionssShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsS Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True) # Field name made lowercase.
    r2 = models.FloatField(db_column='R2', blank=True, null=True) # Field name made lowercase.
    rivet_dia1 = models.FloatField(db_column='Rivet Dia1', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    rivet_dia2 = models.FloatField(db_column='Rivet Dia2', blank=True, null=True) # Field name made lowercase. Field renamed to remove unsuitable characters.
    g1 = models.FloatField(db_column='G1', blank=True, null=True) # Field name made lowercase.
    g2 = models.FloatField(db_column='G2', blank=True, null=True) # Field name made lowercase.
    g3 = models.IntegerField(db_column='G3', blank=True, null=True) # Field name made lowercase.
    h1 = models.FloatField(blank=True, null=True)
    class Meta:
        db_table = 'IndianSectionsS Shape'
class IndiansectionstShape(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsT Shape'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True) # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True) # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsT Shape'
class Indiansectionstube(models.Model):
    """Auto-generated (inspectdb) model for DB table 'IndianSectionsTube'."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True) # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True) # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True) # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True) # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True) # Field name made lowercase.
    b = models.FloatField(db_column='B', blank=True, null=True) # Field name made lowercase.
    t = models.FloatField(db_column='T', blank=True, null=True) # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True) # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True) # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True) # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True) # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True) # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsTube'
class IndiansectionswShape(models.Model):
    """Legacy 'IndianSectionsW Shape' section table (inspectdb-style model;
    column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'IndianSectionsW Shape'
class Jindalsectionsdbinfo(models.Model):
    """Metadata row for the Jindal sections database (country, material,
    version). Inspectdb-style model; columns preserved via ``db_column``."""
    country = models.CharField(db_column='Country', max_length=100, blank=True, null=True)  # Field name made lowercase.
    countryadj = models.CharField(db_column='CountryAdj', max_length=100, blank=True, null=True)  # Field name made lowercase.
    materialtype = models.CharField(db_column='MaterialType', max_length=100, blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=100, blank=True, null=True)  # Field name made lowercase.
    version = models.CharField(db_column='Version', max_length=100, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsDBInfo'
class JindalsectionsfieldUnits(models.Model):
    """Per-table unit strings for the Jindal sections tables: one row per
    table name with a positional unit label per column (Field0..Field21).
    Inspectdb-style model; columns preserved via ``db_column``."""
    tablename = models.CharField(db_column='TableName', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field0 = models.CharField(db_column='Field0', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field1 = models.CharField(db_column='Field1', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field2 = models.CharField(db_column='Field2', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field3 = models.CharField(db_column='Field3', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field4 = models.CharField(db_column='Field4', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field5 = models.CharField(db_column='Field5', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field6 = models.CharField(db_column='Field6', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field7 = models.CharField(db_column='Field7', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field8 = models.CharField(db_column='Field8', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field9 = models.CharField(db_column='Field9', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field10 = models.CharField(db_column='Field10', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field11 = models.CharField(db_column='Field11', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field12 = models.CharField(db_column='Field12', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field13 = models.CharField(db_column='Field13', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field14 = models.CharField(db_column='Field14', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field15 = models.CharField(db_column='Field15', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field16 = models.CharField(db_column='Field16', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field17 = models.CharField(db_column='Field17', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field18 = models.CharField(db_column='Field18', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field19 = models.CharField(db_column='Field19', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field20 = models.CharField(db_column='Field20', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field21 = models.CharField(db_column='Field21', max_length=510, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsField Units'
class JindalsectionsheShape(models.Model):
    """Legacy 'JindalSectionsHE Shape' section table (inspectdb-style model;
    column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsHE Shape'
class JindalsectionsipeShape(models.Model):
    """Legacy 'JindalSectionsIPE Shape' section table (inspectdb-style model;
    column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsIPE Shape'
class JindalsectionsismcShape(models.Model):
    """Legacy 'JindalSectionsISMC Shape' section table (inspectdb-style
    model; column names preserved via ``db_column``). Unlike the sibling
    Jindal shape tables, this one also carries an R2 column."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True)  # Field name made lowercase.
    r2 = models.FloatField(db_column='R2', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsISMC Shape'
class JindalsectionsnpbShape(models.Model):
    """Legacy 'JindalSectionsNPB Shape' section table (inspectdb-style model;
    column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsNPB Shape'
class JindalsectionsubShape(models.Model):
    """Legacy 'JindalSectionsUB Shape' section table (inspectdb-style model;
    column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsUB Shape'
class JindalsectionsucShape(models.Model):
    """Legacy 'JindalSectionsUC Shape' section table (inspectdb-style model;
    column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsUC Shape'
class JindalsectionswpbShape(models.Model):
    """Legacy 'JindalSectionsWPB Shape' section table (inspectdb-style model;
    column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    bf = models.FloatField(db_column='Bf', blank=True, null=True)  # Field name made lowercase.
    tf = models.FloatField(db_column='Tf', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='Iz', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    ct = models.FloatField(db_column='Ct', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    r1 = models.FloatField(db_column='R1', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'JindalSectionsWPB Shape'
class Job(models.Model):
    """A design job record: identifiers, client, and sign-off names/dates.

    NOTE(review): the date/checker_date/approved_date columns are CharFields
    (max_length=10) rather than DateFields — presumably preserving a legacy
    text format; confirm before doing date arithmetic on them.
    """
    job_id = models.AutoField(primary_key=True)
    idd = models.CharField(max_length=24, blank=True, null=True)
    name = models.CharField(max_length=10, blank=True, null=True)
    date = models.CharField(max_length=10, blank=True, null=True)
    client = models.CharField(max_length=24, blank=True, null=True)
    comment = models.TextField(blank=True, null=True)
    checker_name = models.CharField(max_length=24, blank=True, null=True)
    engineer_name = models.CharField(max_length=24, blank=True, null=True)
    approved_name = models.CharField(max_length=24, blank=True, null=True)
    checker_date = models.CharField(max_length=10, blank=True, null=True)
    ref = models.CharField(max_length=24, blank=True, null=True)
    part = models.CharField(max_length=24, blank=True, null=True)
    rev = models.CharField(max_length=24, blank=True, null=True)
    approved_date = models.CharField(max_length=10, blank=True, null=True)
    class Meta:
        db_table = 'Job'
class JobMaterial(models.Model):
    """Material properties attached to a job (elastic modulus E, shear
    modulus G, Poisson's ratio, density, damping, thermal alpha).

    NOTE(review): ``job_id`` is declared as the primary key, which limits
    each job to a single material row — confirm this is intended rather
    than a composite (job_id, name) key.
    """
    job_id = models.IntegerField(primary_key=True)
    name = models.CharField(max_length=24)
    e = models.FloatField(db_column='E', blank=True, null=True)  # Field name made lowercase.
    poisson = models.FloatField(blank=True, null=True)
    density = models.FloatField(blank=True, null=True)
    damp = models.FloatField(blank=True, null=True)
    alpha = models.FloatField(blank=True, null=True)
    g = models.FloatField(db_column='G', blank=True, null=True)  # Field name made lowercase.
    strength = models.CharField(max_length=24, blank=True, null=True)
    type = models.CharField(max_length=24, blank=True, null=True)
    class Meta:
        db_table = 'Job_material'
class Joint(models.Model):
    """A structural joint (node): coordinates and optional support code,
    unique per (job_id, idd) within a job."""
    sno = models.AutoField(db_column='Sno', primary_key=True)  # Field name made lowercase.
    job_id = models.IntegerField()
    idd = models.IntegerField()
    x = models.FloatField()
    y = models.FloatField()
    z = models.FloatField(blank=True, null=True)
    support = models.CharField(max_length=10, blank=True, null=True)
    class Meta:
        db_table = 'Joint'
        unique_together = (('job_id', 'idd'),)
class Member(models.Model):
    """A structural member within a job, unique per (job_id, member_id);
    ``member_property`` references a MemberProperty row by id (no FK)."""
    sno = models.AutoField(db_column='Sno', primary_key=True)  # Field name made lowercase.
    job_id = models.IntegerField()
    member_id = models.IntegerField()
    member_property = models.IntegerField(blank=True, null=True)
    class Meta:
        db_table = 'Member'
        unique_together = (('job_id', 'member_id'),)
class MemberIncidence(models.Model):
    """Member-to-joint incidence: which joints a member connects to,
    unique per (job_id, member_id, joint_id)."""
    sno = models.AutoField(db_column='Sno', primary_key=True)  # Field name made lowercase.
    job_id = models.IntegerField()
    member_id = models.IntegerField()
    joint_id = models.IntegerField()
    class Meta:
        db_table = 'Member_incidence'
        unique_together = (('job_id', 'member_id', 'joint_id'),)
class MemberProperty(models.Model):
    """A member cross-section property set (type plus YD/ZD dimensions),
    unique per (job_id, idd)."""
    sno = models.AutoField(db_column='Sno', primary_key=True)  # Field name made lowercase.
    job_id = models.IntegerField()
    idd = models.IntegerField()
    type = models.CharField(max_length=24)
    yd = models.FloatField(db_column='YD', blank=True, null=True)  # Field name made lowercase.
    zd = models.FloatField(db_column='ZD', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'Member_property'
        unique_together = (('job_id', 'idd'),)
class Tatastructuressectionschs(models.Model):
    """Legacy 'TataStructuresSectionsCHS' circular hollow section table
    (inspectdb-style model; column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    od = models.FloatField(db_column='OD', blank=True, null=True)  # Field name made lowercase.
    tw = models.FloatField(db_column='Tw', blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    i = models.FloatField(db_column='I', blank=True, null=True)  # Field name made lowercase.
    z = models.FloatField(db_column='Z', blank=True, null=True)  # Field name made lowercase.
    c = models.FloatField(db_column='C', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'TataStructuresSectionsCHS'
class Tatastructuressectionsdbinfo(models.Model):
    """Metadata row for the Tata Structures sections database; like
    Jindalsectionsdbinfo but with an extra Description text column."""
    country = models.CharField(db_column='Country', max_length=100, blank=True, null=True)  # Field name made lowercase.
    countryadj = models.CharField(db_column='CountryAdj', max_length=100, blank=True, null=True)  # Field name made lowercase.
    materialtype = models.CharField(db_column='MaterialType', max_length=100, blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=100, blank=True, null=True)  # Field name made lowercase.
    version = models.CharField(db_column='Version', max_length=100, blank=True, null=True)  # Field name made lowercase.
    description = models.TextField(db_column='Description', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'TataStructuresSectionsDBInfo'
class TatastructuressectionsfieldUnits(models.Model):
    """Per-table unit strings for the Tata Structures sections tables
    (Field0..Field20 are positional unit labels). Inspectdb-style model;
    columns preserved via ``db_column``."""
    tablename = models.CharField(db_column='TableName', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field0 = models.CharField(db_column='Field0', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field1 = models.CharField(db_column='Field1', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field2 = models.CharField(db_column='Field2', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field3 = models.CharField(db_column='Field3', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field4 = models.CharField(db_column='Field4', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field5 = models.CharField(db_column='Field5', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field6 = models.CharField(db_column='Field6', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field7 = models.CharField(db_column='Field7', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field8 = models.CharField(db_column='Field8', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field9 = models.CharField(db_column='Field9', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field10 = models.CharField(db_column='Field10', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field11 = models.CharField(db_column='Field11', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field12 = models.CharField(db_column='Field12', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field13 = models.CharField(db_column='Field13', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field14 = models.CharField(db_column='Field14', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field15 = models.CharField(db_column='Field15', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field16 = models.CharField(db_column='Field16', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field17 = models.CharField(db_column='Field17', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field18 = models.CharField(db_column='Field18', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field19 = models.CharField(db_column='Field19', max_length=100, blank=True, null=True)  # Field name made lowercase.
    field20 = models.CharField(db_column='Field20', max_length=100, blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'TataStructuresSectionsField Units'
class Tatastructuressectionsrhs(models.Model):
    """Legacy 'TataStructuresSectionsRHS' rectangular hollow section table
    (inspectdb-style model). Note the legacy column is 'IZ' here (uppercase)
    where the Indian/Jindal tables use 'Iz' — preserved via ``db_column``."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    b = models.FloatField(db_column='B', blank=True, null=True)  # Field name made lowercase.
    t = models.FloatField(db_column='T', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='IZ', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    c = models.FloatField(db_column='C', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'TataStructuresSectionsRHS'
class Tatastructuressectionsshs(models.Model):
    """Legacy 'TataStructuresSectionsSHS' square hollow section table
    (inspectdb-style model; column names preserved via ``db_column``)."""
    recno = models.IntegerField(db_column='RECNO', blank=True, null=True)  # Field name made lowercase.
    name = models.CharField(db_column='Name', max_length=510, blank=True, null=True)  # Field name made lowercase.
    staadname = models.CharField(db_column='StaadName', max_length=510, blank=True, null=True)  # Field name made lowercase.
    ax = models.FloatField(db_column='AX', blank=True, null=True)  # Field name made lowercase.
    d = models.FloatField(db_column='D', blank=True, null=True)  # Field name made lowercase.
    b = models.FloatField(db_column='B', blank=True, null=True)  # Field name made lowercase.
    t = models.FloatField(db_column='T', blank=True, null=True)  # Field name made lowercase.
    iz = models.FloatField(db_column='IZ', blank=True, null=True)  # Field name made lowercase.
    iy = models.FloatField(db_column='Iy', blank=True, null=True)  # Field name made lowercase.
    ix = models.FloatField(db_column='Ix', blank=True, null=True)  # Field name made lowercase.
    zx = models.FloatField(db_column='Zx', blank=True, null=True)  # Field name made lowercase.
    zy = models.FloatField(db_column='Zy', blank=True, null=True)  # Field name made lowercase.
    c = models.FloatField(db_column='C', blank=True, null=True)  # Field name made lowercase.
    class Meta:
        db_table = 'TataStructuresSectionsSHS'
| mit |
AunShiLord/sympy | sympy/combinatorics/tests/test_perm_groups.py | 1 | 23991 | from sympy.combinatorics.perm_groups import PermutationGroup
from sympy.combinatorics.named_groups import SymmetricGroup, CyclicGroup,\
DihedralGroup, AlternatingGroup, AbelianGroup, RubikGroup
from sympy.combinatorics.permutations import Permutation
from sympy.utilities.pytest import skip, XFAIL
from sympy.combinatorics.generators import rubik_cube_generators
from sympy.combinatorics.polyhedron import tetrahedron as Tetra, cube
from sympy.combinatorics.testutil import _verify_bsgs, _verify_centralizer,\
_verify_normal_closure
rmul = Permutation.rmul
def test_has():
    """Abelian detection, membership via has(), and degree padding."""
    swap = Permutation([1, 0])
    grp = PermutationGroup([swap])
    assert grp.is_abelian
    cycle3 = Permutation([2, 0, 1])
    flip3 = Permutation([2, 1, 0])
    assert not PermutationGroup([cycle3, flip3]).is_abelian
    grp = PermutationGroup([cycle3])
    assert grp.has(cycle3)
    assert not grp.has(flip3)
    # a shorter generator is padded up to the larger degree
    long_p = Permutation([2, 0, 1, 3, 4, 5])
    short_p = Permutation([0, 2, 1, 3, 4])
    assert PermutationGroup(long_p, short_p).degree == \
        PermutationGroup(long_p, short_p).degree == 6
def test_generate():
    """Coset- and dimino-based generation agree and yield the full group."""
    swap = Permutation([1, 0])
    elems = list(PermutationGroup([swap]).generate())
    assert elems == [Permutation([0, 1]), Permutation([1, 0])]
    assert len(list(PermutationGroup(Permutation((0, 1))).generate())) == 1
    assert list(PermutationGroup([swap]).generate(method='dimino')) == \
        [Permutation([0, 1]), Permutation([1, 0])]
    rot = Permutation([2, 0, 1])
    refl = Permutation([2, 1, 0])
    s3 = PermutationGroup([rot, refl])
    by_coset = sorted(p.array_form for p in s3.generate())
    assert by_coset == [[0, 1, 2], [0, 2, 1], [1, 0, 2], [1, 2, 0],
                        [2, 0, 1], [2, 1, 0]]
    by_dimino = list(s3.generate(method='dimino', af=True))
    assert by_coset == sorted(by_dimino)
    gen_a = Permutation([2, 0, 1, 3, 4, 5])
    gen_b = Permutation([2, 1, 3, 4, 5, 0])
    assert len(list(PermutationGroup([gen_a, gen_b]).generate(af=True))) == 360
def test_order():
    """Order of the group generated by two permutations on 10 points."""
    gen_a = Permutation([2, 0, 1, 3, 4, 5, 6, 7, 8, 9])
    gen_b = Permutation([2, 1, 3, 4, 5, 6, 7, 8, 9, 0])
    assert PermutationGroup([gen_a, gen_b]).order() == 1814400
def test_stabilizer():
    """Point stabilizers: orders, generators and the basic stabilizer chain."""
    sym2 = SymmetricGroup(2)
    assert sym2.stabilizer(0).generators == [Permutation(1)]
    gen_a = Permutation([2, 0, 1, 3, 4, 5])
    gen_b = Permutation([2, 1, 3, 4, 5, 0])
    grp = PermutationGroup([gen_a, gen_b])
    assert grp.stabilizer(0).order() == 60
    cube_grp = PermutationGroup([Permutation(q) for q in
                                 [[1, 3, 5, 7, 0, 2, 4, 6],
                                  [1, 3, 0, 2, 5, 7, 4, 6]]])
    stab2 = cube_grp.stabilizer(2)
    assert stab2.order() == 6
    # stabilizing a second point inside the first stabilizer
    stab2_1 = stab2.stabilizer(1)
    assert list(stab2_1.generate(af=True)) == \
        [[0, 1, 2, 3, 4, 5, 6, 7], [3, 1, 2, 0, 7, 5, 6, 4]]
    raw_gens = (
        (1, 2, 0, 4, 5, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19),
        (0, 1, 2, 3, 4, 5, 19, 6, 8, 9, 10, 11, 12, 13, 14,
         15, 16, 7, 17, 18),
        (0, 1, 2, 3, 4, 5, 6, 7, 9, 18, 16, 11, 12, 13, 14, 15, 8, 17, 10, 19))
    big_grp = PermutationGroup([Permutation(q) for q in raw_gens])
    assert big_grp.stabilizer(2).order() == 181440
    assert [H.order() for H in SymmetricGroup(3).basic_stabilizers] == [6, 2]
def test_center():
    """Centers of dihedral, abelian and simple groups, plus a brute-force
    cross-check via _verify_centralizer on a direct product."""
    # the center of the dihedral group D_n is of order 2 for even n
    for i in (4, 6, 10):
        D = DihedralGroup(i)
        assert (D.center()).order() == 2
    # the center of the dihedral group D_n is of order 1 for odd n>2
    for i in (3, 5, 7):
        D = DihedralGroup(i)
        assert (D.center()).order() == 1
    # the center of an abelian group is the group itself
    for i in (2, 3, 5):
        for j in (1, 5, 7):
            for k in (1, 1, 11):
                G = AbelianGroup(i, j, k)
                assert G.center().is_subgroup(G)
    # the center of a nonabelian simple group is trivial
    for i in (1, 5, 9):
        A = AlternatingGroup(i)
        assert (A.center()).order() == 1
    # brute-force verifications
    D = DihedralGroup(5)
    A = AlternatingGroup(3)
    C = CyclicGroup(4)
    # BUG FIX: the original computed `G.is_subgroup(D*A*C)` and discarded the
    # result (a no-op using G left over from the previous loop); the intent
    # was to verify the centralizer on the direct product itself.
    G = D*A*C
    assert _verify_centralizer(G, G)
def test_centralizer():
    """Centralizer computations cross-checked by brute force."""
    # the centralizer of the trivial group is the entire group
    sym2 = SymmetricGroup(2)
    assert sym2.centralizer(Permutation(list(range(2)))).is_subgroup(sym2)
    alt5 = AlternatingGroup(5)
    assert alt5.centralizer(Permutation(list(range(5)))).is_subgroup(alt5)
    # a centralizer in the trivial group is the trivial group itself
    trivial = PermutationGroup([Permutation([0, 1, 2, 3])])
    assert trivial.centralizer(DihedralGroup(4)).is_subgroup(trivial)
    # brute-force verifications for centralizers of groups
    for deg in (4, 5, 6):
        families = (SymmetricGroup(deg), AlternatingGroup(deg),
                    CyclicGroup(deg), DihedralGroup(deg))
        for outer in families:
            for inner in families:
                if not inner.is_subgroup(outer):
                    assert _verify_centralizer(outer, inner)
    # verify the centralizer for all elements of several groups
    for grp in (SymmetricGroup(5), AlternatingGroup(5), DihedralGroup(7)):
        for elem in list(grp.generate_dimino()):
            assert _verify_centralizer(grp, elem)
    # verify centralizers of small groups within small groups
    small = []
    for deg in (1, 2, 3):
        small.extend([SymmetricGroup(deg), AlternatingGroup(deg),
                      DihedralGroup(deg), CyclicGroup(deg)])
    for outer in small:
        for inner in small:
            if outer.degree == inner.degree:
                assert _verify_centralizer(outer, inner)
def test_coset_rank():
    """coset_rank enumerates the group in generation order; coset_unrank
    inverts it and returns None for an out-of-range rank."""
    gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]]
    gens = [Permutation(p) for p in gens_cube]
    G = PermutationGroup(gens)
    # IDIOM: enumerate replaces the original hand-maintained counter `i`
    for i, h in enumerate(G.generate(af=True)):
        rk = G.coset_rank(h)
        assert rk == i
        h1 = G.coset_unrank(rk, af=True)
        assert h == h1
    # FIX: compare to None with `is`, not `==` (PEP 8 E711)
    assert G.coset_unrank(48) is None
    assert G.coset_unrank(G.coset_rank(gens[0])) == gens[0]
def test_coset_factor():
    """coset_factor/contains behaviour for members and non-members."""
    transp = Permutation([0, 2, 1])
    small = PermutationGroup([transp])
    outside = Permutation([2, 1, 0])
    assert not small.coset_factor(outside)
    assert small.coset_rank(outside) is None
    gen_a = Permutation([2, 0, 1, 3, 4, 5])
    gen_b = Permutation([2, 1, 3, 4, 5, 0])
    grp = PermutationGroup([gen_a, gen_b])
    assert grp.order() == 360
    non_member = Permutation([1, 0, 2, 3, 4, 5])
    assert not grp.coset_factor(non_member.array_form)
    assert not grp.contains(non_member)
    member = Permutation([1, 0, 2, 3, 5, 4])
    # indexed factorization rebuilds the element from basic transversals
    factors = grp.coset_factor(member, True)
    transversals = grp.basic_transversals
    rebuilt = Permutation.rmul(*[transversals[i][factors[i]]
                                 for i in range(len(grp.base))])
    assert rebuilt == member
    # direct factorization multiplies back to the element
    factors = grp.coset_factor(member)
    assert Permutation.rmul(*factors) == member
    assert grp.contains(member)
    mirror = PermutationGroup([Permutation([2, 1, 0])])
    assert mirror.coset_factor(Permutation([1, 0, 2])) == []
def test_orbits():
    """Orbits, orbit transversals and (strict/non-strict) transitivity."""
    a = Permutation([2, 0, 1])
    b = Permutation([2, 1, 0])
    g = PermutationGroup([a, b])
    assert g.orbit(0) == set([0, 1, 2])
    assert g.orbits() == [set([0, 1, 2])]
    assert g.is_transitive() and g.is_transitive(strict=False)
    assert g.orbit_transversal(0) == \
        [Permutation(
            [0, 1, 2]), Permutation([2, 0, 1]), Permutation([1, 2, 0])]
    # pairs=True returns (point, permutation mapping 0 to point) tuples
    assert g.orbit_transversal(0, True) == \
        [(0, Permutation([0, 1, 2])), (2, Permutation([2, 0, 1])),
        (1, Permutation([1, 2, 0]))]
    # a 100-cycle: single orbit containing 0
    a = Permutation(list(range(1, 100)) + [0])
    G = PermutationGroup([a])
    assert [min(o) for o in G.orbits()] == [0]
    # the Rubik cube group has two orbits (corner and edge facets)
    G = PermutationGroup(rubik_cube_generators())
    assert [min(o) for o in G.orbits()] == [0, 1]
    assert not G.is_transitive() and not G.is_transitive(strict=False)
    # transitive on the moved points only -> non-strict transitivity holds
    G = PermutationGroup([Permutation(0, 1, 3), Permutation(3)(0, 1)])
    assert not G.is_transitive() and G.is_transitive(strict=False)
    assert PermutationGroup(
        Permutation(3)).is_transitive(strict=False) is False
def test_is_normal():
    """Normality tests and normal closures within S_5."""
    gens_s5 = [Permutation(p) for p in [[1, 2, 3, 4, 0], [2, 1, 4, 0, 3]]]
    G1 = PermutationGroup(gens_s5)
    assert G1.order() == 120
    gens_a5 = [Permutation(p) for p in [[1, 0, 3, 2, 4], [2, 1, 4, 3, 0]]]
    G2 = PermutationGroup(gens_a5)
    assert G2.order() == 60
    # A_5 is normal in S_5
    assert G2.is_normal(G1)
    gens3 = [Permutation(p) for p in [[2, 1, 3, 0, 4], [1, 2, 0, 3, 4]]]
    G3 = PermutationGroup(gens3)
    assert not G3.is_normal(G1)
    assert G3.order() == 12
    # normal closure of a non-normal order-12 subgroup is A_5 (order 60)
    G4 = G1.normal_closure(G3.generators)
    assert G4.order() == 60
    gens5 = [Permutation(p) for p in [[1, 2, 3, 0, 4], [1, 2, 0, 3, 4]]]
    G5 = PermutationGroup(gens5)
    assert G5.order() == 24
    # closure of the order-24 subgroup is all of S_5
    G6 = G1.normal_closure(G5.generators)
    assert G6.order() == 120
    assert G1.is_subgroup(G6)
    assert not G1.is_subgroup(G4)
    assert G2.is_subgroup(G4)
def test_eq():
    """is_subgroup as (in)equality check between groups, including groups
    of different degrees when the degree check is disabled (second arg 0)."""
    a = [[1, 2, 0, 3, 4, 5], [1, 0, 2, 3, 4, 5], [2, 1, 0, 3, 4, 5], [
        1, 2, 0, 3, 4, 5]]
    a = [Permutation(p) for p in a + [[1, 2, 3, 4, 5, 0]]]
    g = Permutation([1, 2, 3, 4, 5, 0])
    G1, G2, G3 = [PermutationGroup(x) for x in [a[:2], a[2:4], [g, g**2]]]
    assert G1.order() == G2.order() == G3.order() == 6
    # same order and G1 <= G2 means they are equal as groups
    assert G1.is_subgroup(G2)
    assert not G1.is_subgroup(G3)
    G4 = PermutationGroup([Permutation([0, 1])])
    assert not G1.is_subgroup(G4)
    assert G4.is_subgroup(G1, 0)
    # duplicate generators do not change the group
    assert PermutationGroup(g, g).is_subgroup(PermutationGroup(g))
    assert SymmetricGroup(3).is_subgroup(SymmetricGroup(4), 0)
    assert SymmetricGroup(3).is_subgroup(SymmetricGroup(3)*CyclicGroup(5), 0)
    assert not CyclicGroup(5).is_subgroup(SymmetricGroup(3)*CyclicGroup(5), 0)
    assert CyclicGroup(3).is_subgroup(SymmetricGroup(3)*CyclicGroup(5), 0)
def test_derived_subgroup():
    """The derived (commutator) subgroup is normal and proper here."""
    a = Permutation([1, 0, 2, 4, 3])
    b = Permutation([0, 1, 3, 2, 4])
    G = PermutationGroup([a, b])
    C = G.derived_subgroup()
    assert C.order() == 3
    assert C.is_normal(G)
    assert C.is_subgroup(G, 0)
    assert not G.is_subgroup(C, 0)
    # cube-rotation group: derived subgroup has order 12
    gens_cube = [[1, 3, 5, 7, 0, 2, 4, 6], [1, 3, 0, 2, 5, 7, 4, 6]]
    gens = [Permutation(p) for p in gens_cube]
    G = PermutationGroup(gens)
    C = G.derived_subgroup()
    assert C.order() == 12
def test_is_solvable():
    """S_3 is solvable; the group generated below (containing a 5-cycle
    and a transposition, hence S_5) is not."""
    a = Permutation([1, 2, 0])
    b = Permutation([1, 0, 2])
    G = PermutationGroup([a, b])
    assert G.is_solvable
    a = Permutation([1, 2, 3, 4, 0])
    b = Permutation([1, 0, 2, 3, 4])
    G = PermutationGroup([a, b])
    assert not G.is_solvable
def test_rubik1():
    """Orders of subgroups of the Rubik cube group built from squared
    generators, and the order of the 2x2x2 cube group."""
    gens = rubik_cube_generators()
    # last generator unsquared, the rest squared
    gens1 = [gens[-1]] + [p**2 for p in gens[1:]]
    G1 = PermutationGroup(gens1)
    assert G1.order() == 19508428800
    gens2 = [p**2 for p in gens]
    G2 = PermutationGroup(gens2)
    assert G2.order() == 663552
    assert G2.is_subgroup(G1, 0)
    C1 = G1.derived_subgroup()
    assert C1.order() == 4877107200
    assert C1.is_subgroup(G1, 0)
    assert not G2.is_subgroup(C1, 0)
    G = RubikGroup(2)
    assert G.order() == 3674160
@XFAIL
def test_rubik():
    """Full 3x3x3 Rubik cube group; skipped because computing its order
    and normal closure is too slow for the regular test run."""
    skip('takes too much time')
    G = PermutationGroup(rubik_cube_generators())
    assert G.order() == 43252003274489856000
    G1 = PermutationGroup(G[:3])
    assert G1.order() == 170659735142400
    assert not G1.is_normal(G)
    G2 = G.normal_closure(G1.generators)
    assert G2.is_subgroup(G)
def test_direct_product():
    """Direct products: order multiplies, degrees add, one orbit per
    factor, and abelianness is preserved iff all factors are abelian."""
    C = CyclicGroup(4)
    D = DihedralGroup(4)
    G = C*C*C
    assert G.order() == 64
    assert G.degree == 12
    assert len(G.orbits()) == 3
    assert G.is_abelian is True
    H = D*C
    assert H.order() == 32
    assert H.is_abelian is False
def test_orbit_rep():
    """orbit_rep returns some group element mapping one point to another,
    or False when the target is not in the orbit."""
    G = DihedralGroup(6)
    assert G.orbit_rep(1, 3) in [Permutation([2, 3, 4, 5, 0, 1]),
                                 Permutation([4, 3, 2, 1, 0, 5])]
    # in the direct product, points 1 and 5 lie in different orbits
    H = CyclicGroup(4)*G
    assert H.orbit_rep(1, 5) is False
def test_schreier_vector():
    """Schreier vectors: -1 marks the base point, other entries give the
    index of the generator used to reach each point."""
    G = CyclicGroup(50)
    v = [0]*50
    v[23] = -1
    assert G.schreier_vector(23) == v
    H = DihedralGroup(8)
    assert H.schreier_vector(2) == [0, 1, -1, 0, 0, 1, 0, 0]
    L = SymmetricGroup(4)
    assert L.schreier_vector(1) == [1, -1, 0, 0]
def test_random_pr():
    """Product-replacement random element generation, made deterministic
    through the _random_prec/_random_prec_n testing hooks."""
    D = DihedralGroup(6)
    r = 11
    n = 3
    _random_prec_n = {}
    _random_prec_n[0] = {'s': 7, 't': 3, 'x': 2, 'e': -1}
    _random_prec_n[1] = {'s': 5, 't': 5, 'x': 1, 'e': -1}
    _random_prec_n[2] = {'s': 3, 't': 4, 'x': 2, 'e': 1}
    D._random_pr_init(r, n, _random_prec_n=_random_prec_n)
    # slot r (the accumulator) should hold the identity after init
    assert D._random_gens[11] == [0, 1, 2, 3, 4, 5]
    _random_prec = {'s': 2, 't': 9, 'x': 1, 'e': -1}
    assert D.random_pr(_random_prec=_random_prec) == \
        Permutation([0, 5, 4, 3, 2, 1])
def test_is_alt_sym():
    """Monte-Carlo test for being A_n or S_n, with the random element
    stream fixed via _random_prec so the outcome is deterministic."""
    G = DihedralGroup(10)
    assert G.is_alt_sym() is False
    S = SymmetricGroup(10)
    N_eps = 10
    _random_prec = {'N_eps': N_eps,
                    0: Permutation([[2], [1, 4], [0, 6, 7, 8, 9, 3, 5]]),
                    1: Permutation([[1, 8, 7, 6, 3, 5, 2, 9], [0, 4]]),
                    2: Permutation([[5, 8], [4, 7], [0, 1, 2, 3, 6, 9]]),
                    3: Permutation([[3], [0, 8, 2, 7, 4, 1, 6, 9, 5]]),
                    4: Permutation([[8], [4, 7, 9], [3, 6], [0, 5, 1, 2]]),
                    5: Permutation([[6], [0, 2, 4, 5, 1, 8, 3, 9, 7]]),
                    6: Permutation([[6, 9, 8], [4, 5], [1, 3, 7], [0, 2]]),
                    7: Permutation([[4], [0, 2, 9, 1, 3, 8, 6, 5, 7]]),
                    8: Permutation([[1, 5, 6, 3], [0, 2, 7, 8, 4, 9]]),
                    9: Permutation([[8], [6, 7], [2, 3, 4, 5], [0, 1, 9]])}
    assert S.is_alt_sym(_random_prec=_random_prec) is True
    # A_10 with an even-only element stream must still report False
    A = AlternatingGroup(10)
    _random_prec = {'N_eps': N_eps,
                    0: Permutation([[1, 6, 4, 2, 7, 8, 5, 9, 3], [0]]),
                    1: Permutation([[1], [0, 5, 8, 4, 9, 2, 3, 6, 7]]),
                    2: Permutation([[1, 9, 8, 3, 2, 5], [0, 6, 7, 4]]),
                    3: Permutation([[6, 8, 9], [4, 5], [1, 3, 7, 2], [0]]),
                    4: Permutation([[8], [5], [4], [2, 6, 9, 3], [1], [0, 7]]),
                    5: Permutation([[3, 6], [0, 8, 1, 7, 5, 9, 4, 2]]),
                    6: Permutation([[5], [2, 9], [1, 8, 3], [0, 4, 7, 6]]),
                    7: Permutation([[1, 8, 4, 7, 2, 3], [0, 6, 9, 5]]),
                    8: Permutation([[5, 8, 7], [3], [1, 4, 2, 6], [0, 9]]),
                    9: Permutation([[4, 9, 6], [3, 8], [1, 2], [0, 5, 7]])}
    assert A.is_alt_sym(_random_prec=_random_prec) is False
def test_minimal_block():
    """Minimal block systems: D_6 has nontrivial blocks of size 2; S_6 is
    primitive, so the only block system is the trivial one."""
    D = DihedralGroup(6)
    block_system = D.minimal_block([0, 3])
    # opposite vertices fall in the same block
    for i in range(3):
        assert block_system[i] == block_system[i + 3]
    S = SymmetricGroup(6)
    assert S.minimal_block([0, 1]) == [0, 0, 0, 0, 0, 0]
    assert Tetra.pgroup.minimal_block([0, 1]) == [0, 0, 0, 0]
def test_max_div():
    """max_div is the largest proper divisor of the degree (10 -> 5)."""
    S = SymmetricGroup(10)
    assert S.max_div == 5
def test_is_primitive():
    """S_5 and C_7 (prime degree) are primitive."""
    S = SymmetricGroup(5)
    assert S.is_primitive() is True
    C = CyclicGroup(7)
    assert C.is_primitive() is True
def test_random_stab():
    """random_stab returns a random element fixing the given point; the
    _random_prec hook makes the first call deterministic."""
    S = SymmetricGroup(5)
    _random_el = Permutation([1, 3, 2, 0, 4])
    _random_prec = {'rand': _random_el}
    g = S.random_stab(2, _random_prec=_random_prec)
    assert g == Permutation([1, 3, 2, 0, 4])
    # a genuinely random stabilizer element must still fix the point
    h = S.random_stab(1)
    assert h(1) == 1
def test_transitivity_degree():
    """C_3 is 1-transitive; A_5 is 3-transitive."""
    perm = Permutation([1, 2, 0])
    C = PermutationGroup([perm])
    assert C.transitivity_degree == 1
    gen1 = Permutation([1, 2, 0, 3, 4])
    gen2 = Permutation([1, 2, 3, 4, 0])
    # alternating group of degree 5
    Alt = PermutationGroup([gen1, gen2])
    assert Alt.transitivity_degree == 3
def test_schreier_sims_random():
    """Randomized Schreier-Sims: an already-strong generating set is
    returned unchanged, and D_3 is completed from scratch using the
    _random_prec element stream."""
    assert sorted(Tetra.pgroup.base) == [0, 1]
    S = SymmetricGroup(3)
    base = [0, 1]
    strong_gens = [Permutation([1, 2, 0]), Permutation([1, 0, 2]),
                   Permutation([0, 2, 1])]
    assert S.schreier_sims_random(base, strong_gens, 5) == (base, strong_gens)
    D = DihedralGroup(3)
    _random_prec = {'g': [Permutation([2, 0, 1]), Permutation([1, 2, 0]),
                          Permutation([1, 0, 2])]}
    base = [0, 1]
    strong_gens = [Permutation([1, 2, 0]), Permutation([2, 1, 0]),
                   Permutation([0, 2, 1])]
    assert D.schreier_sims_random([], D.generators, 2,
                                  _random_prec=_random_prec) == (base, strong_gens)
def test_baseswap():
    """Swapping adjacent base points (deterministic and randomized
    variants) yields a valid BSGS with the swapped base."""
    S = SymmetricGroup(4)
    S.schreier_sims()
    base = S.base
    strong_gens = S.strong_gens
    assert base == [0, 1, 2]
    deterministic = S.baseswap(base, strong_gens, 1, randomized=False)
    randomized = S.baseswap(base, strong_gens, 1)
    assert deterministic[0] == [0, 2, 1]
    assert _verify_bsgs(S, deterministic[0], deterministic[1]) is True
    assert randomized[0] == [0, 2, 1]
    assert _verify_bsgs(S, randomized[0], randomized[1]) is True
def test_schreier_sims_incremental():
    """Incremental Schreier-Sims starting from partial bases and from
    modified/partial generating sets always yields a valid BSGS."""
    identity = Permutation([0, 1, 2, 3, 4])
    TrivialGroup = PermutationGroup([identity])
    base, strong_gens = TrivialGroup.schreier_sims_incremental(base=[0, 1, 2])
    assert _verify_bsgs(TrivialGroup, base, strong_gens) is True
    S = SymmetricGroup(5)
    base, strong_gens = S.schreier_sims_incremental(base=[0, 1, 2])
    assert _verify_bsgs(S, base, strong_gens) is True
    D = DihedralGroup(2)
    base, strong_gens = D.schreier_sims_incremental(base=[1])
    assert _verify_bsgs(D, base, strong_gens) is True
    A = AlternatingGroup(7)
    gens = A.generators[:]
    # scramble the generators; they still generate the same group
    gen0 = gens[0]
    gen1 = gens[1]
    gen1 = rmul(gen1, ~gen0)
    gen0 = rmul(gen0, gen1)
    gen1 = rmul(gen0, gen1)
    base, strong_gens = A.schreier_sims_incremental(base=[0, 1], gens=gens)
    assert _verify_bsgs(A, base, strong_gens) is True
    # gen**3 still generates C_11 since gcd(3, 11) == 1
    C = CyclicGroup(11)
    gen = C.generators[0]
    base, strong_gens = C.schreier_sims_incremental(gens=[gen**3])
    assert _verify_bsgs(C, base, strong_gens) is True
def _subgroup_search(i, j, k):
    """Exercise subgroup_search on S_n/A_n/C_n for n in range(i, j, k).

    Note: the lambdas below close over ``points`` and ``g`` by reference
    (late binding), so rebinding those names before each call changes
    which property is tested -- this is intentional.
    """
    prop_true = lambda x: True
    prop_fix_points = lambda x: [x(point) for point in points] == points
    prop_comm_g = lambda x: rmul(x, g) == rmul(g, x)
    prop_even = lambda x: x.is_even
    for i in range(i, j, k):
        S = SymmetricGroup(i)
        A = AlternatingGroup(i)
        C = CyclicGroup(i)
        # the always-true property finds the whole group
        Sym = S.subgroup_search(prop_true)
        assert Sym.is_subgroup(S)
        # the even-elements property finds the alternating group
        Alt = S.subgroup_search(prop_even)
        assert Alt.is_subgroup(A)
        # an initial subgroup does not change the result
        Sym = S.subgroup_search(prop_true, init_subgroup=C)
        assert Sym.is_subgroup(S)
        # point stabilizers via the fix-points property
        points = [7]
        assert S.stabilizer(7).is_subgroup(S.subgroup_search(prop_fix_points))
        points = [3, 4]
        assert S.stabilizer(3).stabilizer(4).is_subgroup(
            S.subgroup_search(prop_fix_points))
        points = [3, 5]
        fix35 = A.subgroup_search(prop_fix_points)
        points = [5]
        fix5 = A.subgroup_search(prop_fix_points)
        assert A.subgroup_search(prop_fix_points, init_subgroup=fix35
               ).is_subgroup(fix5)
        # centralizer of a generator via the commuting property
        base, strong_gens = A.schreier_sims_incremental()
        g = A.generators[0]
        comm_g = \
            A.subgroup_search(prop_comm_g, base=base, strong_gens=strong_gens)
        assert _verify_bsgs(comm_g, base, comm_g.generators) is True
        assert [prop_comm_g(gen) is True for gen in comm_g.generators]
def test_subgroup_search():
    """Run the subgroup-search battery for degrees 10 and 12 and 14."""
    _subgroup_search(10, 15, 2)
@XFAIL
def test_subgroup_search2():
    """Degree-16 subgroup search; skipped because it is too slow."""
    skip('takes too much time')
    _subgroup_search(16, 17, 1)
def test_normal_closure():
    """Normal closures: trivial cases, brute-force checks over subgroups,
    over all elements of S_5, and over all pairs of small groups."""
    # the normal closure of the trivial group is trivial
    S = SymmetricGroup(3)
    identity = Permutation([0, 1, 2])
    closure = S.normal_closure(identity)
    assert closure.is_trivial
    # the normal closure of the entire group is the entire group
    A = AlternatingGroup(4)
    assert A.normal_closure(A).is_subgroup(A)
    # brute-force verifications for subgroups
    for i in (3, 4, 5):
        S = SymmetricGroup(i)
        A = AlternatingGroup(i)
        D = DihedralGroup(i)
        C = CyclicGroup(i)
        for gp in (A, D, C):
            assert _verify_normal_closure(S, gp)
    # brute-force verifications for all elements of a group
    S = SymmetricGroup(5)
    elements = list(S.generate_dimino())
    for element in elements:
        assert _verify_normal_closure(S, element)
    # small groups
    small = []
    for i in (1, 2, 3):
        small.append(SymmetricGroup(i))
        small.append(AlternatingGroup(i))
        small.append(DihedralGroup(i))
        small.append(CyclicGroup(i))
    for gp in small:
        for gp2 in small:
            # only meaningful when gp2 is a subgroup of gp of equal degree
            if gp2.is_subgroup(gp, 0) and gp2.degree == gp.degree:
                assert _verify_normal_closure(gp, gp2)
def test_derived_series():
    """Derived series of trivial, simple and solvable groups."""
    # the derived series of the trivial group consists only of the trivial group
    triv = PermutationGroup([Permutation([0, 1, 2])])
    assert triv.derived_series()[0].is_subgroup(triv)
    # the derived series for a simple group consists only of the group itself
    for i in (5, 6, 7):
        A = AlternatingGroup(i)
        assert A.derived_series()[0].is_subgroup(A)
    # the derived series for S_4 is S_4 > A_4 > K_4 > triv
    S = SymmetricGroup(4)
    series = S.derived_series()
    assert series[1].is_subgroup(AlternatingGroup(4))
    assert series[2].is_subgroup(DihedralGroup(2))
    assert series[3].is_trivial
def test_lower_central_series():
    """Lower central series of trivial, simple and symmetric groups."""
    # the lower central series of the trivial group consists of the trivial
    # group
    triv = PermutationGroup([Permutation([0, 1, 2])])
    assert triv.lower_central_series()[0].is_subgroup(triv)
    # the lower central series of a simple group consists of the group itself
    for i in (5, 6, 7):
        A = AlternatingGroup(i)
        assert A.lower_central_series()[0].is_subgroup(A)
    # GAP-verified example
    S = SymmetricGroup(6)
    series = S.lower_central_series()
    assert len(series) == 2
    assert series[1].is_subgroup(AlternatingGroup(6))
def test_commutator():
    """Commutator of subgroups: triviality, commutativity, abelian
    arguments and hand-computed examples."""
    # the commutator of the trivial group and the trivial group is trivial
    S = SymmetricGroup(3)
    triv = PermutationGroup([Permutation([0, 1, 2])])
    assert S.commutator(triv, triv).is_subgroup(triv)
    # the commutator of the trivial group and any other group is again trivial
    A = AlternatingGroup(3)
    assert S.commutator(triv, A).is_subgroup(triv)
    # the commutator is commutative
    for i in (3, 4, 5):
        S = SymmetricGroup(i)
        A = AlternatingGroup(i)
        D = DihedralGroup(i)
        assert S.commutator(A, D).is_subgroup(S.commutator(D, A))
    # the commutator of an abelian group is trivial
    S = SymmetricGroup(7)
    A1 = AbelianGroup(2, 5)
    A2 = AbelianGroup(3, 4)
    triv = PermutationGroup([Permutation([0, 1, 2, 3, 4, 5, 6])])
    assert S.commutator(A1, A1).is_subgroup(triv)
    assert S.commutator(A2, A2).is_subgroup(triv)
    # examples calculated by hand
    S = SymmetricGroup(3)
    A = AlternatingGroup(3)
    assert S.commutator(A, S).is_subgroup(A)
def test_is_nilpotent():
    """Abelian groups are nilpotent; A_5 (non-solvable) is not."""
    # every abelian group is nilpotent
    for i in (1, 2, 3):
        C = CyclicGroup(i)
        Ab = AbelianGroup(i, i + 2)
        assert C.is_nilpotent
        assert Ab.is_nilpotent
    Ab = AbelianGroup(5, 7, 10)
    assert Ab.is_nilpotent
    # A_5 is not solvable and thus not nilpotent
    assert AlternatingGroup(5).is_nilpotent is False
def test_is_trivial():
    """A group generated by an identity of any degree is trivial."""
    for i in range(5):
        triv = PermutationGroup([Permutation(list(range(i)))])
        assert triv.is_trivial
def test_pointwise_stabilizer():
    """pointwise_stabilizer equals the chain of one-point stabilizers."""
    S = SymmetricGroup(2)
    stab = S.pointwise_stabilizer([0])
    assert stab.generators == [Permutation(1)]
    S = SymmetricGroup(5)
    points = []
    stab = S
    # stabilize the points one by one and compare with the direct call
    for point in (2, 0, 3, 4, 1):
        stab = stab.stabilizer(point)
        points.append(point)
        assert S.pointwise_stabilizer(points).is_subgroup(stab)
def test_make_perm():
    """make_perm with a fixed seed is reproducible on the cube pgroup."""
    assert cube.pgroup.make_perm(5, seed=list(range(5))) == \
        Permutation([4, 7, 6, 5, 0, 3, 2, 1])
    assert cube.pgroup.make_perm(7, seed=list(range(7))) == \
        Permutation([6, 7, 3, 2, 5, 4, 0, 1])
| bsd-3-clause |
sunny94/temp | sympy/physics/quantum/tests/test_operatorordering.py | 115 | 1490 | from sympy.physics.quantum import Dagger
from sympy.physics.quantum.boson import BosonOp
from sympy.physics.quantum.fermion import FermionOp
from sympy.physics.quantum.operatorordering import (normal_order,
normal_ordered_form)
def test_normal_order():
    """normal_order moves creation (daggered) operators to the left of
    annihilation operators without introducing the extra terms that the
    (anti)commutation relations would produce.

    The unused second boson/fermion operators from the original test were
    removed.
    """
    a = BosonOp('a')
    c = FermionOp('c')
    # bosonic: plain reordering, no commutator terms
    assert normal_order(a * Dagger(a)) == Dagger(a) * a
    assert normal_order(Dagger(a) * a) == Dagger(a) * a
    assert normal_order(a * Dagger(a) ** 2) == Dagger(a) ** 2 * a
    # fermionic: each swap flips the sign
    assert normal_order(c * Dagger(c)) == - Dagger(c) * c
    assert normal_order(Dagger(c) * c) == Dagger(c) * c
    assert normal_order(c * Dagger(c) ** 2) == Dagger(c) ** 2 * c
def test_normal_ordered_form():
    """normal_ordered_form rewrites products into normal order while
    applying the bosonic commutation / fermionic anticommutation
    relations, so extra lower-order terms appear.

    The unused second boson/fermion operators from the original test were
    removed.
    """
    a = BosonOp('a')
    c = FermionOp('c')
    # bosonic: [a, a^dag] = 1 generates the constant / polynomial terms
    assert normal_ordered_form(Dagger(a) * a) == Dagger(a) * a
    assert normal_ordered_form(a * Dagger(a)) == 1 + Dagger(a) * a
    assert normal_ordered_form(a ** 2 * Dagger(a)) == \
        2 * a + Dagger(a) * a ** 2
    assert normal_ordered_form(a ** 3 * Dagger(a)) == \
        3 * a ** 2 + Dagger(a) * a ** 3
    # fermionic: {c, c^dag} = 1 with alternating signs
    assert normal_ordered_form(Dagger(c) * c) == Dagger(c) * c
    assert normal_ordered_form(c * Dagger(c)) == 1 - Dagger(c) * c
    assert normal_ordered_form(c ** 2 * Dagger(c)) == Dagger(c) * c ** 2
    assert normal_ordered_form(c ** 3 * Dagger(c)) == \
        c ** 2 - Dagger(c) * c ** 3
| bsd-3-clause |
galactose/wviews | answer_set.py | 1 | 1568 | """
answer_set.py: functionality for handling answer sets.
Copyright (C) 2014 Michael Kelly
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
NO_MODEL_FOR_EVALUATED_PROGRAM = -1


def parse_answer_sets(raw_worldview):
    """Convert raw solver output lines into a list of answer sets.

    Each input line that contains a ``{...}`` group is split on commas and
    turned into a set of stripped atom strings. Lines without braces are
    ignored. If the input is empty, ``NO_MODEL_FOR_EVALUATED_PROGRAM`` is
    returned to signal that the evaluated program had no model.

    Arguments:
        * raw_worldview (list(str)) - unformatted answer-set lines
    """
    pattern = re.compile(r'{([\W\w]*)}')
    worldview = []
    saw_any_line = False
    for raw_line in raw_worldview:
        saw_any_line = True
        match = pattern.search(raw_line)
        if match is None:
            continue
        tokens = match.group(1).split(',')
        worldview.append({token.strip() for token in tokens})
    if not saw_any_line:
        return NO_MODEL_FOR_EVALUATED_PROGRAM
    return worldview
| gpl-3.0 |
syaiful6/django | django/contrib/postgres/forms/array.py | 258 | 6743 | import copy
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.safestring import mark_safe
from django.utils.translation import string_concat, ugettext_lazy as _
class SimpleArrayField(forms.CharField):
    """A form field that maps a delimiter-separated string to a list of
    values, converting and validating every item with ``base_field``.

    The three previously duplicated per-item error-wrapping loops now share
    the private ``_item_errors`` helper.
    """
    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }

    def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
        self.base_field = base_field
        self.delimiter = delimiter
        super(SimpleArrayField, self).__init__(*args, **kwargs)
        if min_length is not None:
            self.min_length = min_length
            self.validators.append(ArrayMinLengthValidator(int(min_length)))
        if max_length is not None:
            self.max_length = max_length
            self.validators.append(ArrayMaxLengthValidator(int(max_length)))

    def _item_errors(self, index, exc):
        # Wrap each underlying error so the message identifies the failing
        # item by its position in the array.
        return [
            ValidationError(
                string_concat(self.error_messages['item_invalid'], error.message),
                code='item_invalid',
                params={'nth': index},
            )
            for error in exc.error_list
        ]

    def prepare_value(self, value):
        # Re-join a list value so it can be shown in the single text input.
        if isinstance(value, list):
            return self.delimiter.join(six.text_type(self.base_field.prepare_value(v)) for v in value)
        return value

    def to_python(self, value):
        """Split the raw string on the delimiter and convert each item with
        ``base_field.to_python``, collecting all per-item errors."""
        if value:
            items = value.split(self.delimiter)
        else:
            items = []
        errors = []
        values = []
        for i, item in enumerate(items):
            try:
                values.append(self.base_field.to_python(item))
            except ValidationError as e:
                errors.extend(self._item_errors(i, e))
        if errors:
            raise ValidationError(errors)
        return values

    def validate(self, value):
        """Run ``base_field.validate`` on every item, collecting all errors."""
        super(SimpleArrayField, self).validate(value)
        errors = []
        for i, item in enumerate(value):
            try:
                self.base_field.validate(item)
            except ValidationError as e:
                errors.extend(self._item_errors(i, e))
        if errors:
            raise ValidationError(errors)

    def run_validators(self, value):
        """Run the base field's validators on every item, collecting all
        errors."""
        super(SimpleArrayField, self).run_validators(value)
        errors = []
        for i, item in enumerate(value):
            try:
                self.base_field.run_validators(item)
            except ValidationError as e:
                errors.extend(self._item_errors(i, e))
        if errors:
            raise ValidationError(errors)
class SplitArrayWidget(forms.Widget):
    """Widget rendering ``size`` copies of an inner widget, one per array
    item, named ``<name>_0`` ... ``<name>_<size-1>``."""

    def __init__(self, widget, size, **kwargs):
        # Accept either a widget class or an instance.
        self.widget = widget() if isinstance(widget, type) else widget
        self.size = size
        super(SplitArrayWidget, self).__init__(**kwargs)

    @property
    def is_hidden(self):
        return self.widget.is_hidden

    def value_from_datadict(self, data, files, name):
        # Collect one sub-value per indexed sub-widget.
        return [self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
                for index in range(self.size)]

    def id_for_label(self, id_):
        # See the comment for RadioSelect.id_for_label()
        if id_:
            id_ += '_0'
        return id_

    def render(self, name, value, attrs=None):
        """Render every sub-widget; extra values beyond ``size`` are still
        rendered, missing ones render as None."""
        if self.is_localized:
            self.widget.is_localized = self.is_localized
        value = value or []
        output = []
        final_attrs = self.build_attrs(attrs)
        id_ = final_attrs.get('id')
        for i in range(max(len(value), self.size)):
            try:
                widget_value = value[i]
            except IndexError:
                widget_value = None
            if id_:
                # give each sub-widget a unique, indexed HTML id
                final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
            output.append(self.widget.render(name + '_%s' % i, widget_value, final_attrs))
        return mark_safe(self.format_output(output))

    def format_output(self, rendered_widgets):
        return ''.join(rendered_widgets)

    @property
    def media(self):
        return self.widget.media

    def __deepcopy__(self, memo):
        # The inner widget must be copied too, not shared between forms.
        obj = super(SplitArrayWidget, self).__deepcopy__(memo)
        obj.widget = copy.deepcopy(self.widget)
        return obj

    @property
    def needs_multipart_form(self):
        return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
    """Form field presenting ``size`` copies of ``base_field`` and cleaning
    the submitted sub-values into a fixed-length list.

    Bug fix: trailing-null trimming now tests ``null_index is not None``
    instead of the truthiness of ``null_index`` -- with the old check an
    array whose values were all empty (null run reaching index 0) was
    never trimmed, because 0 is falsy.
    """
    default_error_messages = {
        'item_invalid': _('Item %(nth)s in the array did not validate: '),
    }

    def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
        self.base_field = base_field
        self.size = size
        self.remove_trailing_nulls = remove_trailing_nulls
        widget = SplitArrayWidget(widget=base_field.widget, size=size)
        kwargs.setdefault('widget', widget)
        super(SplitArrayField, self).__init__(**kwargs)

    def clean(self, value):
        """Clean each sub-value with ``base_field``, optionally strip
        trailing empty values, and raise all collected item errors."""
        cleaned_data = []
        errors = []
        if not any(value) and self.required:
            raise ValidationError(self.error_messages['required'])
        max_size = max(self.size, len(value))
        for index in range(max_size):
            item = value[index]
            try:
                cleaned_data.append(self.base_field.clean(item))
                # keep errors aligned with cleaned_data by position
                errors.append(None)
            except ValidationError as error:
                errors.append(ValidationError(
                    string_concat(self.error_messages['item_invalid'], error.message),
                    code='item_invalid',
                    params={'nth': index},
                ))
                cleaned_data.append(None)
        if self.remove_trailing_nulls:
            null_index = None
            # walk backwards to find where the trailing run of empties starts
            for i, item in reversed(list(enumerate(cleaned_data))):
                if item in self.base_field.empty_values:
                    null_index = i
                else:
                    break
            # explicit None check: index 0 is falsy but still a valid cut point
            if null_index is not None:
                cleaned_data = cleaned_data[:null_index]
                errors = errors[:null_index]
        errors = list(filter(None, errors))
        if errors:
            raise ValidationError(errors)
        return cleaned_data
| bsd-3-clause |
nwillemse/misc-scripts | ib-downloader/ib-downloader3.py | 1 | 8685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ib-downloader3.py
Created on Tue Jul 5 15:53:45 2016
@author: nwillemse
"""
import click
import time
import pandas as pd
from sys import argv
from datetime import datetime
from ib.ext.Contract import Contract
from ib.opt import Connection
class Downloader:
    """Download historical bar data from Interactive Brokers via TWS/IB
    Gateway and write one CSV per ticker into ``data/``.

    Bars arrive asynchronously through the ``historicalData`` callback;
    ``got_hist_data`` / ``no_data_error`` are flipped from the callback
    thread to signal completion to the polling loop.
    """

    def __init__(
        self, tickers, exchange, ticker_type, expiry, barsize,
        start_date, end_date, ib_client_id, ib_port
    ):
        self.tickers = tickers
        self.exchange = exchange
        self.ticker_type = ticker_type
        self.expiry = expiry
        self.barsize = barsize
        self.client_id = ib_client_id
        self.order_id = 1
        self.port = ib_port
        self.currency = 'USD'
        self.tws_conn = None
        # Accumulates bars received through the historicalData callback.
        self.curr_ohlc = pd.DataFrame(
            columns=['open', 'high', 'low', 'close', 'volume', 'open_interest']
        )
        # Completion flags set from the TWS callback thread.
        self.no_data_error = False
        self.got_hist_data = False
        self.dates_list = self._get_trade_dates(start_date, end_date)
        # CASH (FX) instruments have no trades, so request midpoint data.
        self.what_to_show = 'MIDPOINT' if ticker_type=='CASH' else 'TRADES'
        self.end_date = end_date

    def _get_trade_dates(self, start_dt=None, end_dt=None):
        """Return request dates between start_dt and end_dt, newest first.

        CASH/FUT use every calendar day; other types use the NYSE trading
        calendar loaded from ``nyse_dates.txt``.
        """
        if self.ticker_type in ['CASH', 'FUT']:
            dates = pd.date_range(start_dt, end_dt).tolist()
            # List comprehension instead of map(): on Python 3, map()
            # returns an iterator that has no .sort() method.
            res = [d.strftime('%Y-%m-%d %H:%M:%S') for d in dates]
            res.sort(reverse=True)
            print(res)
        else:
            fn = 'nyse_dates.txt'
            print("Loading trading days from %s..." % fn)
            a = pd.read_csv(fn, parse_dates=['trade_date'])
            sub = a[a.trade_date >= start_dt].trade_date
            sub = sub[sub <= end_dt]
            sub.sort_values(ascending=False, inplace=True)
            res = sub.apply(lambda x: x.strftime('%Y-%m-%d')).values.tolist()
            print("Loaded %s days from %s to %s" % (len(res), res[-1], res[0]))
        return res

    def error_handler(self, msg):
        """Log server errors; code 162 means no historical data available."""
        if msg.typeName == "error":  # and msg.id != -1:
            print("Server Error:", msg)
            if msg.errorCode == 162:
                self.no_data_error = True

    def server_handler(self, msg):
        """Dispatch non-error messages coming back from TWS."""
        if msg.typeName == "nextValidId":
            self.order_id = msg.orderId
        elif msg.typeName == "managedAccounts":
            self.account_code = msg.accountsList
            print(self.account_code)
        elif msg.typeName == "historicalData":
            self.historical_data_event(msg)
        elif msg.typeName == "error" and msg.id != -1:
            return

    def create_contract(self, symbol, sec_type, exch, curr, expiry):
        """Build an IB Contract; expired futures are explicitly included."""
        contract = Contract()
        contract.m_symbol = symbol
        contract.m_secType = sec_type
        contract.m_exchange = exch
        contract.m_currency = curr
        contract.m_expiry = expiry
        if sec_type=='FUT':
            contract.m_includeExpired = 1
        print("symbol:%s secType:%s exchange:%s currency:%s expiry:%s" % (
            contract.m_symbol, contract.m_secType, contract.m_exchange,
            contract.m_currency, contract.m_expiry
            )
        )
        return contract

    def historical_data_event(self, msg):
        """Append one bar to curr_ohlc; a 'finished' sentinel in msg.date
        marks the end of the request."""
        if msg.date.find('finished') == -1:
            try:
                # intraday bars carry a timestamp, daily bars a plain date
                date = datetime.strptime(msg.date, '%Y%m%d %H:%M:%S')
            except Exception:
                date = datetime.strptime(msg.date, '%Y%m%d')
            self.curr_ohlc.loc[date] = msg.open, msg.high, msg.low, msg.close, \
                msg.volume, msg.count
        else:
            self.got_hist_data = True

    def connect_to_tws(self):
        """Open the API connection; raises if the gateway is unreachable."""
        self.tws_conn = Connection.create(host='localhost',
                                          port=self.port,
                                          clientId=self.client_id)
        self.tws_conn.connect()
        time.sleep(2)
        if not self.tws_conn.isConnected():
            raise Exception("Unable to connect to TWS. Make sure the Gateway or TWS has been started. Port=%s ClientId=%s" % (self.port, self.client_id))

    def disconnect_from_tws(self):
        """Close the API connection if one was opened."""
        if self.tws_conn is not None:
            self.tws_conn.disconnect()

    def register_callback_functions(self):
        """Hook the server/error handlers into the connection."""
        print("Registering callback functions...")
        # Assign server messages handling function.
        self.tws_conn.registerAll(self.server_handler)
        # Assign error handling function.
        self.tws_conn.register(self.error_handler, 'Error')

    def request_historical_data(self, symbol_id, symbol):
        """Request 250 days of bars ending at end_date and block until the
        data arrives or TWS reports that none exists."""
        contract = self.create_contract(symbol,
                                        self.ticker_type,
                                        self.exchange,
                                        self.currency,
                                        self.expiry)
        self.got_hist_data = False
        self.no_data_error = False
        end_dt = self.end_date.strftime('%Y%m%d %H:%M:%S')
        print("Requesting history for %s on %s..." % (symbol, self.end_date))
        self.tws_conn.reqHistoricalData(symbol_id,
                                        contract,
                                        endDateTime=end_dt,
                                        durationStr='250 D',
                                        barSizeSetting=self.barsize,
                                        whatToShow=self.what_to_show,
                                        useRTH=0,
                                        formatDate=1)
        # poll until the callback thread signals completion or failure
        while not self.got_hist_data and not self.no_data_error:
            time.sleep(1)
        if self.no_data_error:
            self.no_data_error = False
            print("no data found for this day, continuing...")
            return
        # throttle to respect IB's historical-data pacing limits
        time.sleep(8)

    def start(self):
        """Connect, download history for every ticker, write the CSVs and
        always disconnect, even on error."""
        try:
            print("Connecing to tws...")
            self.connect_to_tws()
            self.register_callback_functions()
            for ticker in self.tickers:
                print("Request historical data for %s" % ticker)
                self.request_historical_data(1, ticker)
                self.curr_ohlc.sort_index(ascending=False, inplace=True)
                self.curr_ohlc.index.name = 'datetime'
                if self.ticker_type=='CASH':
                    filename = ticker + '.' + self.currency + '.csv'
                else:
                    filename = ticker + '.csv'
                self.curr_ohlc.to_csv('data/' + filename)
        except Exception as e:
            # include the exception so failures are diagnosable
            print("Error:", e)
        finally:
            print("disconnected")
            self.disconnect_from_tws()
@click.command()
@click.option('--tickers', '-t', default='SPY',
              help='Comma separated list of tickers. Default="SPY"')
@click.option('--exchange', '-x', default='GLOBEX',
              help='Comma separated list of tickers. Default="SPY"')
@click.option('--tickertype', '-tt', default='STK',
              help='Type of tickers (STK, FUT or CASH). Defaul="STK"')
@click.option('--expiry', '-e',
              help='The expiry when FUT ticker type. Default=None')
@click.option('--barsize', '-bs', default='15 mins',
              help='Barsize of downloaded data. Default="15 mins"')
@click.option('--startdate', '-sd', default='2015-04-20',
              help='Starting date for data download (YYYY-MM-DD).')
@click.option('--enddate', '-ed', default='2015-05-04',
              help='Ending date for data download (YYYY-MM-DD).')
@click.option('--ib_client_id', '-c', default=200,
              help='IB Client Id.')
@click.option('--ib_port', '-p', default=4001,
              help='IB API Port.')
def main(tickers, exchange, tickertype, expiry, barsize, startdate,
         enddate, ib_client_id, ib_port
         ):
    """
    IB Downloader downloads data from Interactive Brokers for the specified
    list of tickers.
    """
    # Dates are anchored at 16:00 (exchange close of the daily session).
    start_dt = datetime.strptime(startdate + ' 16:00:00', '%Y-%m-%d %H:%M:%S')
    end_dt = datetime.strptime(enddate + ' 16:00:00', '%Y-%m-%d %H:%M:%S')
    # NOTE(review): .encode(...) returns bytes on Python 3, where
    # bytes.split(',') with a str argument raises TypeError -- this module
    # appears to target Python 2; confirm before running under Python 3.
    tickers = tickers.encode('ascii', 'ignore').split(',')
    exchange = exchange.encode('ascii', 'ignore')
    ticker_type = tickertype.encode('ascii', 'ignore')
    barsize = barsize.encode('ascii', 'ignore')
    if ticker_type == 'FUT':
        expiry = expiry.encode('ascii', 'ignore')
    print('Tickers: %s' % tickers)
    system = Downloader(
        tickers, exchange, ticker_type, expiry, barsize,
        start_dt, end_dt, ib_client_id, ib_port
    )
    system.start()
| mit |
NiclasEriksen/rpg_procgen | animations.py | 1 | 6163 | import glob
import logging
import pyglet
from functions import *
from pyglet.image import Animation, AnimationFrame
ANIM_PATH = "resources/animations/"
def create_effect_animation(image_name):
    """Build a pyglet Animation from a sprite sheet of 192x192 tiles.

    Rows are walked top-first; each frame is centered and shown for 1/60 s,
    and the final frame's duration is None so the animation stops there.
    """
    img = pyglet.image.load(image_name)
    columns = img.width // 192
    rows = img.height // 192
    effect_seq = pyglet.image.ImageGrid(
        img, rows, columns
    ).get_texture_sequence()
    effect_frames = []
    # ImageGrid indexes from the bottom row, so iterate rows in reverse to
    # play the sheet from its top row down.
    for row in range(rows, 0, -1):
        end = row * columns
        start = end - (columns - 1) - 1
        for effect_frame in effect_seq[start:end:1]:
            # center_image comes from functions (star import) -- presumably
            # moves the anchor to the frame's center; confirm there.
            effect_frame = center_image(effect_frame)
            effect_frames.append(AnimationFrame(effect_frame, 1 / 60))
    # freeze on the last frame instead of looping
    effect_frames[(rows * columns) - 1].duration = None
    return Animation(effect_frames)
class Animator:
    """Loads every PNG sprite sheet in ANIM_PATH as an animation and
    manages the live effect sprites spawned from them."""

    def __init__(self, window):
        self.window = window
        # animation name (file name without path/extension) -> Animation
        self.animations = dict()
        # sprites currently playing, drawn each frame by render()
        self.loaded_anims = []
        for effect_file in glob.glob(ANIM_PATH + '*.png'):
            key = effect_file[len(ANIM_PATH):-4]
            self.animations[key] = create_effect_animation(effect_file)

    def spawn_anim(self, animname, pos, scale=1.0, rotation=0):
        """Spawn a managed sprite at a window position; unknown names are
        logged and ignored."""
        try:
            a = EffectSprite(self.animations[animname])
        except KeyError as e:
            logging.error("No animation by that name found: {}".format(e))
        else:
            a.animator = self
            # remember the world position so render() can re-project it
            a.game_pos = self.window.get_gamepos(*pos)
            a.position = pos
            a.scale = scale
            a.rotation = rotation
            self.loaded_anims.append(a)

    def get_anim(self, animname):
        """Return an unmanaged sprite for the caller to own, or None
        (implicitly) if the name is unknown."""
        try:
            a = EffectSprite(self.animations[animname])
        except KeyError as e:
            logging.error("No animation by that name found: {}".format(e))
        else:
            a.animator = self
            return a

    def set_duration(self, anim, duration):
        # Apply one frame duration to every frame of the animation.
        if isinstance(anim, pyglet.image.Animation):
            for f in anim.frames:
                f.duration = duration

    def set_anchor(self, anim, x="no", y="no"):
        # "no" (the string sentinel) leaves that axis' anchor untouched.
        if isinstance(anim, pyglet.image.Animation):
            for f in anim.frames:
                if type(x) is not str:
                    f.image.anchor_x = x
                if type(y) is not str:
                    f.image.anchor_y = y

    def render(self):
        """Re-project each live sprite from world to window space and draw."""
        for a in self.loaded_anims:
            a.position = self.window.get_windowpos(*a.game_pos, precise=True)
            a.draw()
class EffectSprite(pyglet.sprite.Sprite):
    """Sprite that cleans itself up when its animation finishes."""

    def on_animation_end(self):
        try:
            # managed sprites live in the animator's draw list
            self.animator.loaded_anims.remove(self)
        except ValueError:
            # unmanaged sprite: tell its owner the animation is over
            if hasattr(self, "owner"):
                self.owner.anim = False
        self.delete()
class HandAnimAttack:
    """Animates a hand limb jabbing outward and back over ``duration``
    seconds, then removes itself from the owner's child objects."""

    def __init__(self, owner, hand, duration=0.5, reach=12):
        # maximum forward displacement of the hand, in pixels
        self.max_offset = reach
        self.owner = owner
        self.hand = hand
        self.cur_time = 0
        self.max_time = duration

    def update(self, dt):
        if self.cur_time < self.max_time:
            # smooth_in_out comes from functions (star import); presumably
            # an ease-in/ease-out curve over [0, 1] -- confirm there.
            value = smooth_in_out(
                self.cur_time / self.max_time * 1
            )
            offset_y = self.max_offset * value
            offset_x = self.max_offset // 3 * value
            # if self.owner.limbs[self.hand].glow:
            #     self.owner.limbs[self.hand].glow.opacity = value * 255
            #     self.owner.limbs[self.hand].glow.scale = 1 + value / 2
            # mirror the sideways component for the left vs right hand
            if self.hand == "left":
                self.owner.limbs[self.hand].offset = (offset_x, offset_y)
            elif self.hand == "right":
                self.owner.limbs[self.hand].offset = (-offset_x, offset_y)
            self.cur_time += dt
        else:
            # if self.owner.limbs[self.hand].glow:
            #     self.owner.limbs[self.hand].glow.opacity = 0
            #     self.owner.limbs[self.hand].glow.scale = 1
            # animation over: reset the limb and unregister this effect
            self.owner.limbs[self.hand].offset = (0, 0)
            self.owner.child_objects.remove(self)
class Pulse:
    """Makes a glow object pulse: its opacity and scale ease in and out once
    every *frequency* seconds."""

    def __init__(
        self, glow_object, frequency=1,
        max_opacity=1., min_scale=1., max_scale=1.5
    ):
        self.owner = glow_object
        self.speed = frequency        # cycle length, kept for external readers
        self.max_time = frequency     # one full pulse per cycle
        self.cur_time = 0
        self.scale_min = min_scale
        self.scale_max = max_scale
        self.max_opacity = max_opacity
        self.color = glow_object.color
        self.settle = False           # when True, the pulse idles at zero

    def update(self, dt):
        if self.settle or self.cur_time >= self.max_time:
            # Restart (or idle at) the beginning of the cycle.
            self.cur_time = 0
            return
        value = smooth_in_out(self.cur_time / self.max_time)
        self.owner.opacity = value * (self.max_opacity * 255)
        self.owner.scale = self.scale_min + value * (self.scale_max - self.scale_min)
        self.cur_time += dt
class HeadBobbing:
    """Bobs the owner's body up and down while it moves; eases the offset
    back to rest once the owner (nearly) stops."""

    def __init__(self, owner, duration=0.5, amount=6):
        self.owner = owner
        self.max_offset = amount   # peak vertical offset in pixels
        self.max_time = duration   # seconds per bob cycle
        self.cur_time = 0
        self.settle = False        # True while the owner is (nearly) stationary

    def update(self, dt):
        body = self.owner.body
        speed = abs(body.velocity.x) + abs(body.velocity.y)
        # Only bob while the owner is actually moving.
        self.settle = speed < 30
        if not self.settle and self.cur_time < self.max_time:
            value = smooth_in_out(self.cur_time / self.max_time)
            self.owner.body_offset = (0, self.max_offset * value)
            self.cur_time += dt
        elif self.settle:
            dx, dy = self.owner.body_offset
            if dy > 0:
                # Ease the head back down toward its rest position.
                self.owner.body_offset = (dx, dy - self.max_offset * dt * 2)
            else:
                self.owner.body_offset = (0, 0)
        else:
            # Cycle finished while still moving: restart the bob.
            self.cur_time = 0
| cc0-1.0 |
epam-mooc/edx-platform | cms/djangoapps/contentstore/views/import_export.py | 6 | 15236 | """
These views handle all actions in Studio related to import and exporting of
courses
"""
import logging
import os
import re
import shutil
import tarfile
from path import path
from tempfile import mkdtemp
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import SuspiciousOperation, PermissionDenied
from django.core.files.temp import NamedTemporaryFile
from django.core.servers.basehttp import FileWrapper
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods, require_GET
from django_future.csrf import ensure_csrf_cookie
from edxmako.shortcuts import render_to_response
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import SerializationError
from xmodule.modulestore.django import modulestore
from opaque_keys.edx.keys import CourseKey
from xmodule.modulestore.xml_importer import import_from_xml
from xmodule.modulestore.xml_exporter import export_to_xml
from .access import has_course_access
from extract_tar import safetar_extractall
from student import auth
from student.roles import CourseInstructorRole, CourseStaffRole, GlobalStaff
from util.json_request import JsonResponse
from contentstore.utils import reverse_course_url, reverse_usage_url
__all__ = ['import_handler', 'import_status_handler', 'export_handler']
log = logging.getLogger(__name__)

# Regex to capture Content-Range header ranges ("<start>-<stop>/<end>"),
# used by import_handler to reassemble chunked course uploads.
CONTENT_RE = re.compile(r"(?P<start>\d{1,11})-(?P<stop>\d{1,11})/(?P<end>\d{1,11})")
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
def import_handler(request, course_key_string):
    """
    The restful handler for importing a course.

    GET
        html: return html page for import page
        json: not supported
    POST or PUT
        json: import a course via the .tar.gz file specified in request.FILES

    Uploads may arrive in several chunks described by a Content-Range header;
    once the final chunk lands, the tarball is safely extracted, course.xml
    is located, and the course is imported into the modulestore. Progress is
    recorded in the session under "import_status" so import_status_handler
    can report it (1=extracting, 2=validating, 3=importing).
    """
    course_key = CourseKey.from_string(course_key_string)
    # Only users with studio access to this course may import into it.
    if not has_course_access(request.user, course_key):
        raise PermissionDenied()

    if 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
        if request.method == 'GET':
            raise NotImplementedError('coming soon')
        else:
            # Stage the upload under GITHUB_REPO_ROOT/<org>-<course>-<run>/.
            data_root = path(settings.GITHUB_REPO_ROOT)
            course_subdir = "{0}-{1}-{2}".format(course_key.org, course_key.course, course_key.run)
            course_dir = data_root / course_subdir

            filename = request.FILES['course-data'].name
            if not filename.endswith('.tar.gz'):
                return JsonResponse(
                    {
                        'ErrMsg': _('We only support uploading a .tar.gz file.'),
                        'Stage': 1
                    },
                    status=415
                )
            temp_filepath = course_dir / filename

            if not course_dir.isdir():
                os.mkdir(course_dir)

            logging.debug('importing course to {0}'.format(temp_filepath))

            # Get upload chunks byte ranges
            try:
                matches = CONTENT_RE.search(request.META["HTTP_CONTENT_RANGE"])
                content_range = matches.groupdict()
            except KeyError:    # Single chunk
                # no Content-Range header, so make one that will work:
                # start == 0 selects truncate-mode below, and stop == end - 1
                # marks this as the last (only) chunk.
                content_range = {'start': 0, 'stop': 1, 'end': 2}

            # stream out the uploaded files in chunks to disk
            if int(content_range['start']) == 0:
                mode = "wb+"
            else:
                # Appending a later chunk: sanity-check against what is
                # already on disk before writing.
                mode = "ab+"
                size = os.path.getsize(temp_filepath)
                # Check to make sure we haven't missed a chunk
                # This shouldn't happen, even if different instances are handling
                # the same session, but it's always better to catch errors earlier.
                if size < int(content_range['start']):
                    log.warning(
                        "Reported range %s does not match size downloaded so far %s",
                        content_range['start'],
                        size
                    )
                    return JsonResponse(
                        {
                            'ErrMsg': _('File upload corrupted. Please try again'),
                            'Stage': 1
                        },
                        status=409
                    )
                # The last request sometimes comes twice. This happens because
                # nginx sends a 499 error code when the response takes too long.
                elif size > int(content_range['stop']) and size == int(content_range['end']):
                    return JsonResponse({'ImportStatus': 1})

            with open(temp_filepath, mode) as temp_file:
                for chunk in request.FILES['course-data'].chunks():
                    temp_file.write(chunk)

            size = os.path.getsize(temp_filepath)

            if int(content_range['stop']) != int(content_range['end']) - 1:
                # More chunks coming; respond with the file-description shape
                # the upload widget expects.
                return JsonResponse({
                    "files": [{
                        "name": filename,
                        "size": size,
                        "deleteUrl": "",
                        "deleteType": "",
                        "url": reverse_course_url('import_handler', course_key),
                        "thumbnailUrl": ""
                    }]
                })
            else:   # This was the last chunk.
                # Use sessions to keep info about import progress
                session_status = request.session.setdefault("import_status", {})
                key = unicode(course_key) + filename
                session_status[key] = 1  # stage 1: extracting
                request.session.modified = True

                # Do everything from now on in a try-finally block to make sure
                # everything is properly cleaned up.
                try:
                    tar_file = tarfile.open(temp_filepath)
                    try:
                        # safetar_extractall rejects tar entries that would
                        # escape the target directory (path traversal).
                        safetar_extractall(tar_file, (course_dir + '/').encode('utf-8'))
                    except SuspiciousOperation as exc:
                        return JsonResponse(
                            {
                                'ErrMsg': 'Unsafe tar file. Aborting import.',
                                'SuspiciousFileOperationMsg': exc.args[0],
                                'Stage': 1
                            },
                            status=400
                        )
                    finally:
                        tar_file.close()

                    session_status[key] = 2  # stage 2: validating
                    request.session.modified = True

                    # find the 'course.xml' file
                    def get_all_files(directory):
                        """
                        For each file in the directory, yield a 2-tuple of (file-name,
                        directory-path)
                        """
                        for dirpath, _dirnames, filenames in os.walk(directory):
                            for filename in filenames:
                                yield (filename, dirpath)

                    def get_dir_for_fname(directory, filename):
                        """
                        Returns the dirpath for the first file found in the directory
                        with the given name. If there is no file in the directory with
                        the specified name, return None.
                        """
                        for fname, dirpath in get_all_files(directory):
                            if fname == filename:
                                return dirpath
                        return None

                    fname = "course.xml"

                    dirpath = get_dir_for_fname(course_dir, fname)

                    if not dirpath:
                        return JsonResponse(
                            {
                                'ErrMsg': _('Could not find the course.xml file in the package.'),
                                'Stage': 2
                            },
                            status=415
                        )

                    # import_from_xml wants the course dir relative to the data root.
                    dirpath = os.path.relpath(dirpath, data_root)
                    logging.debug('found course.xml at {0}'.format(dirpath))

                    course_items = import_from_xml(
                        modulestore(),
                        request.user.id,
                        settings.GITHUB_REPO_ROOT,
                        [dirpath],
                        load_error_modules=False,
                        static_content_store=contentstore(),
                        target_course_id=course_key,
                    )

                    new_location = course_items[0].location
                    logging.debug('new course at {0}'.format(new_location))

                    session_status[key] = 3  # stage 3: importing
                    request.session.modified = True

                # Send errors to client with stage at which error occurred.
                except Exception as exception:   # pylint: disable=W0703
                    log.exception(
                        "error importing course"
                    )
                    return JsonResponse(
                        {
                            'ErrMsg': str(exception),
                            'Stage': session_status[key]
                        },
                        status=400
                    )

                finally:
                    # Always remove the staging directory (and the uploaded
                    # tarball inside it), whatever the outcome.
                    shutil.rmtree(course_dir)

            return JsonResponse({'Status': 'OK'})
    elif request.method == 'GET':  # assume html
        course_module = modulestore().get_course(course_key)
        return render_to_response('import.html', {
            'context_course': course_module,
            'successful_import_redirect_url': reverse_course_url('course_handler', course_key),
            'import_status_url': reverse_course_url("import_status_handler", course_key, kwargs={'filename': "fillerName"}),
        })
    else:
        return HttpResponseNotFound()
# pylint: disable=unused-argument
@require_GET
@ensure_csrf_cookie
@login_required
def import_status_handler(request, course_key_string, filename=None):
    """
    Report the progress of a course-file import as an integer:

        0 : No status info found (import done or upload still in progress)
        1 : Extracting file
        2 : Validating.
        3 : Importing to mongo
    """
    course_key = CourseKey.from_string(course_key_string)
    if not has_course_access(request.user, course_key):
        raise PermissionDenied()

    # import_handler records progress in the session, keyed by course key +
    # filename; a missing entry simply reads as status 0.
    session_status = request.session.get("import_status", {})
    status = session_status.get(course_key_string + filename, 0)

    return JsonResponse({"ImportStatus": status})
# pylint: disable=unused-argument
@ensure_csrf_cookie
@login_required
@require_http_methods(("GET",))
def export_handler(request, course_key_string):
    """
    The restful handler for exporting a course.

    GET
        html: return html page for import page
        application/x-tgz: return tar.gz file containing exported course
        json: not supported

    Note that there are 2 ways to request the tar.gz file. The request header can specify
    application/x-tgz via HTTP_ACCEPT, or a query parameter can be used (?_accept=application/x-tgz).

    If the tar.gz file has been requested but the export operation fails, an HTML page will be returned
    which describes the error.
    """
    course_key = CourseKey.from_string(course_key_string)
    # Only users with studio access to this course may export it.
    if not has_course_access(request.user, course_key):
        raise PermissionDenied()

    course_module = modulestore().get_course(course_key)

    # an _accept URL parameter will be preferred over HTTP_ACCEPT in the header.
    requested_format = request.REQUEST.get('_accept', request.META.get('HTTP_ACCEPT', 'text/html'))

    # URL the export templates use to (re)start a tarball download.
    export_url = reverse_course_url('export_handler', course_key) + '?_accept=application/x-tgz'
    if 'application/x-tgz' in requested_format:
        name = course_module.url_name
        # NamedTemporaryFile is removed automatically when closed, i.e. after
        # the response below has been streamed.
        export_file = NamedTemporaryFile(prefix=name + '.', suffix=".tar.gz")
        # Scratch directory the course is serialized into before tarring.
        root_dir = path(mkdtemp())

        try:
            export_to_xml(modulestore(), contentstore(), course_module.id, root_dir, name)

            logging.debug(u'tar file being generated at {0}'.format(export_file.name))
            with tarfile.open(name=export_file.name, mode='w:gz') as tar_file:
                tar_file.add(root_dir / name, arcname=name)
        except SerializationError as exc:
            # One block failed to serialize: try to pin down which block
            # (and its containing unit) so the error page can link to it.
            log.exception(u'There was an error exporting course %s', course_module.id)
            unit = None
            failed_item = None
            parent = None
            try:
                failed_item = modulestore().get_item(exc.location)
                parent_loc = modulestore().get_parent_location(failed_item.location)

                if parent_loc is not None:
                    parent = modulestore().get_item(parent_loc)
                    if parent.location.category == 'vertical':
                        unit = parent
            except:  # pylint: disable=bare-except
                # if we have a nested exception, then we'll show the more generic error message
                pass

            return render_to_response('export.html', {
                'context_course': course_module,
                'in_err': True,
                'raw_err_msg': str(exc),
                'failed_module': failed_item,
                'unit': unit,
                'edit_unit_url': reverse_usage_url("container_handler", parent.location) if parent else "",
                'course_home_url': reverse_course_url("course_handler", course_key),
                'export_url': export_url
            })
        except Exception as exc:
            # Any other failure: render the generic export error page.
            log.exception('There was an error exporting course %s', course_module.id)
            return render_to_response('export.html', {
                'context_course': course_module,
                'in_err': True,
                'unit': None,
                'raw_err_msg': str(exc),
                'course_home_url': reverse_course_url("course_handler", course_key),
                'export_url': export_url
            })
        finally:
            # The serialized tree is no longer needed once the tarball exists
            # (or the export failed); the tarball itself is kept for streaming.
            shutil.rmtree(root_dir / name)

        # Success path: stream the tarball back as a file attachment.
        wrapper = FileWrapper(export_file)
        response = HttpResponse(wrapper, content_type='application/x-tgz')
        response['Content-Disposition'] = 'attachment; filename=%s' % os.path.basename(export_file.name.encode('utf-8'))
        response['Content-Length'] = os.path.getsize(export_file.name)
        return response

    elif 'text/html' in requested_format:
        return render_to_response('export.html', {
            'context_course': course_module,
            'export_url': export_url
        })

    else:
        # Only HTML or x-tgz request formats are supported (no JSON).
        return HttpResponse(status=406)
| agpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.