repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
kalliope-project/kalliope | Tests/test_notification_manager.py | 1 | 1844 | import collections
import unittest
import mock
from kalliope.core import SignalModule
from kalliope.core.NotificationManager import NotificationManager
class FakeSignal(SignalModule):
    """Minimal concrete SignalModule used as a test double.

    NotificationManager tests patch ``on_notification_received`` to observe
    dispatched notifications; the implementations here are deliberate no-ops.
    """

    def __init__(self, name=None, **kwargs):
        # Base-class __init__ is what registers the instance with the manager.
        super(FakeSignal, self).__init__(**kwargs)
        self.name = name

    def on_notification_received(self, notification=None, payload=None):
        # No-op hook; replaced with a mock in test_send_notification.
        pass

    @staticmethod
    def check_parameters(parameters):
        # No-op validation required by the SignalModule abstract interface.
        pass
class TestNotificationManager(unittest.TestCase):
    """Unit tests for the NotificationManager signal registry."""

    def setUp(self):
        # The mock.patch target differs depending on how this module is run.
        running_directly = __name__ == '__main__'
        self.test_path = (
            "__main__.FakeSignal.on_notification_received"
            if running_directly else
            "Tests.test_notification_manager.FakeSignal.on_notification_received")
        # Start every test from an empty registry.
        NotificationManager._instances.clear()

    def test_get_instances(self):
        # Creating a signal registers it with the manager.
        first_signal = FakeSignal()
        second_signal = FakeSignal()
        expected_instances = [first_signal, second_signal]
        # get_instances() yields a generator; materialise it before comparing.
        received_instances = list(NotificationManager.get_instances())
        # Registration order is irrelevant, so compare as multisets.
        self.assertTrue(collections.Counter(expected_instances) ==
                        collections.Counter(received_instances))

    def test_send_notification(self):
        # Keep a reference so the registered signal stays alive for the test.
        registered_signal = FakeSignal()
        with mock.patch(self.test_path) as mock_on_notification_received:
            sent_notification = "test"
            NotificationManager.send_notification(sent_notification)
            mock_on_notification_received.assert_called_once_with(
                notification=sent_notification, payload=None)
            mock_on_notification_received.reset_mock()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
jlorieau/mollib | analysis/datasets/ramachandran.py | 1 | 8178 | """Analysis of the Ramachandran datasets.
"""
import tarfile
import json
from itertools import izip_longest as zip_longest
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import ndimage
# Plot parameters shared by every Ramachandran figure below.
figsize = (7, 1.5)  # size of each subplot row in inches
title_fontsize = 9  # size of the font for subplot titles
title_font = 'Arial'
axes_fontsize = 9  # font size of the axes
axes_font = 'Arial'
label_fontsize = 8  # font size of the tick labels
annotation_fontsize = 7
matplotlib.rc('text', usetex=False)
matplotlib.rc('font', family='sans-serif')
matplotlib.rc('axes', linewidth=0.5)
# Use short, thin major/minor tick marks on both axes.
for i in ('xtick', 'ytick'):
    matplotlib.rcParams[i + '.major.size'] = 2.0
    matplotlib.rcParams[i + '.major.width'] = 0.75
    matplotlib.rcParams[i + '.minor.size'] = 1.0
    matplotlib.rcParams[i + '.minor.width'] = 0.5
# Import the dataset: a tar archive of per-structure JSON measurement files.
filename = '../../mollib/data/ramachandranstatistics/measurements.tar'
tfile = tarfile.open(name=filename, mode='r')
measurement_dict = {}
with tfile:
    # Extract the files
    for member in tfile.getmembers():
        f = tfile.extractfile(member)
        if f is None:
            # Non-file members (e.g. directories) cannot be extracted;
            # previously f.close() in the finally clause would have raised.
            continue
        try:
            # BUG FIX: str.strip('.json') removes any of the characters
            # '.', 'j', 's', 'o', 'n' from *both ends* of the name, mangling
            # identifiers that start or end with one of them. Remove the
            # '.json' suffix explicitly instead.
            identifier = member.name
            if identifier.endswith('.json'):
                identifier = identifier[:-len('.json')]
            string = f.read().decode()
            return_dict = json.loads(string)
            measurement_dict[identifier] = return_dict
        except (KeyError, ValueError):
            # json.loads signals malformed input with ValueError (KeyError
            # kept for backward compatibility); skip unparsable members.
            continue
        finally:
            f.close()
# Collect the (phi, psi) angle pairs, grouped by secondary-structure class.
results = {}
for identifier, return_dict in measurement_dict.items():
    for classification, phi_psi_list in return_dict.items():
        angle_list = results.setdefault(classification, list())
        # Keep only well-formed float pairs; some entries contain non-floats.
        phi_psi_list = [(i, j) for i, j in phi_psi_list
                        if isinstance(i, float) and isinstance(j, float)]
        angle_list.extend(phi_psi_list)

# Create an 'Overall' dataset pooling every classification.
phi_list = []
psi_list = []
for classification, phi_psi in results.items():
    phi, psi = zip(*phi_psi)
    phi_list += phi
    psi_list += psi
# Materialise as a list: results['Overall'] is iterated more than once below,
# and zip() returns a one-shot iterator under Python 3 (no-op on Python 2).
results['Overall'] = list(zip(phi_list, psi_list))
# Prepare the plots
# Define the plots. These are organized by keys in results and the
# corresponding color map.
# Each entry is (dataset name, color map, annotation position): the dataset
# name is a key into `results`, and the position (in axes fractions) places
# the "N=..." label showing how many measurements built the density map.
labels = (('Overall', plt.cm.plasma, (0.95, 0.01)),
          ('alpha-helix', plt.cm.Greens_r, (0.95, 0.01)),
          ('alpha-helix__N-term', plt.cm.Greens_r, (0.95, 0.01)),
          ('alpha-helix__C-term', plt.cm.Greens_r, (0.95, 0.01)),
          ('310-helix', plt.cm.Greens_r, (0.95, 0.01)),
          ('pi-helix', plt.cm.Greens_r, (0.95, 0.01)),
          ('sheet', plt.cm.Blues_r, (0.95, 0.01)),
          ('sheet__N-term', plt.cm.Blues_r, (0.95, 0.01)),
          ('sheet__C-term', plt.cm.Blues_r, (0.95, 0.01)),
          ('type I turn', plt.cm.Reds_r, (0.95, 0.01)),
          ('type II turn', plt.cm.Reds_r, (0.95, 0.01)),
          ("type I' turn", plt.cm.Reds_r, (0.95, 0.01)),
          ("type II' turn", plt.cm.Reds_r, (0.95, 0.89)),
          ('Gly', plt.cm.Greys_r, (0.95, 0.70)),
          ('No classification', plt.cm.Greys_r, (0.95, 0.01)),
          )
def grouper(n, iterable, fillvalue=None):
    """Collect data into fixed-length chunks, padding the last one.

    grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx
    """
    # n references to the *same* iterator advance in lockstep, so
    # zip_longest slices the stream into consecutive n-tuples.
    shared_iterator = iter(iterable)
    return zip_longest(*([shared_iterator] * n), fillvalue=fillvalue)
subfigure_groups = list(grouper(4, labels))

# Make rows of plots, up to four density maps per row.
for row, subfigure_labels in enumerate(subfigure_groups, 1):
    f, axarr = plt.subplots(nrows=1, ncols=len(subfigure_labels), sharey=True,
                            figsize=figsize)

    # Set the shared y-axis label (left-most subplot only).
    axarr[0].set_ylabel(r'$\psi$ (deg)', fontsize=axes_fontsize,
                        fontname=axes_font)

    for count, values in enumerate(subfigure_labels):
        # Skip empty plots (grouper pads the final row with None).
        if values is None:
            axarr[count].axis('off')
            continue
        label, cmap, annot_xy = values

        # Prepare the data and create 2d histograms
        phi_psi = results[label]
        x, y = zip(*phi_psi)
        x = np.array(x)
        y = np.array(y)
        N = len(x)
        hist2d, x, y = np.histogram2d(y, x, bins=36, range=np.array(
            [(-180., 190.), (-180., 190.)]))
        # Negative log density shifted to a zero minimum; the +0.1 offset
        # avoids log(0) in empty bins.
        hist2d = -1. * np.log(hist2d + 0.1)
        minimum = np.min(hist2d)
        hist2d -= minimum
        levels = np.arange(0.0, 5.1, 1.0)

        # Set the subplot title, prettifying the dataset name for display.
        title = (label.replace('__', ' ')
                 .replace('alpha', "$\\alpha$")
                 .replace('pi', "$\\pi$")
                 .replace("\\'", "'")
                 .replace('No', 'no'))
        axarr[count].set_title(title,
                               size=title_fontsize,
                               fontname=title_font)

        # Set the x-axis label
        axarr[count].set_xlabel(r'$\phi$ (deg)', fontsize=axes_fontsize,
                                fontname=axes_font)

        # Set the axis tick spacing
        axarr[count].xaxis.set_ticks(np.arange(-180, 181, 90))
        axarr[count].set_xlim(-180, 180)
        axarr[count].yaxis.set_ticks(np.arange(-180, 181, 90))
        axarr[count].set_ylim(-180, 180)

        # Set the axis tick label size
        axarr[count].tick_params(labelsize=label_fontsize)

        # Set the tick label font. Renamed from 'labels'/'label' to avoid
        # shadowing the module-level 'labels' tuple and the dataset 'label'.
        tick_labels = (axarr[count].get_xticklabels() +
                       axarr[count].get_yticklabels())
        for tick_label in tick_labels:
            tick_label.set_fontname(axes_font)

        # Annotate the number of measurements on the plot
        if annot_xy is not None:
            axarr[count].text(annot_xy[0], annot_xy[1],
                              "N={:,.0f}".format(N),
                              verticalalignment='bottom',
                              horizontalalignment='right',
                              transform=axarr[count].transAxes,
                              fontsize=annotation_fontsize)

        # Create the 2d contour plot
        axarr[count].contourf(x[:-1], y[:-1], hist2d, levels, cmap=cmap)

    # Save the figures. NOTE(review): 'countour' in the filenames looks like
    # a typo for 'contour' but is kept -- downstream consumers may rely on it.
    plt.savefig('ramachandran/ramachandran_countour_{}.png'.format(row),
                format='PNG', dpi=1200, bbox_inches='tight', pad_inches=0.05)
    plt.savefig('ramachandran/ramachandran_countour_{}_lowres.png'.format(row),
                format='PNG', dpi=220, bbox_inches='tight', pad_inches=0.02)
    plt.savefig('ramachandran/ramachandran_countour_{}.svg'.format(row),
                format='SVG', bbox_inches='tight', pad_inches=0.05)
# Prepare an overall contour plot with lines (all classifications pooled)
phi_psi = results['Overall']
x, y = zip(*phi_psi)
x = np.array(x)
y = np.array(y)
N = len(x)
# Convert to a histogram
hist2d, x, y = np.histogram2d(y, x, bins=36, range=np.array(
    [(-180., 185.), (-180., 185.)]))
# Negative log density shifted to a zero minimum; +0.1 avoids log(0).
hist2d = -1. * np.log(hist2d + 0.1)
minimum = np.min(hist2d)
hist2d -= minimum
# Optionally smooth the data
# hist2d = ndimage.gaussian_filter(hist2d, sigma=0.25, order=0)
# Contour levels at 98% (4.0) and 99.8%
levels = np.array([4.1, 6.1])
f, axarr = plt.subplots(nrows=1, ncols=1, sharey=True,
                        figsize=(4,4))
# Set the x-axis and y-axis labels
axarr.set_xlabel(r'$\phi$ (deg)', fontsize=axes_fontsize,
                 fontname=axes_font)
axarr.set_ylabel(r'$\psi$ (deg)', fontsize=axes_fontsize,
                 fontname=axes_font)
# Set the axis tick spacing
axarr.xaxis.set_ticks(np.arange(-180, 181, 90))
axarr.set_xlim(-180, 180)
axarr.yaxis.set_ticks(np.arange(-180, 181, 90))
axarr.set_ylim(-180, 180)
# Set the axis tick label size
axarr.tick_params(labelsize=label_fontsize)
# Outer ticks
axarr.get_yaxis().set_tick_params(direction='out')
axarr.get_xaxis().set_tick_params(direction='out')
# Create the 2d contour plot (lines only, no fill)
axarr.contour(x[:-1], y[:-1], hist2d, levels, cmap=plt.cm.Blues_r)
plt.savefig('ramachandran/ramachandran_line_countour_overall_lowres.png',
            format='PNG', dpi=220, bbox_inches='tight', pad_inches=0.02)
plt.savefig('ramachandran/ramachandran_line_countour_overall.svg',
            format='SVG', bbox_inches='tight', pad_inches=0.05)
| gpl-3.0 |
CTSRD-SOAAP/chromium-42.0.2311.135 | tools/grit/grit/gather/rc_unittest.py | 61 | 13197 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.rc'''
import os
import sys
# When executed directly, make the grit package importable from the tree.
if __name__ == '__main__':
    sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit.gather import rc
from grit import util
class RcUnittest(unittest.TestCase):
    """Tests for the .rc gatherers: sections, dialogs, menus, version blocks
    and accelerator tables, including escaping and several regressions."""

    # Accelerator-table section that the section-extraction tests expect.
    part_we_want = '''IDC_KLONKACC ACCELERATORS
BEGIN
"?", IDM_ABOUT, ASCII, ALT
"/", IDM_ABOUT, ASCII, ALT
END'''

    def testSectionFromFile(self):
        # A named section can be read from an in-memory buffer...
        buf = '''IDC_SOMETHINGELSE BINGO
BEGIN
BLA BLA
BLA BLA
END
%s
IDC_KLONK BINGOBONGO
BEGIN
HONGO KONGO
END
''' % self.part_we_want
        f = StringIO.StringIO(buf)
        out = rc.Section(f, 'IDC_KLONKACC')
        out.ReadSection()
        self.failUnless(out.GetText() == self.part_we_want)

        # ...or from a UTF-16 encoded file on disk.
        out = rc.Section(util.PathFromRoot(r'grit/testdata/klonk.rc'),
                         'IDC_KLONKACC',
                         encoding='utf-16')
        out.ReadSection()
        # Compare ignoring whitespace differences between the two sources.
        out_text = out.GetText().replace('\t', '')
        out_text = out_text.replace(' ', '')
        self.part_we_want = self.part_we_want.replace(' ', '')
        self.failUnless(out_text.strip() == self.part_we_want.strip())

    def testDialog(self):
        dlg = rc.Dialog(StringIO.StringIO('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
ICON IDI_KLONK,IDC_MYICON,14,9,20,20
LTEXT "klonk Version ""yibbee"" 1.0",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
LTEXT "Copyright (C) 2005",IDC_STATIC,49,20,119,8
DEFPUSHBUTTON "OK",IDOK,195,6,30,11,WS_GROUP
CONTROL "Jack ""Black"" Daniels",IDC_RADIO1,"Button",
BS_AUTORADIOBUTTON,46,51,84,10
// try a line where the ID is on the continuation line
LTEXT "blablablabla blablabla blablablablablablablabla blablabla",
ID_SMURF, whatever...
END
'''), 'IDD_ABOUTBOX')
        dlg.Parse()
        self.failUnless(len(dlg.GetTextualIds()) == 7)
        self.failUnless(len(dlg.GetCliques()) == 6)
        # Doubled quotes in .rc source unescape to single quotes.
        self.failUnless(dlg.GetCliques()[1].GetMessage().GetRealContent() ==
                        'klonk Version "yibbee" 1.0')

        # Translating to the source language must round-trip unchanged.
        transl = dlg.Translate('en')
        self.failUnless(transl.strip() == dlg.GetText().strip())

    def testAlternateSkeleton(self):
        dlg = rc.Dialog(StringIO.StringIO('''IDD_ABOUTBOX DIALOGEX 22, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "About"
FONT 8, "System", 0, 0, 0x0
BEGIN
LTEXT "Yipee skippy",IDC_STATIC,49,10,119,8,
SS_NOPREFIX
END
'''), 'IDD_ABOUTBOX')
        dlg.Parse()

        alt_dlg = rc.Dialog(StringIO.StringIO('''IDD_ABOUTBOX DIALOGEX 040704, 17, 230, 75
STYLE DS_SETFONT | DS_MODALFRAME | WS_CAPTION | WS_SYSMENU
CAPTION "XXXXXXXXX"
FONT 8, "System", 0, 0, 0x0
BEGIN
LTEXT "XXXXXXXXXXXXXXXXX",IDC_STATIC,110978,10,119,8,
SS_NOPREFIX
END
'''), 'IDD_ABOUTBOX')
        alt_dlg.Parse()

        # Translating against a skeleton keeps the skeleton's layout values
        # (040704, 110978) but the original dialog's translateable text.
        transl = dlg.Translate('en', skeleton_gatherer=alt_dlg)
        self.failUnless(transl.count('040704') and
                        transl.count('110978'))
        self.failUnless(transl.count('Yipee skippy'))

    def testMenu(self):
        menu = rc.Menu(StringIO.StringIO('''IDC_KLONK MENU
BEGIN
POPUP "&File """
BEGIN
MENUITEM "E&xit", IDM_EXIT
MENUITEM "This be ""Klonk"" me like", ID_FILE_THISBE
POPUP "gonk"
BEGIN
MENUITEM "Klonk && is ""good""", ID_GONK_KLONKIS
END
MENUITEM "This is a very long menu caption to try to see if we can make the ID go to a continuation line, blablabla blablabla bla blabla blablabla blablabla blablabla blablabla...",
ID_FILE_THISISAVERYLONGMENUCAPTIONTOTRYTOSEEIFWECANMAKETHEIDGOTOACONTINUATIONLINE
END
POPUP "&Help"
BEGIN
MENUITEM "&About ...", IDM_ABOUT
END
END'''), 'IDC_KLONK')
        menu.Parse()
        self.failUnless(len(menu.GetTextualIds()) == 6)
        # A whole menu becomes a single clique with one placeholder per item.
        self.failUnless(len(menu.GetCliques()) == 1)
        self.failUnless(len(menu.GetCliques()[0].GetMessage().GetPlaceholders()) ==
                        9)

        transl = menu.Translate('en')
        self.failUnless(transl.strip() == menu.GetText().strip())

    def testVersion(self):
        version = rc.Version(StringIO.StringIO('''
VS_VERSION_INFO VERSIONINFO
FILEVERSION 1,0,0,1
PRODUCTVERSION 1,0,0,1
FILEFLAGSMASK 0x3fL
#ifdef _DEBUG
FILEFLAGS 0x1L
#else
FILEFLAGS 0x0L
#endif
FILEOS 0x4L
FILETYPE 0x2L
FILESUBTYPE 0x0L
BEGIN
BLOCK "StringFileInfo"
BEGIN
BLOCK "040904e4"
BEGIN
VALUE "CompanyName", "TODO: <Company name>"
VALUE "FileDescription", "TODO: <File description>"
VALUE "FileVersion", "1.0.0.1"
VALUE "LegalCopyright", "TODO: (c) <Company name>. All rights reserved."
VALUE "InternalName", "res_format_test.dll"
VALUE "OriginalFilename", "res_format_test.dll"
VALUE "ProductName", "TODO: <Product name>"
VALUE "ProductVersion", "1.0.0.1"
END
END
BLOCK "VarFileInfo"
BEGIN
VALUE "Translation", 0x409, 1252
END
END
'''.strip()), 'VS_VERSION_INFO')
        version.Parse()
        self.failUnless(len(version.GetTextualIds()) == 1)
        self.failUnless(len(version.GetCliques()) == 4)

        transl = version.Translate('en')
        self.failUnless(transl.strip() == version.GetText().strip())

    def testRegressionDialogBox(self):
        dialog = rc.Dialog(StringIO.StringIO('''
IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE DIALOGEX 0, 0, 205, 157
STYLE DS_SETFONT | DS_FIXEDSYS | WS_CHILD
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
EDITTEXT IDC_SIDEBAR_WEATHER_NEW_CITY,3,27,112,14,ES_AUTOHSCROLL
DEFPUSHBUTTON "Add Location",IDC_SIDEBAR_WEATHER_ADD,119,27,50,14
LISTBOX IDC_SIDEBAR_WEATHER_CURR_CITIES,3,48,127,89,
LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
PUSHBUTTON "Move Up",IDC_SIDEBAR_WEATHER_MOVE_UP,134,104,50,14
PUSHBUTTON "Move Down",IDC_SIDEBAR_WEATHER_MOVE_DOWN,134,121,50,14
PUSHBUTTON "Remove",IDC_SIDEBAR_WEATHER_DELETE,134,48,50,14
LTEXT "To see current weather conditions and forecasts in the USA, enter the zip code (example: 94043) or city and state (example: Mountain View, CA).",
IDC_STATIC,3,0,199,25
CONTROL "Fahrenheit",IDC_SIDEBAR_WEATHER_FAHRENHEIT,"Button",
BS_AUTORADIOBUTTON | WS_GROUP | WS_TABSTOP,3,144,51,10
CONTROL "Celsius",IDC_SIDEBAR_WEATHER_CELSIUS,"Button",
BS_AUTORADIOBUTTON,57,144,38,10
END'''.strip()), 'IDD_SIDEBAR_WEATHER_PANEL_PROPPAGE')
        dialog.Parse()
        self.failUnless(len(dialog.GetTextualIds()) == 10)

    def testRegressionDialogBox2(self):
        dialog = rc.Dialog(StringIO.StringIO('''
IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE DIALOG DISCARDABLE 0, 0, 264, 220
STYLE WS_CHILD
FONT 8, "MS Shell Dlg"
BEGIN
GROUPBOX "Email Filters",IDC_STATIC,7,3,250,190
LTEXT "Click Add Filter to create the email filter.",IDC_STATIC,16,41,130,9
PUSHBUTTON "Add Filter...",IDC_SIDEBAR_EMAIL_ADD_FILTER,196,38,50,14
PUSHBUTTON "Remove",IDC_SIDEBAR_EMAIL_REMOVE,196,174,50,14
PUSHBUTTON "", IDC_SIDEBAR_EMAIL_HIDDEN, 200, 178, 5, 5, NOT WS_VISIBLE
LISTBOX IDC_SIDEBAR_EMAIL_LIST,16,60,230,108,
LBS_NOINTEGRALHEIGHT | WS_VSCROLL | WS_TABSTOP
LTEXT "You can prevent certain emails from showing up in the sidebar with a filter.",
IDC_STATIC,16,18,234,18
END'''.strip()), 'IDD_SIDEBAR_EMAIL_PANEL_PROPPAGE')
        dialog.Parse()
        # A control with an empty caption must still contribute its ID.
        self.failUnless('IDC_SIDEBAR_EMAIL_HIDDEN' in dialog.GetTextualIds())

    def testRegressionMenuId(self):
        menu = rc.Menu(StringIO.StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "HyperFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
        menu.Parse()
        self.failUnless(len(menu.GetTextualIds()) == 2)

    def testRegressionNewlines(self):
        menu = rc.Menu(StringIO.StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "Hyper\\nFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
        menu.Parse()
        transl = menu.Translate('en')
        # Shouldn't find \\n (the \n shouldn't be changed to \\n)
        self.failUnless(transl.find('\\\\n') == -1)

    def testRegressionTabs(self):
        menu = rc.Menu(StringIO.StringIO('''
IDR_HYPERMENU_FOLDER MENU
BEGIN
POPUP "Hyper\\tFolder"
BEGIN
MENUITEM "Open Containing Folder", IDM_OPENFOLDER
END
END'''.strip()), 'IDR_HYPERMENU_FOLDER')
        menu.Parse()
        transl = menu.Translate('en')
        # Shouldn't find \\t (the \t shouldn't be changed to \\t)
        self.failUnless(transl.find('\\\\t') == -1)

    def testEscapeUnescape(self):
        # Escape doubles quotes and backslashes; UnEscape must round-trip.
        original = 'Hello "bingo"\n How\\are\\you\\n?'
        escaped = rc.Section.Escape(original)
        self.failUnless(escaped == 'Hello ""bingo""\\n How\\\\are\\\\you\\\\n?')
        unescaped = rc.Section.UnEscape(escaped)
        self.failUnless(unescaped == original)

    def testRegressionPathsWithSlashN(self):
        # '\\n' inside a path must unescape to a backslash + 'n', not newline.
        original = '..\\\\..\\\\trs\\\\res\\\\nav_first.gif'
        unescaped = rc.Section.UnEscape(original)
        self.failUnless(unescaped == '..\\..\\trs\\res\\nav_first.gif')

    def testRegressionDialogItemsTextOnly(self):
        dialog = rc.Dialog(StringIO.StringIO('''IDD_OPTIONS_SEARCH DIALOGEX 0, 0, 280, 292
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
WS_DISABLED | WS_CAPTION | WS_SYSMENU
CAPTION "Search"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
GROUPBOX "Select search buttons and options",-1,7,5,266,262
CONTROL "",IDC_OPTIONS,"SysTreeView32",TVS_DISABLEDRAGDROP |
WS_BORDER | WS_TABSTOP | 0x800,16,19,248,218
LTEXT "Use Google site:",-1,26,248,52,8
COMBOBOX IDC_GOOGLE_HOME,87,245,177,256,CBS_DROPDOWNLIST |
WS_VSCROLL | WS_TABSTOP
PUSHBUTTON "Restore Defaults...",IDC_RESET,187,272,86,14
END'''), 'IDD_OPTIONS_SEARCH')
        dialog.Parse()
        # Only genuinely translateable texts should become cliques.
        translateables = [c.GetMessage().GetRealContent()
                          for c in dialog.GetCliques()]
        self.failUnless('Select search buttons and options' in translateables)
        self.failUnless('Use Google site:' in translateables)

    def testAccelerators(self):
        acc = rc.Accelerators(StringIO.StringIO('''\
IDR_ACCELERATOR1 ACCELERATORS
BEGIN
"^C", ID_ACCELERATOR32770, ASCII, NOINVERT
"^V", ID_ACCELERATOR32771, ASCII, NOINVERT
VK_INSERT, ID_ACCELERATOR32772, VIRTKEY, CONTROL, NOINVERT
END
'''), 'IDR_ACCELERATOR1')
        acc.Parse()
        # Accelerator tables contribute IDs but no translateable text.
        self.failUnless(len(acc.GetTextualIds()) == 4)
        self.failUnless(len(acc.GetCliques()) == 0)

        transl = acc.Translate('en')
        self.failUnless(transl.strip() == acc.GetText().strip())

    def testRegressionEmptyString(self):
        dlg = rc.Dialog(StringIO.StringIO('''\
IDD_CONFIRM_QUIT_GD_DLG DIALOGEX 0, 0, 267, 108
STYLE DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | DS_CENTER | WS_POPUP |
WS_CAPTION
EXSTYLE WS_EX_TOPMOST
CAPTION "Google Desktop"
FONT 8, "MS Shell Dlg", 400, 0, 0x1
BEGIN
DEFPUSHBUTTON "&Yes",IDYES,82,87,50,14
PUSHBUTTON "&No",IDNO,136,87,50,14
ICON 32514,IDC_STATIC,7,9,21,20
EDITTEXT IDC_TEXTBOX,34,7,231,60,ES_MULTILINE | ES_READONLY | NOT WS_BORDER
CONTROL "",
IDC_ENABLE_GD_AUTOSTART,"Button",BS_AUTOCHECKBOX |
WS_TABSTOP,33,70,231,10
END'''), 'IDD_CONFIRM_QUIT_GD_DLG')
        dlg.Parse()

        def Check():
            # 'transl' is read from the enclosing scope, so each call sees
            # the most recent translation result.
            self.failUnless(transl.count('IDC_ENABLE_GD_AUTOSTART'))
            self.failUnless(transl.count('END'))

        # Every combination of pseudo/fallback flags must keep the empty
        # CONTROL entry and the section terminator intact.
        transl = dlg.Translate('de', pseudo_if_not_available=True,
                               fallback_to_english=True)
        Check()
        transl = dlg.Translate('de', pseudo_if_not_available=True,
                               fallback_to_english=False)
        Check()
        transl = dlg.Translate('de', pseudo_if_not_available=False,
                               fallback_to_english=True)
        Check()
        transl = dlg.Translate('de', pseudo_if_not_available=False,
                               fallback_to_english=False)
        Check()
        transl = dlg.Translate('en', pseudo_if_not_available=True,
                               fallback_to_english=True)
        Check()
        transl = dlg.Translate('en', pseudo_if_not_available=True,
                               fallback_to_english=False)
        Check()
        transl = dlg.Translate('en', pseudo_if_not_available=False,
                               fallback_to_english=True)
        Check()
        transl = dlg.Translate('en', pseudo_if_not_available=False,
                               fallback_to_english=False)
        Check()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
ankur-gupta91/horizon-net-ip | openstack_dashboard/dashboards/admin/volumes/snapshots/views.py | 62 | 2756 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import memoized
from openstack_dashboard.api import cinder
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import forms as vol_snapshot_forms
from openstack_dashboard.dashboards.admin.volumes.snapshots \
import tabs as vol_snapshot_tabs
from openstack_dashboard.dashboards.project.volumes.snapshots \
import views
class UpdateStatusView(forms.ModalFormView):
    """Admin modal form view for forcing a new status on a volume snapshot."""
    form_class = vol_snapshot_forms.UpdateStatus
    modal_header = _("Update Volume Snapshot Status")
    modal_id = "update_volume_snapshot_status"
    template_name = 'admin/volumes/snapshots/update_status.html'
    submit_label = _("Update Status")
    submit_url = "horizon:admin:volumes:snapshots:update_status"
    success_url = reverse_lazy("horizon:admin:volumes:snapshots_tab")
    page_title = _("Update Volume Snapshot Status")

    @memoized.memoized_method
    def get_object(self):
        """Return the snapshot identified by the 'snapshot_id' URL kwarg.

        Memoized, so the Cinder API is hit at most once per view instance.
        On failure, shows an error and redirects to ``success_url``.
        """
        snap_id = self.kwargs['snapshot_id']
        try:
            self._object = cinder.volume_snapshot_get(self.request,
                                                      snap_id)
        except Exception:
            exceptions.handle(self.request,
                              _('Unable to retrieve volume snapshot.'),
                              redirect=self.success_url)
        return self._object

    def get_context_data(self, **kwargs):
        """Add the snapshot id and the resolved submit URL to the context."""
        context = super(UpdateStatusView, self).get_context_data(**kwargs)
        context['snapshot_id'] = self.kwargs["snapshot_id"]
        args = (self.kwargs['snapshot_id'],)
        context['submit_url'] = reverse(self.submit_url, args=args)
        return context

    def get_initial(self):
        """Seed the form with the snapshot's id and its current status."""
        snapshot = self.get_object()
        return {'snapshot_id': self.kwargs["snapshot_id"],
                'status': snapshot.status}
class DetailView(views.DetailView):
    """Admin snapshot detail page; reuses the project-side detail view."""
    tab_group_class = vol_snapshot_tabs.SnapshotDetailsTabs

    @staticmethod
    def get_redirect_url():
        # On errors, fall back to the admin volumes index.
        return reverse('horizon:admin:volumes:index')
| apache-2.0 |
Jgarcia-IAS/SAT | openerp/addons/sale_crm/__openerp__.py | 260 | 2036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP module manifest.
{
    'name': 'Opportunity to Quotation',
    'version': '1.0',
    # 'Hidden' keeps the module out of the apps list; it is pulled in
    # automatically via 'auto_install' when its dependencies are installed.
    'category': 'Hidden',
    'description': """
This module adds a shortcut on one or several opportunity cases in the CRM.
===========================================================================
This shortcut allows you to generate a sales order based on the selected case.
If different cases are open (a list), it generates one sale order by case.
The case is then closed and linked to the generated sales order.
We suggest you to install this module, if you installed both the sale and the crm
modules.
""",
    'author': 'OpenERP SA',
    'website': 'https://www.odoo.com/page/crm',
    'depends': ['sale', 'crm', 'web_kanban_gauge'],
    # Views, wizard and access rules loaded at install time.
    'data': [
        'wizard/crm_make_sale_view.xml',
        'sale_crm_view.xml',
        'security/sale_crm_security.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [],
    'test': ['test/sale_crm.yml'],
    'installable': True,
    'auto_install': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mathemage/h2o-3 | h2o-py/tests/testdir_algos/glrm/pyunit_NOPASS_simplexGLRM.py | 6 | 2989 | from __future__ import print_function
from builtins import str
from builtins import range
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glrm import H2OGeneralizedLowRankEstimator
import numpy as np
def glrm_simplex():
    """Exercise GLRM with simplex regularization on the X factor.

    Builds a rank-k training matrix whose true X rows are one-hot vectors
    (vertices of the probability simplex), fits GLRM with
    regularization_x="Simplex", then checks that the recovered X rows lie on
    the simplex, that the reported objective matches the reconstruction SSE,
    and that the imputation/error metrics are self-consistent.
    """
    m = 1000  # rows of the training matrix
    n = 100   # columns of the training matrix
    k = 10    # rank / number of archetypes
    print("Uploading random uniform matrix with rows = " + str(m) + " and cols = " + str(n))
    Y = np.random.rand(k, n)

    def ind_list(k):
        # One-hot row of length k with a single randomly placed 1.
        tmp = [0] * k
        tmp[np.random.randint(0, k)] = 1
        return tmp
    X = [ind_list(k) for x in range(m)]
    X = np.array(X)
    train = np.dot(X, Y)
    train_h2o = h2o.H2OFrame(list(zip(*train.tolist())))

    print("Run GLRM with quadratic mixtures (simplex) regularization on X")
    initial_y = np.random.rand(n, k)
    initial_y_h2o = h2o.H2OFrame(initial_y.tolist())
    glrm_h2o = H2OGeneralizedLowRankEstimator(k=k, init="User", user_y=initial_y_h2o, loss="Quadratic", regularization_x="Simplex", regularization_y="None", gamma_x=1, gamma_y=0)
    glrm_h2o.train(x=train_h2o.names, training_frame=train_h2o)
    glrm_h2o.show()

    print("Check that X matrix consists of rows within standard probability simplex")
    fit_x = h2o.get_frame(glrm_h2o._model_json['output']['representation_name'])
    fit_x_np = np.array(h2o.as_list(fit_x))

    def is_simplex(a):
        row_sum = sum(a)
        simplex = abs(row_sum - 1) < 1e-6
        # BUG FIX: the original concatenated str + float in the assert
        # message, which itself raised TypeError on assertion failure.
        assert simplex, "Got sum over row = " + str(row_sum) + ", but expected 1"
        return simplex
    np.apply_along_axis(is_simplex, 1, fit_x_np)

    print("Check final objective function value")
    fit_y = glrm_h2o._model_json['output']['archetypes'].cell_values
    fit_y_np = [[float(s) for s in list(row)[1:]] for row in fit_y]
    fit_y_np = np.array(fit_y_np)
    fit_xy = np.dot(fit_x_np, fit_y_np)
    glrm_obj = glrm_h2o._model_json['output']['objective']
    # Sum of squared reconstruction errors must match the reported objective.
    sse = np.sum(np.square(train - fit_xy))
    assert abs(glrm_obj - sse) < 1e-6, "Final objective was " + str(glrm_obj) + " but should equal " + str(sse)

    print("Impute XY and check error metrics")
    pred_h2o = glrm_h2o.predict(train_h2o)
    pred_np = np.array(h2o.as_list(pred_h2o))
    assert np.allclose(pred_np, fit_xy), "Imputation for numerics with quadratic loss should equal XY product"
    glrm_numerr = glrm_h2o._model_json['output']['training_metrics']._metric_json['numerr']
    glrm_caterr = glrm_h2o._model_json['output']['training_metrics']._metric_json['caterr']
    assert abs(glrm_numerr - glrm_obj) < 1e-3, "Numeric error was " + str(glrm_numerr) + " but should equal final objective " + str(glrm_obj)
    assert glrm_caterr == 0, "Categorical error was " + str(glrm_caterr) + " but should be zero"
# Run under the h2o test harness when standalone; run directly when imported
# by the suite runner.
if __name__ == "__main__":
    pyunit_utils.standalone_test(glrm_simplex)
else:
    glrm_simplex()
| apache-2.0 |
Venturi/cms | env/lib/python2.7/site-packages/cms/tests/test_navextender.py | 35 | 3568 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from cms.models import Page
from cms.test_utils.fixtures.navextenders import NavextendersFixture
from cms.test_utils.testcases import CMSTestCase
from cms.test_utils.util.menu_extender import TestMenu
from django.conf import settings
from django.template import Template
from menus.menu_pool import menu_pool
class NavExtenderTestCase(NavextendersFixture, CMSTestCase):
    """
    Navigation-extender tests, run against the navextenders fixture pages
    page1 .. page5 (see NavextendersFixture for the exact tree layout).
    """

    def setUp(self):
        # Swap the discovered menu registry for a minimal, predictable one
        # containing only the CMS menu and the TestMenu extender.
        if not menu_pool.discovered:
            menu_pool.discover_menus()
        self.old_menu = menu_pool.menus
        # NOTE: if we're going to directly manipulate this menu pool, we should
        # at least be marking it as not _expanded.
        menu_pool.menus = {
            'CMSMenu': self.old_menu['CMSMenu'],
            'TestMenu': TestMenu
        }
        menu_pool._expanded = False

    def tearDown(self):
        # Restore the real menu registry.
        menu_pool.menus = self.old_menu

    def _get_page(self, num):
        # Fetch a fixture page by its title ("page<num>").
        return Page.objects.get(title_set__title='page%s' % num)

    def _update_page(self, num, **stuff):
        # Bulk-update a fixture page selected by its title.
        Page.objects.filter(title_set__title='page%s' % num).update(**stuff)

    def test_menu_registration(self):
        self.assertEqual(len(menu_pool.menus), 2)
        self.assertEqual(len(menu_pool.modifiers) >= 4, True)

    def test_extenders_on_root(self):
        # Attach TestMenu to a root page and render the full tree.
        self._update_page(1, navigation_extenders="TestMenu")
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[0].children), 4)
        self.assertEqual(len(nodes[0].children[3].children), 1)
        # Hiding the extended page flattens its extender nodes into the root.
        self._update_page(1, in_navigation=False)
        menu_pool.clear(settings.SITE_ID)
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 5)

    def test_extenders_on_root_child(self):
        self._update_page(4, navigation_extenders="TestMenu")
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[1].children), 4)

    def test_extenders_on_child(self):
        """
        TestMenu has 4 flat nodes
        """
        self._update_page(1, in_navigation=False)
        self._update_page(2, navigation_extenders="TestMenu")
        menu_pool.clear(settings.SITE_ID)
        # NOTE(review): clear() is invoked twice in a row here; the second
        # call looks redundant -- confirm before removing.
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu 0 100 100 100 %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
        self.assertEqual(len(nodes[0].children), 4)
        self.assertEqual(nodes[0].children[1].get_absolute_url(), "/")

    def test_incorrect_nav_extender_in_db(self):
        # A bogus extender name stored in the DB must not break rendering.
        self._update_page(2, navigation_extenders="SomethingWrong")
        menu_pool.clear(settings.SITE_ID)
        context = self.get_context()
        tpl = Template("{% load menu_tags %}{% show_menu %}")
        tpl.render(context)
        nodes = context['children']
        self.assertEqual(len(nodes), 2)
| gpl-2.0 |
veger/ansible | lib/ansible/modules/network/ftd/ftd_file_download.py | 7 | 4441 | #!/usr/bin/python
# Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: ftd_file_download
short_description: Downloads files from Cisco FTD devices over HTTP(S)
description:
- Downloads files from Cisco FTD devices including pending changes, disk files, certificates,
troubleshoot reports, and backups.
version_added: "2.7"
author: "Cisco Systems, Inc. (@annikulin)"
options:
operation:
description:
- The name of the operation to execute.
- Only operations that return a file can be used in this module.
required: true
type: str
path_params:
description:
- Key-value pairs that should be sent as path parameters in a REST API call.
type: dict
destination:
description:
- Absolute path of where to download the file to.
- If destination is a directory, the module uses a filename from 'Content-Disposition' header specified by the server.
required: true
type: path
"""
EXAMPLES = """
- name: Download pending changes
ftd_file_download:
operation: 'getdownload'
path_params:
objId: 'default'
destination: /tmp/
"""
RETURN = """
msg:
description: The error message describing why the module failed.
returned: error
type: string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.ftd.common import FtdServerError, HTTPMethod
from ansible.module_utils.network.ftd.fdm_swagger_client import OperationField, ValidationError, FILE_MODEL_NAME
def is_download_operation(op_spec):
    """Return True when the operation is a GET that yields a file payload."""
    is_get = op_spec[OperationField.METHOD] == HTTPMethod.GET
    returns_file = op_spec[OperationField.MODEL_NAME] == FILE_MODEL_NAME
    return is_get and returns_file
def validate_params(connection, op_name, path_params):
    """Validate `path_params` against the Swagger spec of operation `op_name`.

    Raises:
        ValidationError: when the parameters are invalid, or when validation
            itself could not be performed.
    """
    field_name = 'Invalid path_params provided'
    try:
        is_valid, validation_report = connection.validate_path_params(op_name, path_params)
    except Exception as e:
        # Validation itself blew up (e.g. unknown operation) - surface the cause.
        raise ValidationError({
            field_name: str(e)
        })
    # Raised OUTSIDE the try block: in the original code this raise happened
    # inside the try, was swallowed by the blanket `except Exception`, and the
    # structured validation report was flattened to its str() representation.
    if not is_valid:
        raise ValidationError({
            field_name: validation_report
        })
def main():
    """Module entry point: parse arguments, validate them, download the file.

    Fails fast when the operation is unknown or is not a file-returning GET.
    In check mode the module exits before performing the actual download.
    """
    fields = dict(
        operation=dict(type='str', required=True),
        path_params=dict(type='dict'),
        destination=dict(type='path', required=True)
    )
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=True)
    params = module.params
    # Persistent connection to the FTD device (httpapi plugin socket).
    connection = Connection(module._socket_path)
    op_name = params['operation']
    op_spec = connection.get_operation_spec(op_name)
    if op_spec is None:
        module.fail_json(msg='Operation with specified name is not found: %s' % op_name)
    if not is_download_operation(op_spec):
        module.fail_json(
            msg='Invalid download operation: %s. The operation must make GET request and return a file.' %
                op_name)
    try:
        path_params = params['path_params']
        validate_params(connection, op_name, path_params)
        if module.check_mode:
            # Nothing is transferred in check mode; validation above suffices.
            module.exit_json(changed=False)
        connection.download_file(op_spec[OperationField.URL], params['destination'], path_params)
        # Downloading never mutates device state, hence changed=False.
        module.exit_json(changed=False)
    except FtdServerError as e:
        module.fail_json(msg='Download request for %s operation failed. Status code: %s. '
                             'Server response: %s' % (op_name, e.code, e.response))
    except ValidationError as e:
        module.fail_json(msg=e.args[0])
if __name__ == '__main__':
    main()
| gpl-3.0 |
Changaco/oh-mainline | vendor/packages/gdata/src/gdata/tlslite/utils/jython_compat.py | 358 | 5270 | """Miscellaneous functions to mask Python/Jython differences."""
import os
import sha
# Runtime-compatibility shims (Python 2 era): the CPython branch uses the
# stdlib directly, while the Jython 2.1 branch re-implements missing
# builtins/types on top of Java.  Both branches expose the same public names.
if os.name != "java":
    BaseException = Exception
    from sets import Set
    import array
    import math
    def createByteArraySequence(seq):
        # 'B' = unsigned byte; holds the full 0-255 range on CPython.
        return array.array('B', seq)
    def createByteArrayZeros(howMany):
        return array.array('B', [0] * howMany)
    def concatArrays(a1, a2):
        return a1+a2
    def bytesToString(bytes):
        return bytes.tostring()
    def stringToBytes(s):
        bytes = createByteArrayZeros(0)
        bytes.fromstring(s)
        return bytes
    def numBits(n):
        # Number of bits needed to represent n; 0 for n == 0.
        if n==0:
            return 0
        return int(math.floor(math.log(n, 2))+1)
    class CertChainBase: pass
    class SelfTestBase: pass
    class ReportFuncBase: pass
    #Helper functions for working with sets (from Python 2.3)
    def iterSet(set):
        return iter(set)
    def getListFromSet(set):
        return list(set)
    #Factory function for getting a SHA1 object
    def getSHA1(s):
        return sha.sha(s)
    import sys
    import traceback
    def formatExceptionTrace(e):
        # Renders the *current* exception being handled, not `e` itself.
        newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
        return newStr
else:
    #Jython 2.1 is missing lots of python 2.3 stuff,
    #which we have to emulate here:
    import java
    import jarray
    BaseException = java.lang.Exception
    def createByteArraySequence(seq):
        if isinstance(seq, type("")): #If it's a string, convert
            seq = [ord(c) for c in seq]
        return jarray.array(seq, 'h') #use short instead of bytes, cause bytes are signed
    def createByteArrayZeros(howMany):
        return jarray.zeros(howMany, 'h') #use short instead of bytes, cause bytes are signed
    def concatArrays(a1, a2):
        l = list(a1)+list(a2)
        return createByteArraySequence(l)
    #WAY TOO SLOW - MUST BE REPLACED------------
    def bytesToString(bytes):
        return "".join([chr(b) for b in bytes])
    def stringToBytes(s):
        bytes = createByteArrayZeros(len(s))
        for count, c in enumerate(s):
            bytes[count] = ord(c)
        return bytes
    #WAY TOO SLOW - MUST BE REPLACED------------
    def numBits(n):
        if n==0:
            return 0
        n= 1L * n; #convert to long, if it isn't already
        return n.__tojava__(java.math.BigInteger).bitLength()
    #This properly creates static methods for Jython
    class staticmethod:
        def __init__(self, anycallable): self.__call__ = anycallable
    #Properties are not supported for Jython
    class property:
        def __init__(self, anycallable): pass
    #True and False have to be specially defined
    False = 0
    True = 1
    class StopIteration(Exception): pass
    def enumerate(collection):
        return zip(range(len(collection)), collection)
    class Set:
        # Minimal dict-backed replacement for the Python 2.3 sets.Set type;
        # keys of self.values are the members, the values are always None.
        def __init__(self, seq=None):
            self.values = {}
            if seq:
                for e in seq:
                    self.values[e] = None
        def add(self, e):
            self.values[e] = None
        def discard(self, e):
            if e in self.values.keys():
                del(self.values[e])
        def union(self, s):
            ret = Set()
            for e in self.values.keys():
                ret.values[e] = None
            for e in s.values.keys():
                ret.values[e] = None
            return ret
        def issubset(self, other):
            for e in self.values.keys():
                if e not in other.values.keys():
                    return False
            return True
        def __nonzero__( self):
            return len(self.values.keys())
        def __contains__(self, e):
            return e in self.values.keys()
    def iterSet(set):
        return set.values.keys()
    def getListFromSet(set):
        return set.values.keys()
    """
    class JCE_SHA1:
        def __init__(self, s=None):
            self.md = java.security.MessageDigest.getInstance("SHA1")
            if s:
                self.update(s)
        def update(self, s):
            self.md.update(s)
        def copy(self):
            sha1 = JCE_SHA1()
            sha1.md = self.md.clone()
            return sha1
        def digest(self):
            digest = self.md.digest()
            bytes = jarray.zeros(20, 'h')
            for count in xrange(20):
                x = digest[count]
                if x < 0: x += 256
                bytes[count] = x
            return bytes
    """
    #Factory function for getting a SHA1 object
    #The JCE_SHA1 class is way too slow...
    #the sha.sha object we use instead is broken in the jython 2.1
    #release, and needs to be patched
    def getSHA1(s):
        #return JCE_SHA1(s)
        return sha.sha(s)
    #Adjust the string to an array of bytes
    def stringToJavaByteArray(s):
        # Java bytes are signed: map 128..255 down to -128..-1.
        bytes = jarray.zeros(len(s), 'b')
        for count, c in enumerate(s):
            x = ord(c)
            if x >= 128: x -= 256
            bytes[count] = x
        return bytes
    import sys
    import traceback
    def formatExceptionTrace(e):
        # Renders the *current* exception being handled, not `e` itself.
        newStr = "".join(traceback.format_exception(sys.exc_type, sys.exc_value, sys.exc_traceback))
        return newStr
| agpl-3.0 |
takis/odoo | openerp/addons/test_converter/tests/test_html.py | 257 | 13533 | # -*- encoding: utf-8 -*-
import json
import os
import datetime
from lxml import etree
from openerp.tests import common
from openerp.tools import html_escape as e
from openerp.addons.base.ir import ir_qweb
directory = os.path.dirname(__file__)
class TestExport(common.TransactionCase):
    """Base class for qweb field-converter tests.

    Subclasses set `_model` to the name of the model whose fields are
    rendered to HTML by the ir.qweb.field.* converters.
    """
    # Name of the model under test; set by concrete subclasses.
    _model = None
    def setUp(self):
        super(TestExport, self).setUp()
        self.Model = self.registry(self._model)
    def get_field(self, name):
        # Field object from the model's _fields registry.
        return self.Model._fields[name]
    def get_converter(self, name, type=None):
        # Build a callable rendering a value of field `name` to escaped HTML.
        # The most specific converter model wins: explicit `type` first, then
        # the field's own type, then the generic 'ir.qweb.field' fallback.
        field = self.get_field(name)
        for postfix in type, field.type, '':
            fs = ['ir', 'qweb', 'field']
            if postfix is None: continue
            if postfix: fs.append(postfix)
            try:
                model = self.registry('.'.join(fs))
                break
            except KeyError: pass
        return lambda value, options=None, context=None: e(model.value_to_html(
            self.cr, self.uid, value, field, options=options, context=context))
class TestBasicExport(TestExport):
    """Converter tests targeting the generic `test_converter.test_model`."""
    _model = 'test_converter.test_model'
class TestCharExport(TestBasicExport):
    """Rendering of 'char' fields through the qweb converter."""
    def test_char(self):
        convert = self.get_converter('char')
        for raw, rendered in [('foo', 'foo'),
                              ("foo<bar>", "foo<bar>")]:
            self.assertEqual(convert(raw), rendered)
class TestIntegerExport(TestBasicExport):
    """Rendering of 'integer' fields through the qweb converter."""
    def test_integer(self):
        render = self.get_converter('integer')
        self.assertEqual(render(42), "42")
class TestFloatExport(TestBasicExport):
    """Rendering of 'float' and fixed-precision 'numeric' fields."""
    def setUp(self):
        super(TestFloatExport, self).setUp()
        # Enable thousands grouping on the default language so the grouping
        # assertions below are meaningful.
        self.registry('res.lang').write(self.cr, self.uid, [1], {
            'grouping': '[3,0]'
        })
    def test_float(self):
        # Plain floats keep their natural precision and get grouped.
        converter = self.get_converter('float')
        value = converter(42.0)
        self.assertEqual(value, "42.0")
        value = converter(42.0100)
        self.assertEqual(value, "42.01")
        value = converter(42.01234)
        self.assertEqual(value, "42.01234")
        value = converter(1234567.89)
        self.assertEqual(value, '1,234,567.89')
    def test_numeric(self):
        # 'numeric' rendering rounds/pads to two decimal digits.
        converter = self.get_converter('numeric')
        value = converter(42.0)
        self.assertEqual(value, '42.00')
        value = converter(42.01234)
        self.assertEqual(value, '42.01')
class TestCurrencyExport(TestExport):
    """Monetary widget rendering: symbol position, spacing and precision."""
    _model = 'test_converter.monetary'
    def setUp(self):
        super(TestCurrencyExport, self).setUp()
        self.Currency = self.registry('res.currency')
        self.base = self.create(self.Currency, name="Source", symbol=u'source')
    def create(self, model, context=None, **values):
        # Create a record and return it as a browse record.
        return model.browse(
            self.cr, self.uid,
            model.create(self.cr, self.uid, values, context=context),
            context=context)
    def convert(self, obj, dest):
        # Render obj.value with the monetary widget, displaying currency
        # `dest` (exposed to qweb as 'c2'); branding attributes are enabled.
        converter = self.registry('ir.qweb.field.monetary')
        options = {
            'widget': 'monetary',
            'display_currency': 'c2'
        }
        context = dict(inherit_branding=True)
        converted = converter.to_html(
            self.cr, self.uid, 'value', obj, options,
            etree.Element('span'),
            {'field': 'obj.value', 'field-options': json.dumps(options)},
            '', ir_qweb.QWebContext(self.cr, self.uid, {'obj': obj, 'c2': dest, }),
            context=context,
        )
        return converted
    def test_currency_post(self):
        # Default currency position: symbol after the amount.
        currency = self.create(self.Currency, name="Test", symbol=u"test")
        obj = self.create(self.Model, value=0.12)
        converted = self.convert(obj, dest=currency)
        self.assertEqual(
            converted,
            '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
            'data-oe-field="value" data-oe-type="monetary" '
            'data-oe-expression="obj.value">'
            '<span class="oe_currency_value">0.12</span>'
            u'\N{NO-BREAK SPACE}{symbol}</span>'.format(
                obj=obj,
                symbol=currency.symbol.encode('utf-8')
            ).encode('utf-8'),)
    def test_currency_pre(self):
        # position='before': symbol precedes the amount.
        currency = self.create(
            self.Currency, name="Test", symbol=u"test", position='before')
        obj = self.create(self.Model, value=0.12)
        converted = self.convert(obj, dest=currency)
        self.assertEqual(
            converted,
            '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
            'data-oe-field="value" data-oe-type="monetary" '
            'data-oe-expression="obj.value">'
            u'{symbol}\N{NO-BREAK SPACE}'
            '<span class="oe_currency_value">0.12</span>'
            '</span>'.format(
                obj=obj,
                symbol=currency.symbol.encode('utf-8')
            ).encode('utf-8'),)
    def test_currency_precision(self):
        """ Precision should be the currency's, not the float field's
        """
        currency = self.create(self.Currency, name="Test", symbol=u"test",)
        obj = self.create(self.Model, value=0.1234567)
        converted = self.convert(obj, dest=currency)
        self.assertEqual(
            converted,
            '<span data-oe-model="{obj._model._name}" data-oe-id="{obj.id}" '
            'data-oe-field="value" data-oe-type="monetary" '
            'data-oe-expression="obj.value">'
            '<span class="oe_currency_value">0.12</span>'
            u'\N{NO-BREAK SPACE}{symbol}</span>'.format(
                obj=obj,
                symbol=currency.symbol.encode('utf-8')
            ).encode('utf-8'),)
class TestTextExport(TestBasicExport):
    """Rendering of 'text' fields: newlines become <br>, markup passes through
    as stored (escaping is applied by the surrounding `e()` wrapper)."""
    def test_text(self):
        converter = self.get_converter('text')
        value = converter("This is my text-kai")
        self.assertEqual(value, "This is my text-kai")
        # NOTE(review): the expected strings below must stay byte-identical to
        # the converter output, including leading whitespace on each line.
        value = converter("""
            . The current line (address) in the buffer.
            $ The last line in the buffer.
            n The nth, line in the buffer where n is a number in the range [0,$].
            $ The last line in the buffer.
            - The previous line. This is equivalent to -1 and may be repeated with cumulative effect.
            -n The nth previous line, where n is a non-negative number.
            + The next line. This is equivalent to +1 and may be repeated with cumulative effect.
        """)
        self.assertEqual(value, """<br>
            . The current line (address) in the buffer.<br>
            $ The last line in the buffer.<br>
            n The nth, line in the buffer where n is a number in the range [0,$].<br>
            $ The last line in the buffer.<br>
            - The previous line. This is equivalent to -1 and may be repeated with cumulative effect.<br>
            -n The nth previous line, where n is a non-negative number.<br>
            + The next line. This is equivalent to +1 and may be repeated with cumulative effect.<br>
        """)
        value = converter("""
        fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a>
        fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i>
        """)
        self.assertEqual(value, """<br>
        fgdkls;hjas;lj <b>fdslkj</b> d;lasjfa lkdja <a href=http://spam.com>lfks</a><br>
        fldkjsfhs <i style="color: red"><a href="http://spamspam.com">fldskjh</a></i><br>
        """)
class TestMany2OneExport(TestBasicExport):
    """Rendering of many2one fields: the related record's display name."""
    def test_many2one(self):
        Sub = self.registry('test_converter.test_model.sub')
        id0 = self.Model.create(self.cr, self.uid, {
            'many2one': Sub.create(self.cr, self.uid, {'name': "Foo"})
        })
        id1 = self.Model.create(self.cr, self.uid, {
            'many2one': Sub.create(self.cr, self.uid, {'name': "Fo<b>o</b>"})
        })
        def converter(record):
            # Render the record's many2one field through the dedicated model.
            model = self.registry('ir.qweb.field.many2one')
            return e(model.record_to_html(self.cr, self.uid, 'many2one', record))
        value = converter(self.Model.browse(self.cr, self.uid, id0))
        self.assertEqual(value, "Foo")
        value = converter(self.Model.browse(self.cr, self.uid, id1))
        self.assertEqual(value, "Fo<b>o</b>")
class TestBinaryExport(TestBasicExport):
    """Image converter: accepts real images, rejects other binary content."""
    def test_image(self):
        field = self.get_field('binary')
        converter = self.registry('ir.qweb.field.image')
        # A real JPEG renders as an inline data: URI.
        with open(os.path.join(directory, 'test_vectors', 'image'), 'rb') as f:
            content = f.read()
        encoded_content = content.encode('base64')
        value = e(converter.value_to_html(
            self.cr, self.uid, encoded_content, field))
        self.assertEqual(
            value, '<img src="data:image/jpeg;base64,%s">' % (
                encoded_content
            ))
        # Non-image binaries (PDF, PPTX) must be rejected.
        with open(os.path.join(directory, 'test_vectors', 'pdf'), 'rb') as f:
            content = f.read()
        with self.assertRaises(ValueError):
            e(converter.value_to_html(
                self.cr, self.uid, 'binary', content.encode('base64'), field))
        with open(os.path.join(directory, 'test_vectors', 'pptx'), 'rb') as f:
            content = f.read()
        with self.assertRaises(ValueError):
            e(converter.value_to_html(
                self.cr, self.uid, 'binary', content.encode('base64'), field))
class TestSelectionExport(TestBasicExport):
    """Selection fields render their (translated) label, not the raw key."""
    def test_selection(self):
        [record] = self.Model.browse(self.cr, self.uid, [self.Model.create(self.cr, self.uid, {
            'selection': 2,
            'selection_str': 'C',
        })])
        converter = self.registry('ir.qweb.field.selection')
        # Integer-keyed selection.
        field_name = 'selection'
        value = converter.record_to_html(self.cr, self.uid, field_name, record)
        self.assertEqual(value, "réponse B")
        # String-keyed selection.
        field_name = 'selection_str'
        value = converter.record_to_html(self.cr, self.uid, field_name, record)
        self.assertEqual(value, "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?")
class TestHTMLExport(TestBasicExport):
    """HTML fields pass through the converter unchanged."""
    def test_html(self):
        convert = self.get_converter('html')
        markup = '<span>span</span>'
        self.assertEqual(convert(markup), markup)
class TestDatetimeExport(TestBasicExport):
    """Date/datetime rendering: localization, timezone shift, custom format."""
    def setUp(self):
        super(TestDatetimeExport, self).setUp()
        # set user tz to known value
        Users = self.registry('res.users')
        Users.write(self.cr, self.uid, self.uid, {
            'tz': 'Pacific/Niue'
        }, context=None)
    def test_date(self):
        # Dates are not timezone-shifted, only reformatted.
        converter = self.get_converter('date')
        value = converter('2011-05-03')
        # default lang/format is US
        self.assertEqual(value, '05/03/2011')
    def test_datetime(self):
        # Datetimes are shifted to the user's tz (Pacific/Niue = UTC-11).
        converter = self.get_converter('datetime')
        value = converter('2011-05-03 11:12:13')
        # default lang/format is US
        self.assertEqual(value, '05/03/2011 00:12:13')
    def test_custom_format(self):
        # An explicit babel `format` option overrides the locale default.
        converter = self.get_converter('datetime')
        converter2 = self.get_converter('date')
        opts = {'format': 'MMMM d'}
        value = converter('2011-03-02 11:12:13', options=opts)
        value2 = converter2('2001-03-02', options=opts)
        self.assertEqual(
            value,
            'March 2'
        )
        self.assertEqual(
            value2,
            'March 2'
        )
class TestDurationExport(TestBasicExport):
    """Duration widget: localized unit names, mandatory 'unit' option."""
    def setUp(self):
        super(TestDurationExport, self).setUp()
        # needs to have lang installed otherwise falls back on en_US
        self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
    def test_negative(self):
        # Negative durations are rejected.
        converter = self.get_converter('float', 'duration')
        with self.assertRaises(ValueError):
            converter(-4)
    def test_missing_unit(self):
        # The 'unit' option is required.
        converter = self.get_converter('float', 'duration')
        with self.assertRaises(ValueError):
            converter(4)
    def test_basic(self):
        converter = self.get_converter('float', 'duration')
        result = converter(4, {'unit': 'hour'}, {'lang': 'fr_FR'})
        self.assertEqual(result, u'4 heures')
        result = converter(50, {'unit': 'second'}, {'lang': 'fr_FR'})
        self.assertEqual(result, u'50 secondes')
    def test_multiple(self):
        # Fractional/overflowing values are split across adjacent units.
        converter = self.get_converter('float', 'duration')
        result = converter(1.5, {'unit': 'hour'}, {'lang': 'fr_FR'})
        self.assertEqual(result, u"1 heure 30 minutes")
        result = converter(72, {'unit': 'second'}, {'lang': 'fr_FR'})
        self.assertEqual(result, u"1 minute 12 secondes")
class TestRelativeDatetime(TestBasicExport):
    # not sure how a test based on "current time" should be tested. Even less
    # so as it would mostly be a test of babel...
    def setUp(self):
        super(TestRelativeDatetime, self).setUp()
        # needs to have lang installed otherwise falls back on en_US
        self.registry('res.lang').load_lang(self.cr, self.uid, 'fr_FR')
    def test_basic(self):
        # One hour in the past renders as a localized relative delta.
        converter = self.get_converter('datetime', 'relative')
        t = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
        result = converter(t, context={'lang': 'fr_FR'})
        self.assertEqual(result, u"il y a 1 heure")
| agpl-3.0 |
flacjacket/sympy | sympy/functions/elementary/tests/test_complexes.py | 2 | 13017 | from sympy import (symbols, Symbol, sqrt, oo, re, nan, im, sign, I, E, log,
pi, arg, conjugate, expand, exp, sin, cos, Function, Abs, zoo, atan2,
S, DiracDelta, Rational, Heaviside)
from sympy.utilities.pytest import XFAIL
from sympy.utilities.randtest import comp
def N_equals(a, b):
    """Check whether two complex numbers are numerically close"""
    lhs = a.n()
    rhs = b.n()
    return comp(lhs, rhs, 1.e-6)
def test_re():
    """Real-part extraction: numbers, assumption-carrying symbols, algebra."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert re(nan) == nan
    assert re(oo) == oo
    assert re(-oo) == -oo
    assert re(0) == 0
    assert re(1) == 1
    assert re(-1) == -1
    assert re(E) == E
    assert re(-E) == -E
    assert re(x) == re(x)
    assert re(x*I) == -im(x)
    assert re(r*I) == 0
    assert re(r) == r
    assert re(i*I) == I * i
    assert re(i) == 0
    assert re(x + y) == re(x + y)
    assert re(x + r) == re(x) + r
    assert re(re(x)) == re(x)
    assert re(2 + I) == 2
    assert re(x + I) == re(x)
    assert re(x + y*I) == re(x) - im(y)
    assert re(x + r*I) == re(x)
    assert re(log(2*I)) == log(2)
    assert re((2+I)**2).expand(complex=True) == 3
    assert re(conjugate(x)) == re(x)
    assert conjugate(re(x)) == re(x)
    assert re(x).as_real_imag() == (re(x), 0)
    assert re(i*r*x).diff(r) == re(i*x)
    assert re(i*r*x).diff(i) == -I * im(r*x)
    assert re(sqrt(a + b*I)) == (a**2 + b**2)**Rational(1,4)*cos(atan2(b, a)/2)
    assert re(a * (2 + b*I)) == 2*a
    assert re((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1,4)*cos(atan2(b, a)/2)/2 + Rational(1,2)
def test_im():
    """Imaginary-part extraction; mirrors the test_re cases."""
    x, y = symbols('x,y')
    a, b = symbols('a,b', real=True)
    r = Symbol('r', real=True)
    i = Symbol('i', imaginary=True)
    assert im(nan) == nan
    assert im(oo*I) == oo
    assert im(-oo*I) == -oo
    assert im(0) == 0
    assert im(1) == 0
    assert im(-1) == 0
    assert im(E*I) == E
    assert im(-E*I) == -E
    assert im(x) == im(x)
    assert im(x*I) == re(x)
    assert im(r*I) == r
    assert im(r) == 0
    assert im(i*I) == 0
    assert im(i) == -I * i
    assert im(x + y) == im(x + y)
    assert im(x + r) == im(x)
    assert im(x + r*I) == im(x) + r
    assert im(im(x)*I) == im(x)
    assert im(2 + I) == 1
    assert im(x + I) == im(x) + 1
    assert im(x + y*I) == im(x) + re(y)
    assert im(x + r*I) == im(x) + r
    assert im(log(2*I)) == pi/2
    assert im((2+I)**2).expand(complex=True) == 4
    assert im(conjugate(x)) == -im(x)
    assert conjugate(im(x)) == im(x)
    assert im(x).as_real_imag() == (im(x), 0)
    assert im(i*r*x).diff(r) == im(i*x)
    assert im(i*r*x).diff(i) == -I * re(r*x)
    assert im(sqrt(a + b*I)) == (a**2 + b**2)**Rational(1,4)*sin(atan2(b, a)/2)
    assert im(a * (2 + b*I)) == a*b
    assert im((1 + sqrt(a + b*I))/2) == \
        (a**2 + b**2)**Rational(1,4)*sin(atan2(b, a)/2)/2
def test_sign():
    """sign(): numeric values, assumption-dependent evaluation, derivative."""
    assert sign(1.2) == 1
    assert sign(-1.2) == -1
    assert sign(3*I) == I
    assert sign(-3*I) == -I
    assert sign(0) == 0
    assert sign(nan) == nan
    x = Symbol('x')
    assert sign(x).is_zero == None
    assert sign(x).doit() == sign(x)
    assert sign(1.2*x) == sign(x)
    assert sign(2*x) == sign(x)
    assert sign(I*x) == I*sign(x)
    assert sign(-2*I*x) == -I*sign(x)
    assert sign(conjugate(x)) == conjugate(sign(x))
    p = Symbol('p', positive = True)
    n = Symbol('n', negative = True)
    m = Symbol('m', negative = True)
    assert sign(2*p*x) == sign(x)
    assert sign(n*x) == -sign(x)
    assert sign(n*m*x) == sign(x)
    # Purely imaginary symbol: sign is defined, derivative uses DiracDelta.
    x = Symbol('x', imaginary=True)
    assert sign(x).is_zero == False
    assert sign(x).diff(x) == 2*DiracDelta(-I*x)
    assert sign(x).doit() == x / Abs(x)
    assert conjugate(sign(x)) == -sign(x)
    x = Symbol('x', real=True)
    assert sign(x).is_zero == None
    assert sign(x).diff(x) == 2*DiracDelta(x)
    assert sign(x).doit() == sign(x)
    assert conjugate(sign(x)) == sign(x)
    x = Symbol('x', nonzero=True)
    assert sign(x).is_zero == False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    x = Symbol('x', positive=True)
    assert sign(x).is_zero == False
    assert sign(x).doit() == x / Abs(x)
    assert sign(Abs(x)) == 1
    assert Abs(sign(x)) == 1
    x = 0
    assert sign(x).is_zero == True
    assert sign(x).doit() == 0
    assert sign(Abs(x)) == 0
    assert Abs(sign(x)) == 0
    nz = Symbol('nz', nonzero=True, integer=True)
    assert sign(nz)**2 == 1
    assert (sign(nz)**3).args == (sign(nz), 3)
def test_as_real_imag():
    """as_real_imag() on integer powers and sqrt of complex expressions."""
    n = pi**1000
    # the special code for working out the real
    # and complex parts of a power with Integer exponent
    # should not run if there is no imaginary part, hence
    # this should not hang
    assert n.as_real_imag() == (n, 0)
    # issue 3162
    x = Symbol('x')
    assert sqrt(x).as_real_imag() == \
        ((re(x)**2 + im(x)**2)**(S(1)/4)*cos(atan2(im(x), re(x))/2), \
        (re(x)**2 + im(x)**2)**(S(1)/4)*sin(atan2(im(x), re(x))/2))
    # issue 754
    a, b = symbols('a,b', real=True)
    assert ((1 + sqrt(a + b*I))/2).as_real_imag() == \
           ((a**2 + b**2)**Rational(1,4)*cos(atan2(b, a)/2)/2 + Rational(1,2), \
           (a**2 + b**2)**Rational(1,4)*sin(atan2(b, a)/2)/2)
@XFAIL
def test_sign_issue_3068():
    """Rounding/sign of pi**1000 minus its integer part (expected failure)."""
    n = pi**1000
    i = int(n)
    # `x` was previously undefined here (no module-level x exists), so the
    # test died with NameError instead of exercising the intended assertions.
    x = Symbol('x')
    assert (n - i).round() == 1  # doesn't hang
    assert sign(n - i) == 1
    # perhaps it's not possible to get the sign right when
    # only 1 digit is being requested for this situtation;
    # 2 digits works
    assert (n - x).n(1, subs={x: i}) > 0
    assert (n - x).n(2, subs={x: i}) > 0
def test_Abs():
    """Abs(): numeric values, factor extraction, powers and derivative."""
    x, y = symbols('x,y')
    # NOTE(review): the two sign() asserts below look misplaced (they belong
    # with test_sign) but are kept as-is.
    assert sign(sign(x)) == sign(x)
    assert sign(x*y).func is sign
    assert Abs(0) == 0
    assert Abs(1) == 1
    assert Abs(-1) == 1
    assert Abs(I) == 1
    assert Abs(-I) == 1
    assert Abs(nan) == nan
    assert Abs(I * pi) == pi
    assert Abs(-I * pi) == pi
    assert Abs(I * x) == Abs(x)
    assert Abs(-I * x) == Abs(x)
    assert Abs(-2*x) == 2*Abs(x)
    assert Abs(-2.0*x) == 2.0*Abs(x)
    assert Abs(2*pi*x*y) == 2*pi*Abs(x*y)
    assert Abs(conjugate(x)) == Abs(x)
    assert conjugate(Abs(x)) == Abs(x)
    a = Symbol('a', positive=True)
    assert Abs(2*pi*x*a) == 2*pi*a*Abs(x)
    assert Abs(2*pi*I*x*a) == 2*pi*a*Abs(x)
    x = Symbol('x', real=True)
    n = Symbol('n', integer=True)
    assert x**(2*n) == Abs(x)**(2*n)
    assert Abs(x).diff(x) == sign(x)
    assert abs(x) == Abs(x) # Python built-in
    assert Abs(x)**3 == x**2*Abs(x)
    assert Abs(x)**4 == x**4
    assert (Abs(x)**(3*n)).args == (Abs(x), 3*n) # leave symbolic odd unchanged
    assert (1/Abs(x)).args == (Abs(x), -1)
    assert 1/Abs(x)**3 == 1/(x**2*Abs(x))
    x = Symbol('x', imaginary=True)
    assert Abs(x).diff(x) == -sign(x)
def test_Abs_rewrite():
    """Abs rewritten via Heaviside works for real symbols, is a no-op else."""
    x = Symbol('x', real=True)
    a = Abs(x).rewrite(Heaviside).expand()
    assert a == x*Heaviside(x) - x*Heaviside(-x)
    for i in [-2, -1, 0, 1, 2]:
        assert a.subs(x, i) == abs(i)
    y = Symbol('y')
    assert Abs(y).rewrite(Heaviside) == Abs(y)
def test_Abs_real():
    # test some properties of abs that only apply
    # to real numbers
    x = Symbol('x', complex=True)
    assert sqrt(x**2) != Abs(x)
    assert Abs(x**2) != x**2
    x = Symbol('x', real=True)
    assert sqrt(x**2) == Abs(x)
    assert Abs(x**2) == x**2
    # if the symbol is zero, the following will still apply
    nn = Symbol('nn', nonnegative=True, real=True)
    np = Symbol('np', nonpositive=True, real=True)
    assert Abs(nn) == nn
    assert Abs(np) == -np
def test_Abs_properties():
    """Assumption propagation through Abs (realness, positivity, zero)."""
    x = Symbol('x')
    assert Abs(x).is_real == True
    assert Abs(x).is_positive == None
    assert Abs(x).is_nonnegative == True
    w = Symbol('w', complex=True, zero=False)
    assert Abs(w).is_real == True
    assert Abs(w).is_positive == True
    assert Abs(w).is_zero == False
    q = Symbol('q', positive=True)
    assert Abs(q).is_real == True
    assert Abs(q).is_positive == True
    assert Abs(q).is_zero == False
def test_abs():
    # this tests that abs calls Abs; don't rename to
    # test_Abs since that test is already above
    a = Symbol('a', positive=True)
    assert abs(I*(1 + a)**2) == (1 + a)**2
def test_arg():
    """Complex argument of the 8 principal directions and signed symbols."""
    assert arg(0) == nan
    assert arg(1) == 0
    assert arg(-1) == pi
    assert arg(I) == pi/2
    assert arg(-I) == -pi/2
    assert arg(1+I) == pi/4
    assert arg(-1+I) == 3*pi/4
    assert arg(1-I) == -pi/4
    p = Symbol('p', positive=True)
    assert arg(p) == 0
    n = Symbol('n', negative=True)
    assert arg(n) == pi
    x = Symbol('x')
    # arg is always real, so it equals its own conjugate.
    assert conjugate(arg(x)) == arg(x)
def test_conjugate():
    """conjugate(): involution and distribution over arithmetic."""
    a = Symbol('a', real=True)
    assert conjugate(a) == a
    assert conjugate(I*a) == -I*a
    x, y = symbols('x,y')
    assert conjugate(conjugate(x)) == x
    assert conjugate(x + y) == conjugate(x) + conjugate(y)
    assert conjugate(x - y) == conjugate(x) - conjugate(y)
    assert conjugate(x * y) == conjugate(x) * conjugate(y)
    assert conjugate(x / y) == conjugate(x) / conjugate(y)
    assert conjugate(-x) == -conjugate(x)
def test_issue936():
    """expand(trig=True) leaves Abs/sign/arg untouched (issue 936)."""
    x = Symbol('x')
    assert Abs(x).expand(trig=True) == Abs(x)
    assert sign(x).expand(trig=True) == sign(x)
    assert arg(x).expand(trig=True) == arg(x)
def test_issue3206():
    """Abs is idempotent (issue 3206)."""
    x = Symbol('x')
    assert Abs(Abs(x)) == Abs(x)
def test_issue1655_derivative_conjugate():
    """Differentiating a conjugate commutes (up to sign for imaginary vars)."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert (f(x).conjugate()).diff(x) == (f(x).diff(x)).conjugate()
    assert (f(y).conjugate()).diff(y) == -(f(y).diff(y)).conjugate()
def test_derivatives_issue1658():
    """Derivatives of re/im/Abs/arg of functions of real/imaginary symbols."""
    x = Symbol('x', real=True)
    y = Symbol('y', imaginary=True)
    f = Function('f')
    assert re(f(x)).diff(x) == re(f(x).diff(x))
    assert im(f(x)).diff(x) == im(f(x).diff(x))
    assert re(f(y)).diff(y) == -I*im(f(y).diff(y))
    assert im(f(y)).diff(y) == -I*re(f(y).diff(y))
    assert Abs(f(x)).diff(x).subs(f(x), 1+I*x).doit() == x/sqrt(1 + x**2)
    assert arg(f(x)).diff(x).subs(f(x), 1+I*x**2).doit() == 2*x/(1+x**4)
    assert Abs(f(y)).diff(y).subs(f(y), 1+y).doit() == -y/sqrt(1 - y**2)
    assert arg(f(y)).diff(y).subs(f(y), I+y**2).doit() == 2*y/(1 + y**4)
def test_periodic_argument():
    """periodic/unbranched argument and interaction with polar_lift."""
    from sympy import (periodic_argument, unbranched_argument, oo,
                       principal_branch, polar_lift, pi)
    x = Symbol('x')
    p = Symbol('p', positive = True)
    assert unbranched_argument(2 + I) == periodic_argument(2 + I, oo)
    assert unbranched_argument(1 + x) == periodic_argument(1 + x, oo)
    assert N_equals(unbranched_argument((1+I)**2), pi/2)
    assert N_equals(unbranched_argument((1-I)**2), -pi/2)
    assert N_equals(periodic_argument((1+I)**2, 3*pi), pi/2)
    assert N_equals(periodic_argument((1-I)**2, 3*pi), -pi/2)
    assert unbranched_argument(principal_branch(x, pi)) \
        == periodic_argument(x, pi)
    # polar_lift is transparent to argument extraction.
    assert unbranched_argument(polar_lift(2 + I)) == unbranched_argument(2 + I)
    assert periodic_argument(polar_lift(2 + I), 2*pi) \
        == periodic_argument(2 + I, 2*pi)
    assert periodic_argument(polar_lift(2 + I), 3*pi) \
        == periodic_argument(2 + I, 3*pi)
    assert periodic_argument(polar_lift(2 + I), pi) \
        == periodic_argument(polar_lift(2 + I), pi)
    assert unbranched_argument(polar_lift(1 + I)) == pi/4
    assert periodic_argument(2*p, p) == periodic_argument(p, p)
    assert periodic_argument(pi*p, p) == periodic_argument(p, p)
@XFAIL
def test_principal_branch_fail():
    # TODO XXX why does abs(x)._eval_evalf() not fall back to global evalf?
    # NOTE(review): `principal_branch` is only imported locally inside
    # test_periodic_argument, so this XFAIL currently fails with NameError
    # rather than the evalf issue it documents -- verify before un-XFAILing.
    assert N_equals(principal_branch((1 + I)**2, pi/2), 0)
def test_principal_branch():
    """principal_branch(): reduction mod the period and input sanitization."""
    from sympy import principal_branch, polar_lift, exp_polar
    p = Symbol('p', positive=True)
    x = Symbol('x')
    neg = Symbol('x', negative=True)
    assert principal_branch(polar_lift(x), p) == principal_branch(x, p)
    assert principal_branch(polar_lift(2 + I), p) == principal_branch(2 + I, p)
    assert principal_branch(2*x, p) == 2*principal_branch(x, p)
    assert principal_branch(1, pi) == exp_polar(0)
    assert principal_branch(-1, 2*pi) == exp_polar(I*pi)
    assert principal_branch(-1, pi) == exp_polar(0)
    assert principal_branch(exp_polar(3*pi*I)*x, 2*pi) == \
        principal_branch(exp_polar(I*pi)*x, 2*pi)
    assert principal_branch(neg*exp_polar(pi*I), 2*pi) == neg*exp_polar(-I*pi)
    assert N_equals(principal_branch((1 + I)**2, 2*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 3*pi), 2*I)
    assert N_equals(principal_branch((1 + I)**2, 1*pi), 2*I)
    # test argument sanitization
    # Invalid periods (non-real, negative, infinite) stay unevaluated.
    assert principal_branch(x, I).func is principal_branch
    assert principal_branch(x, -4).func is principal_branch
    assert principal_branch(x, -oo).func is principal_branch
    assert principal_branch(x, zoo).func is principal_branch
| bsd-3-clause |
HubSpot/vitess | test/worker.py | 1 | 27619 | #!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the robustness and resiliency of vtworkers."""
from collections import namedtuple
import urllib
import urllib2
import logging
import unittest
from vtdb import keyrange_constants
import base_sharding
import environment
import tablet
import utils
KEYSPACE_ID_TYPE = keyrange_constants.KIT_UINT64
class ShardTablets(namedtuple('ShardTablets', 'master replicas rdonlys')):
    """ShardTablets is a container for all the tablet.Tablets of a shard.
    `master` should be a single Tablet, while `replicas` and `rdonlys` should be
    lists of Tablets of the appropriate types.
    """
    @property
    def all_tablets(self):
        """Returns a list of all the tablets of the shard.
        Does not guarantee any ordering on the returned tablets.
        Returns:
            List of all tablets of the shard.
        """
        return [self.master] + self.replicas + self.rdonlys
    @property
    def replica(self):
        """Returns the first replica Tablet instance for the shard, or None."""
        if self.replicas:
            return self.replicas[0]
        else:
            return None
    @property
    def rdonly(self):
        """Returns the first rdonly Tablet instance for the shard, or None."""
        if self.rdonlys:
            return self.rdonlys[0]
        else:
            return None
    def __str__(self):
        # Multi-line human-readable summary used by the debug logging below.
        return """master %s
replicas:
%s
rdonlys:
%s
""" % (self.master,
       '\n'.join('  %s' % replica for replica in self.replicas),
       '\n'.join('  %s' % rdonly for rdonly in self.rdonlys))
# initial shard, covers everything
shard_master = tablet.Tablet()
shard_replica = tablet.Tablet()
shard_rdonly1 = tablet.Tablet()
# split shards
# range '' - 80
shard_0_master = tablet.Tablet()
shard_0_replica = tablet.Tablet()
shard_0_rdonly1 = tablet.Tablet()
# range 80 - ''
shard_1_master = tablet.Tablet()
shard_1_replica = tablet.Tablet()
shard_1_rdonly1 = tablet.Tablet()
# Convenience groupings of the tablets above, one per (source or destination)
# shard, used throughout the tests below.
all_shard_tablets = ShardTablets(shard_master, [shard_replica], [shard_rdonly1])
shard_0_tablets = ShardTablets(
    shard_0_master, [shard_0_replica], [shard_0_rdonly1])
shard_1_tablets = ShardTablets(
    shard_1_master, [shard_1_replica], [shard_1_rdonly1])
def init_keyspace():
  """Create the `test_keyspace` keyspace configured with a sharding key."""
  create_cmd = [
      'CreateKeyspace',
      '-sharding_column_name', 'keyspace_id',
      '-sharding_column_type', KEYSPACE_ID_TYPE,
      'test_keyspace',
  ]
  utils.run_vtctl(create_cmd)
def setUpModule():
  """Module-level setup: topo server, MySQL for every tablet, keyspace.

  Tears everything down again if any step fails.
  """
  try:
    environment.topo_server().setup()
    setup_procs = [
        shard_master.init_mysql(),
        shard_replica.init_mysql(),
        shard_rdonly1.init_mysql(),
        shard_0_master.init_mysql(),
        shard_0_replica.init_mysql(),
        shard_0_rdonly1.init_mysql(),
        shard_1_master.init_mysql(),
        shard_1_replica.init_mysql(),
        shard_1_rdonly1.init_mysql(),
        ]
    utils.wait_procs(setup_procs)
    init_keyspace()
    logging.debug('environment set up with the following shards and tablets:')
    logging.debug('=========================================================')
    logging.debug('TABLETS: test_keyspace/0:\n%s', all_shard_tablets)
    logging.debug('TABLETS: test_keyspace/-80:\n%s', shard_0_tablets)
    logging.debug('TABLETS: test_keyspace/80-:\n%s', shard_1_tablets)
  except:
    tearDownModule()
    raise
def tearDownModule():
  """Module-level teardown: stop MySQL, topo server, and clean up files."""
  utils.required_teardown()
  if utils.options.skip_teardown:
    return
  teardown_procs = [
      shard_master.teardown_mysql(),
      shard_replica.teardown_mysql(),
      shard_rdonly1.teardown_mysql(),
      shard_0_master.teardown_mysql(),
      shard_0_replica.teardown_mysql(),
      shard_0_rdonly1.teardown_mysql(),
      shard_1_master.teardown_mysql(),
      shard_1_replica.teardown_mysql(),
      shard_1_rdonly1.teardown_mysql(),
      ]
  # Best-effort: do not abort teardown if some MySQL instances are gone.
  utils.wait_procs(teardown_procs, raise_on_error=False)
  environment.topo_server().teardown()
  utils.kill_sub_processes()
  utils.remove_tmp_files()
  shard_master.remove_tree()
  shard_replica.remove_tree()
  shard_rdonly1.remove_tree()
  shard_0_master.remove_tree()
  shard_0_replica.remove_tree()
  shard_0_rdonly1.remove_tree()
  shard_1_master.remove_tree()
  shard_1_replica.remove_tree()
  shard_1_rdonly1.remove_tree()
class TestBaseSplitClone(unittest.TestCase, base_sharding.BaseShardingTest):
  """Abstract test base class for testing the SplitClone worker."""

  def __init__(self, *args, **kwargs):
    super(TestBaseSplitClone, self).__init__(*args, **kwargs)
    # Row count per shard; subclasses may override (see
    # TestReparentDuringWorkerCopy).
    self.num_insert_rows = utils.options.num_insert_rows

  def run_shard_tablets(
      self, shard_name, shard_tablets, create_table=True):
    """Handles all the necessary work for initially running a shard's tablets.

    This encompasses the following steps:
      1. (optional) Create db
      2. Starting vttablets and let themselves init them
      3. Waiting for the appropriate vttablet state
      4. Force reparent to the master tablet
      5. RebuildKeyspaceGraph
      6. (optional) Running initial schema setup

    Args:
      shard_name: the name of the shard to start tablets in
      shard_tablets: an instance of ShardTablets for the given shard
      create_table: boolean, True iff we should create a table on the tablets
    """
    # Start tablets.
    #
    # NOTE: The future master has to be started with type 'replica'.
    shard_tablets.master.start_vttablet(
        wait_for_state=None, init_tablet_type='replica',
        init_keyspace='test_keyspace', init_shard=shard_name,
        binlog_use_v3_resharding_mode=False)
    for t in shard_tablets.replicas:
      t.start_vttablet(
          wait_for_state=None, init_tablet_type='replica',
          init_keyspace='test_keyspace', init_shard=shard_name,
          binlog_use_v3_resharding_mode=False)
    for t in shard_tablets.rdonlys:
      t.start_vttablet(
          wait_for_state=None, init_tablet_type='rdonly',
          init_keyspace='test_keyspace', init_shard=shard_name,
          binlog_use_v3_resharding_mode=False)
    # Block until tablets are up and we can enable replication.
    # All tables should be NOT_SERVING until we run InitShardMaster.
    for t in shard_tablets.all_tablets:
      t.wait_for_vttablet_state('NOT_SERVING')
    # Reparent to choose an initial master and enable replication.
    utils.run_vtctl(
        ['InitShardMaster', '-force', 'test_keyspace/%s' % shard_name,
         shard_tablets.master.tablet_alias], auto_log=True)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    # Enforce a health check instead of waiting for the next periodic one.
    # (saves up to 1 second execution time on average)
    for t in shard_tablets.replicas:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    for t in shard_tablets.rdonlys:
      utils.run_vtctl(['RunHealthCheck', t.tablet_alias])
    # Wait for tablet state to change after starting all tablets. This allows
    # us to start all tablets at once, instead of sequentially waiting.
    # NOTE: Replication has to be enabled first or the health check will
    #       set a replica or rdonly tablet back to NOT_SERVING.
    for t in shard_tablets.all_tablets:
      t.wait_for_vttablet_state('SERVING')
    create_table_sql = (
        'create table worker_test('
        'id bigint unsigned,'
        'msg varchar(64),'
        'keyspace_id bigint(20) unsigned not null,'
        'primary key (id),'
        'index by_msg (msg)'
        ') Engine=InnoDB'
    )
    if create_table:
      utils.run_vtctl(['ApplySchema',
                       '-sql=' + create_table_sql,
                       'test_keyspace'],
                      auto_log=True)

  def copy_schema_to_destination_shards(self):
    """Copies the test table schema from a source rdonly to both destinations."""
    for keyspace_shard in ('test_keyspace/-80', 'test_keyspace/80-'):
      utils.run_vtctl(['CopySchemaShard',
                       '--exclude_tables', 'unrelated',
                       shard_rdonly1.tablet_alias,
                       keyspace_shard],
                      auto_log=True)

  def _insert_values(self, vttablet, id_offset, msg, keyspace_id, num_values):
    """Inserts values into MySQL along with the required routing comments.

    Args:
      vttablet: the Tablet instance to modify.
      id_offset: offset for the value of `id` column.
      msg: the value of `msg` column.
      keyspace_id: the value of `keyspace_id` column.
      num_values: number of rows to be inserted.
    """
    # For maximum performance, multiple values are inserted in one statement.
    # However, when the statements are too long, queries will timeout and
    # vttablet will kill them. Therefore, we chunk it into multiple statements.
    def chunks(full_list, n):
      """Yield successive n-sized chunks from full_list."""
      for i in xrange(0, len(full_list), n):
        yield full_list[i:i+n]

    max_chunk_size = 100*1000
    k = utils.uint64_to_hex(keyspace_id)
    for chunk in chunks(range(1, num_values+1), max_chunk_size):
      logging.debug('Inserting values for range [%d, %d].', chunk[0], chunk[-1])
      values_str = ''
      for i in chunk:
        if i != chunk[0]:
          values_str += ','
        values_str += "(%d, '%s', 0x%x)" % (id_offset + i, msg, keyspace_id)
      vttablet.mquery(
          'vt_test_keyspace', [
              'begin',
              'insert into worker_test(id, msg, keyspace_id) values%s '
              '/* vtgate:: keyspace_id:%s */' % (values_str, k),
              'commit'],
          write=True)

  def insert_values(self, vttablet, num_values, num_shards, offset=0,
                    keyspace_id_range=2**64):
    """Inserts simple values, one for each potential shard.

    Each row is given a message that contains the shard number, so we can easily
    verify that the source and destination shards have the same data.

    Args:
      vttablet: the Tablet instance to modify.
      num_values: The number of values to insert.
      num_shards: the number of shards that we expect to have.
      offset: amount that we should offset the `id`s by. This is useful for
        inserting values multiple times.
      keyspace_id_range: the number of distinct values that the keyspace id
        can have.
    """
    shard_width = keyspace_id_range / num_shards
    shard_offsets = [i * shard_width for i in xrange(num_shards)]
    # TODO(mberlin): Change the "id" column values from the keyspace id to a
    #                counter starting at 1. The incrementing ids must
    #                alternate between the two shards. Without this, the
    #                vtworker chunking won't be well balanced across shards.
    for shard_num in xrange(num_shards):
      self._insert_values(
          vttablet,
          shard_offsets[shard_num] + offset,
          'msg-shard-%d' % shard_num,
          shard_offsets[shard_num],
          num_values)

  def assert_shard_data_equal(
      self, shard_num, source_tablet, destination_tablet):
    """Asserts source and destination tablets have identical shard data.

    Args:
      shard_num: The shard number of the shard that we want to verify.
      source_tablet: Tablet instance of the source shard.
      destination_tablet: Tablet instance of the destination shard.
    """
    select_query = (
        'select * from worker_test where msg="msg-shard-%s" order by id asc' %
        shard_num)
    # Make sure all the right rows made it from the source to the destination
    source_rows = source_tablet.mquery('vt_test_keyspace', select_query)
    destination_rows = destination_tablet.mquery(
        'vt_test_keyspace', select_query)
    self.assertEqual(source_rows, destination_rows)
    # Make sure that there are no extra rows on the destination
    count_query = 'select count(*) from worker_test'
    destination_count = destination_tablet.mquery(
        'vt_test_keyspace', count_query)[0][0]
    self.assertEqual(destination_count, len(destination_rows))

  def run_split_diff(self, keyspace_shard, source_tablets, destination_tablets):
    """Runs a vtworker SplitDiff on the given keyspace/shard.

    Sets all former rdonly slaves back to rdonly.

    Args:
      keyspace_shard: keyspace/shard to run SplitDiff on (string)
      source_tablets: ShardTablets instance for the source shard
      destination_tablets: ShardTablets instance for the destination shard
    """
    _ = source_tablets, destination_tablets
    logging.debug('Running vtworker SplitDiff for %s', keyspace_shard)
    _, _ = utils.run_vtworker(
        ['-cell', 'test_nj',
         '--use_v3_resharding_mode=false',
         'SplitDiff',
         '--min_healthy_rdonly_tablets', '1',
         keyspace_shard], auto_log=True)

  def setUp(self):
    """Creates shards, starts the tablets, and inserts some data."""
    try:
      self.run_shard_tablets('0', all_shard_tablets)
      # create the split shards
      self.run_shard_tablets(
          '-80', shard_0_tablets, create_table=False)
      self.run_shard_tablets(
          '80-', shard_1_tablets, create_table=False)
      logging.debug('Start inserting initial data: %s rows',
                    self.num_insert_rows)
      self.insert_values(shard_master, self.num_insert_rows, 2)
      logging.debug(
          'Done inserting initial data, waiting for replication to catch up')
      utils.wait_for_replication_pos(shard_master, shard_rdonly1)
      logging.debug('Replication on source rdonly tablet is caught up')
    except:
      self.tearDown()
      raise

  def tearDown(self):
    """Does the minimum to reset topology and tablets to their initial states.

    When benchmarked, this seemed to take around 30% of the time of
    (setupModule + tearDownModule).

    FIXME(aaijazi): doing this in parallel greatly reduces the time it takes.
    See the kill_tablets method in tablet.py.
    """
    for shard_tablet in [all_shard_tablets, shard_0_tablets, shard_1_tablets]:
      for t in shard_tablet.all_tablets:
        t.reset_replication()
        t.set_semi_sync_enabled(master=False)
        t.clean_dbs()
        # _vt.vreplication should be dropped to avoid interference between
        # test cases
        t.mquery('', 'drop table if exists _vt.vreplication')
        t.kill_vttablet()
        # we allow failures here as some tablets will be gone sometimes
        # (the master tablets after an emergency reparent)
        utils.run_vtctl(['DeleteTablet', '-allow_master', t.tablet_alias],
                        auto_log=True, raise_on_error=False)
    utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'], auto_log=True)
    for shard in ['0', '-80', '80-']:
      utils.run_vtctl(
          ['DeleteShard', '-even_if_serving', 'test_keyspace/%s' % shard],
          auto_log=True)
class TestBaseSplitCloneResiliency(TestBaseSplitClone):
  """Tests that the SplitClone worker is resilient to particular failures."""

  def setUp(self):
    try:
      super(TestBaseSplitCloneResiliency, self).setUp()
      self.copy_schema_to_destination_shards()
    except:
      self.tearDown()
      raise

  def verify_successful_worker_copy_with_reparent(self, mysql_down=False):
    """Verifies that vtworker can successfully copy data for a SplitClone.

    Order of operations:
    1. Run a background vtworker
    2. Wait until the worker successfully resolves the destination masters.
    3. Reparent the destination tablets
    4. Wait until the vtworker copy is finished
    5. Verify that the worker was forced to reresolve topology and retry writes
      due to the reparent.
    6. Verify that the data was copied successfully to both new shards

    Args:
      mysql_down: boolean. If True, we take down the MySQL instances on the
        destination masters at first, then bring them back and reparent away.

    Raises:
      AssertionError if things didn't go as expected.
    """
    worker_proc, worker_port, worker_rpc_port = utils.run_vtworker_bg(
        ['--cell', 'test_nj', '--use_v3_resharding_mode=false'],
        auto_log=True)
    # --max_tps is only specified to enable the throttler and ensure that the
    # code is executed. But the intent here is not to throttle the test, hence
    # the rate limit is set very high.
    # --chunk_count is 2 because rows are currently ordered by primary key such
    # that all rows of the first shard come first and then the second shard.
    # TODO(mberlin): Remove --offline=false once vtworker ensures that the
    #                destination shards are not behind the master's replication
    #                position.
    args = ['SplitClone',
            '--offline=false',
            '--destination_writer_count', '1',
            '--min_healthy_rdonly_tablets', '1',
            '--max_tps', '9999']
    # Make the clone as slow as necessary such that there is enough time to
    # run PlannedReparent in the meantime.
    # TODO(mberlin): Once insert_values is fixed to uniformly distribute the
    #                rows across shards when sorted by primary key, remove
    #                --chunk_count 2, --min_rows_per_chunk 1 and set
    #                --source_reader_count back to 1.
    args.extend(['--source_reader_count', '2',
                 '--chunk_count', '2',
                 '--min_rows_per_chunk', '1',
                 '--write_query_max_rows', '1'])
    args.append('test_keyspace/0')
    workerclient_proc = utils.run_vtworker_client_bg(args, worker_rpc_port)

    if mysql_down:
      # vtworker is blocked at this point. This is a good time to test that its
      # throttler server is reacting to RPCs.
      self.check_throttler_service('localhost:%d' % worker_rpc_port,
                                   ['test_keyspace/-80', 'test_keyspace/80-'],
                                   9999)
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerState == cloning the data (online)',
          condition_fn=lambda v: v.get('WorkerState') == 'cloning the'
          ' data (online)')
      logging.debug('Worker is in copy state, Shutting down mysqld on destination masters.')
      utils.wait_procs(
          [shard_0_master.shutdown_mysql(),
           shard_1_master.shutdown_mysql()])
      # If MySQL is down, we wait until vtworker retried at least once to make
      # sure it reached the point where a write failed due to MySQL being down.
      # There should be two retries at least, one for each destination shard.
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerRetryCount >= 2',
          condition_fn=lambda v: v.get('WorkerRetryCount') >= 2)
      logging.debug('Worker has retried at least once per shard, starting reparent now')
      # Bring back masters. Since we test with semi-sync now, we need at least
      # one replica for the new master. This test is already quite expensive,
      # so we bring back the old master as a replica rather than having a third
      # replica up the whole time.
      logging.debug('Restarting mysqld on destination masters')
      utils.wait_procs(
          [shard_0_master.start_mysql(),
           shard_1_master.start_mysql()])
      # Reparent away from the old masters.
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
           '-new_master', shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
           '-new_master', shard_1_replica.tablet_alias], auto_log=True)
    else:
      # NOTE: There is a race condition around this:
      #   It's possible that the SplitClone vtworker command finishes before the
      #   PlannedReparentShard vtctl command, which we start below, succeeds.
      #   Then the test would fail because vtworker did not have to retry.
      #
      # To workaround this, the test takes a parameter to increase the number of
      # rows that the worker has to copy (with the idea being to slow the worker
      # down).
      # You should choose a value for num_insert_rows, such that this test
      # passes for your environment (trial-and-error...)
      # Make sure that vtworker got past the point where it picked a master
      # for each destination shard ("finding targets" state).
      utils.poll_for_vars(
          'vtworker', worker_port,
          'WorkerState == cloning the data (online)',
          condition_fn=lambda v: v.get('WorkerState') == 'cloning the'
          ' data (online)')
      logging.debug('Worker is in copy state, starting reparent now')
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/-80',
           '-new_master', shard_0_replica.tablet_alias], auto_log=True)
      utils.run_vtctl(
          ['PlannedReparentShard', '-keyspace_shard', 'test_keyspace/80-',
           '-new_master', shard_1_replica.tablet_alias], auto_log=True)

    utils.wait_procs([workerclient_proc])

    # Verify that we were forced to re-resolve and retry.
    worker_vars = utils.get_vars(worker_port)
    self.assertGreater(worker_vars['WorkerRetryCount'], 1,
                       "expected vtworker to retry each of the two reparented"
                       " destination masters at least once, but it didn't")
    self.assertNotEqual(worker_vars['WorkerRetryCount'], {},
                        "expected vtworker to retry, but it didn't")
    utils.kill_sub_process(worker_proc, soft=True)

    # Wait for the destination RDONLYs to catch up or the following offline
    # clone will try to insert rows which already exist.
    # TODO(mberlin): Remove this once SplitClone supports it natively.
    utils.wait_for_replication_pos(shard_0_replica, shard_0_rdonly1)
    utils.wait_for_replication_pos(shard_1_replica, shard_1_rdonly1)
    # Run final offline clone to enable filtered replication.
    _, _ = utils.run_vtworker(['-cell', 'test_nj',
                               '--use_v3_resharding_mode=false',
                               'SplitClone',
                               '--online=false',
                               '--min_healthy_rdonly_tablets', '1',
                               'test_keyspace/0'], auto_log=True)

    # Make sure that everything is caught up to the same replication point
    self.run_split_diff('test_keyspace/-80', all_shard_tablets, shard_0_tablets)
    self.run_split_diff('test_keyspace/80-', all_shard_tablets, shard_1_tablets)

    self.assert_shard_data_equal(0, shard_master, shard_0_tablets.replica)
    self.assert_shard_data_equal(1, shard_master, shard_1_tablets.replica)
class TestReparentDuringWorkerCopy(TestBaseSplitCloneResiliency):
  """Reparents the destination masters while a SplitClone copy is running."""

  def __init__(self, *args, **kwargs):
    super(TestReparentDuringWorkerCopy, self).__init__(*args, **kwargs)
    # Use a larger data set so the copy phase outlasts the concurrently run
    # reparent (tunable via --num_insert_rows_before_reparent_test).
    self.num_insert_rows = utils.options.num_insert_rows_before_reparent_test

  def test_reparent_during_worker_copy(self):
    """Simulates a destination reparent during a worker SplitClone copy.

    The SplitClone command should be able to gracefully handle the reparent and
    end up with the correct data on the destination.

    Note: this test has a small possibility of flaking, due to the timing issues
    involved. It's possible for the worker to finish the copy step before the
    reparent succeeds, in which case there are assertions that will fail. This
    seems better than having the test silently pass.
    """
    self.verify_successful_worker_copy_with_reparent()
class TestMysqlDownDuringWorkerCopy(TestBaseSplitCloneResiliency):
  """Takes MySQL down on the destination masters during a SplitClone copy."""

  def test_mysql_down_during_worker_copy(self):
    """This test simulates MySQL being down on the destination masters."""
    self.verify_successful_worker_copy_with_reparent(mysql_down=True)
class TestVtworkerWebinterface(unittest.TestCase):
  """Exercises the vtworker interactive web interface end-to-end over HTTP."""

  def setUp(self):
    # Run vtworker without any optional arguments to start in interactive mode.
    self.worker_proc, self.worker_port, _ = utils.run_vtworker_bg([])

  def tearDown(self):
    utils.kill_sub_process(self.worker_proc)

  def test_webinterface(self):
    """Runs the Ping debug command twice via HTTP and checks /status output."""
    worker_base_url = 'http://localhost:%d' % int(self.worker_port)
    # Wait for /status to become available.
    timeout = 10
    while True:
      done = False
      try:
        urllib2.urlopen(worker_base_url + '/status').read()
        done = True
      except urllib2.URLError:
        pass
      if done:
        break
      timeout = utils.wait_step(
          'worker /status webpage must be available', timeout)
    # Run the command twice to make sure it's idempotent.
    for _ in range(2):
      # Run Ping command.
      try:
        urllib2.urlopen(
            worker_base_url + '/Debugging/Ping',
            data=urllib.urlencode({'message': 'pong'})).read()
        raise Exception('Should have thrown an HTTPError for the redirect.')
      except urllib2.HTTPError as e:
        # The worker redirects (HTTP 307) to the status page on success.
        self.assertEqual(e.code, 307)
      # Wait for the Ping command to finish.
      utils.poll_for_vars(
          'vtworker', self.worker_port,
          'WorkerState == done',
          condition_fn=lambda v: v.get('WorkerState') == 'done')
      # Verify that the command logged something and it's available at /status.
      status = urllib2.urlopen(worker_base_url + '/status').read()
      self.assertIn(
          "Ping command was called with message: 'pong'", status,
          'Command did not log output to /status: %s' % status)
      # Reset the job.
      urllib2.urlopen(worker_base_url + '/reset').read()
      status_after_reset = urllib2.urlopen(worker_base_url + '/status').read()
      self.assertIn(
          'This worker is idle.', status_after_reset,
          '/status does not indicate that the reset was successful')
class TestMinHealthyRdonlyTablets(TestBaseSplitCloneResiliency):
  """Covers the --min_healthy_rdonly_tablets safety check of SplitClone."""

  # NOTE(review): this method name does not start with 'test_', so unittest
  # never runs it automatically — it looks like a disabled or misnamed test.
  # Confirm whether the missing prefix is intentional before renaming.
  def split_clone_fails_not_enough_health_rdonly_tablets(self):
    """Verify vtworker errors if there aren't enough healthy RDONLY tablets."""
    _, stderr = utils.run_vtworker(
        ['-cell', 'test_nj',
         '--wait_for_healthy_rdonly_tablets_timeout', '1s',
         '--use_v3_resharding_mode=false',
         'SplitClone',
         '--min_healthy_rdonly_tablets', '2',
         'test_keyspace/0'],
        auto_log=True,
        expect_fail=True)
    self.assertIn('findTargets() failed: FindWorkerTablet() failed for'
                  ' test_nj/test_keyspace/0: not enough healthy RDONLY'
                  ' tablets to choose from in (test_nj,test_keyspace/0),'
                  ' have 1 healthy ones, need at least 2', stderr)
def add_test_options(parser):
  """Register the extra command-line options used by this test module."""
  # (flag, integer default, help text) for every option we expose.
  option_specs = (
      ('--num_insert_rows', 100,
       'The number of rows, per shard, that we should insert before '
       'resharding for this test.'),
      ('--num_insert_rows_before_reparent_test', 4500,
       'The number of rows, per shard, that we should insert before '
       'running TestReparentDuringWorkerCopy (supersedes --num_insert_rows in '
       'that test). There must be enough rows such that SplitClone takes '
       'several seconds to run while we run a planned reparent.'),
  )
  for flag, default_value, help_text in option_specs:
    parser.add_option(flag, type='int', default=default_value, help=help_text)
if __name__ == '__main__':
  # Delegate to the vitess test harness, registering our extra CLI options.
  utils.main(test_options=add_test_options)
| apache-2.0 |
aflaxman/mpld3 | examples/linked_brush.py | 21 | 1136 | """
Linked Brushing Example
=======================
This example uses the standard Iris dataset and plots it with a linked brushing
tool for dynamically exploring the data. The paintbrush button at the bottom
left can be used to enable and disable the behavior.
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
import mpld3
from mpld3 import plugins, utils
iris = load_iris()
features = iris.data
target = iris.target

# Add a small random jitter so overlapping points remain distinguishable.
features += 0.1 * np.random.random(features.shape)

fig, axes = plt.subplots(4, 4, sharex="col", sharey="row", figsize=(8, 8))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95,
                    hspace=0.1, wspace=0.1)

# Scatter every pair of feature columns; the handle of the last scatter call
# is reused below when wiring up the brush plugin.
for row in range(4):
    for col in range(4):
        points = axes[3 - row, col].scatter(features[:, col], features[:, row],
                                            c=target, s=40, alpha=0.6)

# Strip the tick labels from every subplot on both axes.
for single_axes in axes.flat:
    for axis in (single_axes.xaxis, single_axes.yaxis):
        axis.set_major_formatter(plt.NullFormatter())

# Attach the linked-brush behavior and render the interactive figure.
plugins.connect(fig, plugins.LinkedBrush(points))
mpld3.show()
| bsd-3-clause |
hasteur/wikipedia_bot_tasks | pywikibot/version.py | 3 | 5134 | # -*- coding: utf-8 -*-
""" Module to determine the pywikipedia version (tag, revision and date) """
#
# (C) Merlijn 'valhallasw' van Deen, 2007-2008
# (C) xqt, 2010-2011
# (C) Pywikipedia bot team, 2007-2013
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
import os
import sys
import time
import urllib
# Module-level memoization of the version dict computed by getversiondict().
cache = None
class ParseError(Exception):
    """ Parsing went wrong """
def _get_program_dir():
_program_dir = os.path.normpath(os.path.split(os.path.dirname(__file__))[0])
# _program_dir = _program_dir.rstrip(os.path.basename(_program_dir))
## if not os.path.isabs(_program_dir):
## _program_dir = os.path.normpath(os.path.join(os.getcwd(), _program_dir))
return _program_dir
def getversion():
    """Return a human-readable version string.

    Includes an up-to-date comparison against the online repository when it
    is reachable; otherwise the comparison field reads 'n/a'.
    """
    data = getversiondict()
    try:
        online_rev = int(getversion_onlinerepo())
        local_rev = int(str(data['rev']).split()[0])
        if local_rev < online_rev:
            comparison = 'OUTDATED'
        else:
            comparison = 'ok'
    except Exception:
        comparison = 'n/a'
    data['cmp_ver'] = comparison
    return '%(tag)s (r%(rev)s, %(date)s, %(cmp_ver)s)' % data
def getversiondict():
    """Return a dict with 'tag', 'rev' and 'date' describing this checkout.

    Tries SVN metadata first, then the nightly 'version' file, then the
    $Id$ keyword inside wikipedia.py. The result is memoized in the
    module-level `cache`.
    """
    global cache
    if cache:
        return cache
    try:
        (tag, rev, date) = getversion_svn()
    except Exception:
        try:
            (tag, rev, date) = getversion_nightly()
        except Exception:
            try:
                version = getfileversion('wikipedia.py')
                if not version:
                    # fall-back in case everything breaks (should not be used)
                    import wikipedia
                    version = wikipedia.__version__
                # Split the SVN $Id$ keyword into its fields.
                id, file, rev, date, ts, author, dollar = version.split(' ')
                tag = 'wikipedia.py'
                date = time.strptime('%sT%s' % (date, ts), '%Y-%m-%dT%H:%M:%SZ')
            except: # nothing worked; version unknown (but suppress exceptions)
                # the value is most likely '$Id' + '$', it means that
                # wikipedia.py got imported without using svn at all
                return dict(tag='', rev='-1 (unknown)', date='0 (unknown)')
    datestring = time.strftime('%Y/%m/%d, %H:%M:%S', date)
    cache = dict(tag=tag, rev=rev, date=datestring)
    return cache
def getversion_svn(path=None):
    """Extract (tag, rev, date) from the .svn metadata of the checkout.

    Supports both the sqlite-based working-copy format (entries file version
    "12", svn >= 1.7) and the older plain-text .svn/entries format.

    Raises ParseError when a value could not be parsed (only checked when no
    explicit `path` was passed).
    """
    _program_dir = path or _get_program_dir()
    entries = open(os.path.join(_program_dir, '.svn/entries'))
    version = entries.readline().strip()
    # use sqlite table for new entries format
    if version == "12":
        entries.close()
        from sqlite3 import dbapi2 as sqlite
        con = sqlite.connect(os.path.join(_program_dir, ".svn/wc.db"))
        cur = con.cursor()
        cur.execute( '''select local_relpath, repos_path, revision, changed_date from nodes order by revision desc, changed_date desc''')
        name, tag, rev, date = cur.fetchone()
        con.close()
        # Strip the entry's own name to keep only the repository path prefix.
        tag = tag[:-len(name)]
        # changed_date is stored in microseconds since the epoch.
        date = time.gmtime(date/1000000)
    else:
        for i in xrange(3):
            entries.readline()
        tag = entries.readline().strip()
        t = tag.split('://')
        t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/', '')
        tag = '[%s] %s' % (t[0], t[1])
        for i in xrange(4):
            entries.readline()
        date = time.strptime(entries.readline()[:19],'%Y-%m-%dT%H:%M:%S')
        rev = entries.readline()[:-1]
        entries.close()
    if (not date or not tag or not rev) and not path:
        raise ParseError
    return (tag, rev, date)
def getversion_nightly():
    """Return (tag, rev, date) read from the 'version' file of a nightly build.

    Raises ParseError when any of the three fields is missing or empty.
    """
    # BUG FIX: the original referenced the undefined name 'wikipediatools'
    # (never imported in this module), which always raised NameError here.
    # Use the module's own _get_program_dir() helper instead, consistent with
    # getversion_svn(). Also close the file handle (previously leaked).
    data = open(os.path.join(_get_program_dir(), 'version'))
    try:
        tag = data.readline().strip()
        date = time.strptime(data.readline()[:19], '%Y-%m-%dT%H:%M:%S')
        rev = data.readline().strip()
    finally:
        data.close()
    if not date or not tag or not rev:
        raise ParseError
    return (tag, rev, date)
## Retrieve revision number of framework online repository's svnroot
#
def getversion_onlinerepo(repo=None):
    """Return the latest revision number of the online repository.

    Args:
        repo: optional URL of the svnroot listing; defaults to the pywikipedia
            trunk URL.

    Raises ParseError when the page could not be fetched or parsed.
    """
    url = repo or 'http://svn.wikimedia.org/svnroot/pywikipedia/trunk/pywikipedia/'
    rev = None
    try:
        buf = urllib.urlopen(url).read()
        # The revision appears as the fourth token of the directory listing.
        rev = buf.split(' ')[3][:-1]
    # BUG FIX: was a bare `except:`, which also converted KeyboardInterrupt
    # and SystemExit into ParseError; narrow to Exception.
    except Exception:
        raise ParseError
    return rev
## Simple version comparison
#
# Maps the sign of the scaled difference (a-b)//tol to '<', '~' or '>'.
# NOTE(review): relies on the Python 2-only builtin cmp(); calling this on
# Python 3 raises NameError — confirm whether any py3 caller exists.
cmp_ver = lambda a, b, tol=1: {-1: '<', 0: '~', 1: '>'}[cmp((a-b)//tol, 0)]
## Retrieve revision number of file (__version__ variable containing Id tag)
#  without importing it (thus can be done for any file)
#
def getfileversion(filename):
    """Return the `__version__` string of `filename` without importing it.

    `filename` is resolved relative to the program directory. Returns None
    when the file does not exist or defines no `__version__` line.
    """
    _program_dir = _get_program_dir()
    __version__ = None
    fn = os.path.join(_program_dir, filename)
    if os.path.exists(fn):
        # BUG FIX: the file handle was previously opened and never closed;
        # use try/finally (kept instead of `with` for old-Python compat).
        f = open(fn, 'r')
        try:
            for line in f:
                if line.find('__version__') == 0:
                    # Python 2 `exec` statement rebinds the local __version__.
                    # NOTE(review): this idiom does not work on Python 3,
                    # where exec() cannot modify function locals.
                    exec(line)
                    break
        finally:
            f.close()
    return __version__
## Get the tail path component and file name of the currently executing
#  script. Returns a tuple.
#
def get_executing_script():
    """Return (last path component, filename) of the running script.

    Falls back to the interpreter executable when __main__ has no __file__
    (e.g. in an interactive session).
    """
    main_module = sys.modules.get('__main__')
    script = getattr(main_module, '__file__', None)
    if script is None:
        script = sys.executable
    else:
        script = os.path.abspath(script)
    directory, filename = os.path.split(script)
    return (os.path.basename(directory), filename)
| gpl-2.0 |
ArcherSys/ArcherSys | Lib/site-packages/requests/packages/chardet/langgreekmodel.py | 2763 | 12628 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
Latin7_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
win1253_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40
79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50
253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60
78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90
253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0
253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0
110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0
35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0
124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0
9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.2851%
# first 1024 sequences:1.7001%
# rest sequences: 0.0359%
# negative sequences: 0.0148%
GreekLangModel = (
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0,
2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0,
2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0,
2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0,
0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0,
3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0,
3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0,
2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0,
2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0,
0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0,
0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0,
0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2,
0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0,
0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2,
0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0,
0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2,
0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2,
0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0,
0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0,
0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,
0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0,
0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2,
0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0,
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2,
0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2,
0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,
0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1,
0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0,
0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2,
0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2,
0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,
0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0,
0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,
0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0,
0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0,
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
# Detection model for Greek text encoded as ISO-8859-7 (Latin-7): pairs the
# byte-to-frequency-order map with the shared Greek bigram matrix above.
Latin7GreekModel = {
  'charToOrderMap': Latin7_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,
  'keepEnglishLetter': False,
  'charsetName': "ISO-8859-7"
}
# Detection model for Greek text encoded as windows-1253; shares the same
# bigram matrix, differing only in the byte-to-order mapping.
Win1253GreekModel = {
  'charToOrderMap': win1253_CharToOrderMap,
  'precedenceMatrix': GreekLangModel,
  'mTypicalPositiveRatio': 0.982851,
  'keepEnglishLetter': False,
  'charsetName': "windows-1253"
}
# flake8: noqa
| mit |
tempbottle/servo | python/mach/mach/test/test_entry_point.py | 121 | 1886 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import imp
import os
import sys
from mach.base import MachError
from mach.test.common import TestBase
from mock import patch
from mozunit import main
here = os.path.abspath(os.path.dirname(__file__))
class Entry():
    """Minimal stand-in for pkg_resources.EntryPoint used by these tests."""

    def __init__(self, providers):
        # Provider list handed back when the entry point is "loaded".
        self.providers = providers

    def load(self):
        # A real entry point resolves to a callable; emulate that with a
        # closure that reads self.providers at call time.
        return lambda: self.providers
class TestEntryPoints(TestBase):
    """Test integrating with setuptools entry points"""
    # Directory containing the provider modules loaded through the
    # (mocked) 'mach.providers' entry point.
    provider_dir = os.path.join(here, 'providers')
    def _run_mach(self):
        # Run "mach help" with provider discovery routed through the
        # 'mach.providers' entry point group.
        return TestBase._run_mach(self, ['help'], entry_point='mach.providers')
    @patch('pkg_resources.iter_entry_points')
    def test_load_entry_point_from_directory(self, mock):
        # Ensure parent module is present otherwise we'll (likely) get
        # an error due to unknown parent.
        if b'mach.commands' not in sys.modules:
            mod = imp.new_module(b'mach.commands')
            sys.modules[b'mach.commands'] = mod
        # Entry point resolves to a whole directory of provider files.
        mock.return_value = [Entry(['providers'])]
        # Mach error raised due to conditions_invalid.py
        with self.assertRaises(MachError):
            self._run_mach()
    @patch('pkg_resources.iter_entry_points')
    def test_load_entry_point_from_file(self, mock):
        # Entry point resolves to a single provider file; its commands
        # should then be visible in the help output.
        mock.return_value = [Entry([os.path.join('providers', 'basic.py')])]
        result, stdout, stderr = self._run_mach()
        self.assertIsNone(result)
        self.assertIn('cmd_foo', stdout)
# Not enabled in automation because tests are failing.
#if __name__ == '__main__':
# main()
| mpl-2.0 |
bmel/cassandra | pylib/cqlshlib/test/run_cqlsh.py | 46 | 10597 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this testing tool is *nix specific
import os
import sys
import re
import contextlib
import subprocess
import signal
import math
from time import time
from . import basecase
from os.path import join, normpath
def is_win():
    """Return True when running on a Windows-flavoured platform."""
    windows_platforms = ("cygwin", "win32")
    return sys.platform in windows_platforms
# Platform split: Windows drives the child through a WinPty pipe wrapper,
# POSIX uses a real pseudo-terminal via the stdlib pty module.
if is_win():
    from winpty import WinPty
    DEFAULT_PREFIX = ''
else:
    import pty
    DEFAULT_PREFIX = os.linesep
# Prompt regex: optional "user@" prefix and ":keyspace" suffix around "cqlsh> ".
DEFAULT_CQLSH_PROMPT = DEFAULT_PREFIX + '(\S+@)?cqlsh(:\S+)?> '
DEFAULT_CQLSH_TERM = 'xterm'
cqlshlog = basecase.cqlshlog
def set_controlling_pty(master, slave):
    """Make *slave* the controlling terminal of this process.

    Intended to run in the child between fork and exec (as a Popen
    preexec_fn) with a master/slave pair from pty.openpty().
    """
    # Detach from the parent's session so we can acquire our own ctty.
    os.setsid()
    # The child only needs the slave side.
    os.close(master)
    # Route stdin/stdout/stderr through the slave end.
    for i in range(3):
        os.dup2(slave, i)
    if slave > 2:
        os.close(slave)
    # Re-opening our tty acquires it as the controlling terminal.
    os.close(os.open(os.ttyname(1), os.O_RDWR))
@contextlib.contextmanager
def raising_signal(signum, exc):
    """
    Within the wrapped context, the given signal will interrupt signal
    calls and will raise the given exception class. The preexisting signal
    handling will be reinstated on context exit.
    """
    def raiser(signum, frames):
        # Handler simply converts the signal into the requested exception.
        raise exc()
    oldhandlr = signal.signal(signum, raiser)
    try:
        yield
    finally:
        # Always restore the previous handler, even if exc was raised.
        signal.signal(signum, oldhandlr)
class TimeoutError(Exception):
    """Raised when a timed read/wait exceeds its deadline.

    NOTE(review): this shadows Python 3's builtin TimeoutError; harmless
    in this Python 2 module but worth renaming if ever ported.
    """
    pass
@contextlib.contextmanager
def timing_out_itimer(seconds):
    """Raise TimeoutError if the wrapped block runs longer than *seconds*.

    Implemented with setitimer(ITIMER_REAL), giving sub-second resolution.
    A None timeout disables the deadline entirely.
    """
    if seconds is None:
        yield
        return
    with raising_signal(signal.SIGALRM, TimeoutError):
        oldval, oldint = signal.getitimer(signal.ITIMER_REAL)
        # Refuse to clobber a timer someone else armed.
        if oldval != 0.0:
            raise RuntimeError("ITIMER_REAL already in use")
        signal.setitimer(signal.ITIMER_REAL, seconds)
        try:
            yield
        finally:
            # Disarm the timer on exit, whether or not it fired.
            signal.setitimer(signal.ITIMER_REAL, 0)
@contextlib.contextmanager
def timing_out_alarm(seconds):
    """Raise TimeoutError if the wrapped block runs longer than *seconds*.

    Fallback for platforms without setitimer: uses signal.alarm(), so the
    deadline is rounded up to whole seconds.  None disables the deadline.
    """
    if seconds is None:
        yield
        return
    with raising_signal(signal.SIGALRM, TimeoutError):
        oldval = signal.alarm(int(math.ceil(seconds)))
        if oldval != 0:
            # Someone else's alarm was pending: put it back and bail out.
            signal.alarm(oldval)
            raise RuntimeError("SIGALRM already in use")
        try:
            yield
        finally:
            signal.alarm(0)
# Select the timeout implementation for this platform.  Windows has no
# signal-based timers, so eventlet's green-thread Timeout is required there;
# POSIX platforms prefer setitimer and fall back to SIGALRM otherwise.
if is_win():
    try:
        import eventlet
    except ImportError:
        # Fixed typo in the user-facing message ("evenlet" -> "eventlet")
        # and dropped the unused exception binding.
        sys.exit("eventlet library required to run cqlshlib tests on Windows")
    def timing_out(seconds):
        return eventlet.Timeout(seconds, TimeoutError)
else:
    # setitimer is new in 2.6, but it's still worth supporting, for potentially
    # faster tests because of sub-second resolution on timeouts.
    if hasattr(signal, 'setitimer'):
        timing_out = timing_out_itimer
    else:
        timing_out = timing_out_alarm
def noop(*a):
    """Accept any positional arguments and do nothing.

    Used as the default preexec_fn when no pty setup is needed.
    """
    return None
class ProcRunner:
    """Wrap a child process and talk to it over a pty or over pipes.

    On POSIX with tty=True the child runs on a real pseudo-terminal; on
    Windows (or with tty=False) plain pipes are used, optionally wrapped
    by a WinPty reader.  start_proc() binds self.send/self.read to the
    matching implementation.  read* methods return '' on EOF.
    """
    def __init__(self, path, tty=True, env=None, args=()):
        self.exe_path = path
        self.args = args
        self.tty = bool(tty)
        # A "real" tty is only possible off-Windows.
        self.realtty = self.tty and not is_win()
        if env is None:
            env = {}
        self.env = env
        # Holds data already read past the last pattern match.
        self.readbuf = ''
        self.start_proc()
    def start_proc(self):
        # Launch the child, wiring up either a pty pair or pipes.
        preexec = noop
        stdin = stdout = stderr = None
        cqlshlog.info("Spawning %r subprocess with args: %r and env: %r"
                      % (self.exe_path, self.args, self.env))
        if self.realtty:
            masterfd, slavefd = pty.openpty()
            # Child makes the slave its controlling terminal before exec.
            preexec = (lambda: set_controlling_pty(masterfd, slavefd))
            self.proc = subprocess.Popen((self.exe_path,) + tuple(self.args),
                                         env=self.env, preexec_fn=preexec,
                                         stdin=stdin, stdout=stdout, stderr=stderr,
                                         close_fds=False)
            # Parent keeps only the master side.
            os.close(slavefd)
            self.childpty = masterfd
            self.send = self.send_tty
            self.read = self.read_tty
        else:
            stdin = stdout = subprocess.PIPE
            stderr = subprocess.STDOUT
            self.proc = subprocess.Popen((self.exe_path,) + tuple(self.args),
                                         env=self.env, stdin=stdin, stdout=stdout,
                                         stderr=stderr, bufsize=0, close_fds=False)
            self.send = self.send_pipe
            if self.tty:
                self.winpty = WinPty(self.proc.stdout)
                self.read = self.read_winpty
            else:
                self.read = self.read_pipe
    def close(self):
        """Close our end of the child's input and wait for it to exit."""
        cqlshlog.info("Closing %r subprocess." % (self.exe_path,))
        if self.realtty:
            os.close(self.childpty)
        else:
            self.proc.stdin.close()
        cqlshlog.debug("Waiting for exit")
        return self.proc.wait()
    def send_tty(self, data):
        os.write(self.childpty, data)
    def send_pipe(self, data):
        self.proc.stdin.write(data)
    def read_tty(self, blksize, timeout=None):
        # timeout is unused here; pty reads rely on the outer timing_out().
        return os.read(self.childpty, blksize)
    def read_pipe(self, blksize, timeout=None):
        return self.proc.stdout.read(blksize)
    def read_winpty(self, blksize, timeout=None):
        return self.winpty.read(blksize, timeout)
    def read_until(self, until, blksize=4096, timeout=None,
                   flags=0, ptty_timeout=None):
        """Read until the regex *until* matches; return everything up to
        and including the match, buffering any overshoot in readbuf.
        Raises EOFError on EOF, TimeoutError past *timeout* seconds.
        NOTE(review): re._pattern_type is a private, Python 2-era name.
        """
        if not isinstance(until, re._pattern_type):
            until = re.compile(until, flags)
        cqlshlog.debug("Searching for %r" % (until.pattern,))
        got = self.readbuf
        self.readbuf = ''
        with timing_out(timeout):
            while True:
                val = self.read(blksize, ptty_timeout)
                cqlshlog.debug("read %r from subproc" % (val,))
                if val == '':
                    raise EOFError("'until' pattern %r not found" % (until.pattern,))
                got += val
                m = until.search(got)
                if m is not None:
                    # Keep anything past the match for the next read.
                    self.readbuf = got[m.end():]
                    got = got[:m.end()]
                    return got
    def read_lines(self, numlines, blksize=4096, timeout=None):
        """Read *numlines* newline-terminated lines (timeout covers all)."""
        lines = []
        with timing_out(timeout):
            for n in range(numlines):
                lines.append(self.read_until('\n', blksize=blksize))
        return lines
    def read_up_to_timeout(self, timeout, blksize=4096):
        """Drain output until *timeout* seconds elapse or EOF is seen."""
        got = self.readbuf
        self.readbuf = ''
        curtime = time()
        stoptime = curtime + timeout
        while curtime < stoptime:
            try:
                with timing_out(stoptime - curtime):
                    stuff = self.read(blksize)
            except TimeoutError:
                break
            cqlshlog.debug("read %r from subproc" % (stuff,))
            if stuff == '':
                break
            got += stuff
            curtime = time()
        return got
class CqlshRunner(ProcRunner):
    """ProcRunner specialised for driving an interactive cqlsh session.

    Builds the cqlsh command line (host, port, keyspace, cql version,
    encoding, colors), prepares the environment, and consumes output up
    to the first prompt so callers start from a clean state.
    """
    def __init__(self, path=None, host=None, port=None, keyspace=None, cqlver=None,
                 args=(), prompt=DEFAULT_CQLSH_PROMPT, env=None,
                 win_force_colors=True, tty=True, **kwargs):
        if path is None:
            cqlsh_bin = 'cqlsh'
            if is_win():
                cqlsh_bin = 'cqlsh.bat'
            path = normpath(join(basecase.cqlshdir, cqlsh_bin))
        if host is None:
            host = basecase.TEST_HOST
        if port is None:
            port = basecase.TEST_PORT
        if env is None:
            env = {}
        if is_win():
            # Windows needs the full parent environment and unbuffered IO.
            env['PYTHONUNBUFFERED'] = '1'
            env.update(os.environ.copy())
        env.setdefault('TERM', 'xterm')
        env.setdefault('CQLSH_NO_BUNDLED', os.environ.get('CQLSH_NO_BUNDLED', ''))
        env.setdefault('PYTHONPATH', os.environ.get('PYTHONPATH', ''))
        args = tuple(args) + (host, str(port))
        if cqlver is not None:
            args += ('--cqlversion', str(cqlver))
        if keyspace is not None:
            args += ('--keyspace', keyspace)
        if tty and is_win():
            args += ('--tty',)
        args += ('--encoding', 'utf-8')
        if win_force_colors:
            args += ('--color',)
        self.keyspace = keyspace
        ProcRunner.__init__(self, path, tty=tty, args=args, env=env, **kwargs)
        self.prompt = prompt
        if self.prompt is None:
            self.output_header = ''
        else:
            # Swallow the banner so the first cmd_and_response is clean.
            self.output_header = self.read_to_next_prompt()
    def read_to_next_prompt(self):
        return self.read_until(self.prompt, timeout=10.0, ptty_timeout=3)
    def read_up_to_timeout(self, timeout, blksize=4096):
        output = ProcRunner.read_up_to_timeout(self, timeout, blksize=blksize)
        # readline trying to be friendly- remove these artifacts
        output = output.replace(' \r', '')
        output = output.replace('\r', '')
        return output
    def cmd_and_response(self, cmd):
        """Send *cmd* and return its output, stripped of echo and prompt."""
        self.send(cmd + '\n')
        output = self.read_to_next_prompt()
        # readline trying to be friendly- remove these artifacts
        output = output.replace(' \r', '')
        output = output.replace('\r', '')
        output = output.replace(' \b', '')
        if self.realtty:
            # A real tty echoes the command back; strip and verify it.
            echo, output = output.split('\n', 1)
            assert echo == cmd, "unexpected echo %r instead of %r" % (echo, cmd)
        try:
            output, promptline = output.rsplit('\n', 1)
        except ValueError:
            promptline = output
            output = ''
        assert re.match(self.prompt, DEFAULT_PREFIX + promptline), \
            'last line of output %r does not match %r?' % (promptline, self.prompt)
        return output + '\n'
def run_cqlsh(**kwargs):
    """Start an interactive cqlsh session, wrapped so that using it in a
    ``with`` statement closes the subprocess on exit."""
    runner = CqlshRunner(**kwargs)
    return contextlib.closing(runner)
def call_cqlsh(**kwargs):
    """Run cqlsh non-interactively: feed the optional ``input`` keyword to
    its stdin and return an (output, exit_status) pair."""
    kwargs.setdefault('prompt', None)
    stdin_data = kwargs.pop('input', '')
    kwargs['tty'] = False
    runner = CqlshRunner(**kwargs)
    output, _ = runner.proc.communicate(stdin_data)
    exit_status = runner.close()
    return output, exit_status
| apache-2.0 |
kenorb-contrib/BitTorrent | twisted/cred/error.py | 19 | 2213 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
"""Cred errors."""
class Unauthorized(Exception):
    """Standard unauthorized error: the caller may not perform the
    requested operation."""
class DuplicateIdentity(KeyError):
    """Raised when an identity with the given name already exists.

    Descends from KeyError for backwards compatibility: that's what
    DefaultAuthorizer.addIdentity used to raise.
    """
    def __init__(self, name):
        KeyError.__init__(self, name)
        self.name = name

    def __repr__(self):
        return "<%s name %r>" % (self.__class__.__name__, self.name)

    def __str__(self):
        return "There is already an identity named %s." % (self.name,)
class KeyNotFound(KeyError, Unauthorized):
    """None of the keys on your keyring seem to fit here."""

    def __init__(self, serviceName, perspectiveName):
        KeyError.__init__(self, (serviceName, perspectiveName))
        self.serviceName = serviceName
        self.perspectiveName = perspectiveName

    def __repr__(self):
        # Pass the raw values to %r.  The original additionally wrapped
        # them in repr(), double-quoting the names in the output.
        return "<%s (%r, %r)>" % (self.__class__.__name__,
                                  self.serviceName,
                                  self.perspectiveName)

    def __str__(self):
        # Same double-repr fix as __repr__ above.
        return "No key for service %r, perspective %r." % (
            self.serviceName, self.perspectiveName)
### "New Cred" objects
class LoginFailed(Exception):
    """Raised when a user's request to log in fails for any reason."""
class UnauthorizedLogin(LoginFailed, Unauthorized):
    """The user was not authorized to log in.
    """
class UnhandledCredentials(LoginFailed):
    """A type of credentials was passed in with no knowledge of how to check
    it.  This is a server configuration error - it means that a protocol was
    connected to a Portal without a CredentialChecker that can check all of its
    potential authentication strategies.
    """
class LoginDenied(LoginFailed):
    """
    The realm rejected this login for some reason.

    Examples of reasons this might be raised include an avatar logging in
    too frequently, a quota having been fully used, or the overall server
    load being too high.
    """
| gpl-3.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/scons-2.3.1/SCons/compat/_scons_collections.py | 8 | 1889 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
collections compatibility module for older (pre-2.4) Python versions
This does not not NOT (repeat, *NOT*) provide complete collections
functionality. It only wraps the portions of collections functionality
used by SCons, in an interface that looks enough like collections for
our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_collections.py 2014/03/02 14:18:15 garyo"
# Use exec to hide old names from fixers.
exec("""if True:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
chriswaudby/pp | od/bruker-bits/simplejson/tests/test_recursion.py | 149 | 1679 | from unittest import TestCase
import simplejson as json
class JSONTestObject:
    """Sentinel class (used as-is, never instantiated): not JSON
    serializable, so encoding it always routes through default()."""
    pass
class RecursiveJSONEncoder(json.JSONEncoder):
    """Encoder whose default() can be toggled to return a structure that
    contains the unserializable sentinel again, forcing the encoder's
    recursion detection to trip."""
    # When True, default() maps JSONTestObject to [JSONTestObject], which
    # recurses forever unless the encoder raises ValueError.
    recurse = False

    def default(self, o):
        if o is JSONTestObject:
            if self.recurse:
                return [JSONTestObject]
            return 'JSONTestObject'
        # Delegate to the base implementation so it raises the standard
        # TypeError.  The original called it unbound, without `self`,
        # which produced a confusing wrong-argument TypeError instead.
        return json.JSONEncoder.default(self, o)
class TestRecursion(TestCase):
    """Verify the encoder detects circular structures and raises
    ValueError, and that its recursion markers are cleared afterwards."""
    def test_listrecursion(self):
        # A list containing itself must be rejected.
        x = []
        x.append(x)
        try:
            json.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on list recursion")
        # An indirect cycle (x -> y -> x) must also be rejected.
        x = []
        y = [x]
        x.append(y)
        try:
            json.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on alternating list recursion")
        # Shared (non-cyclic) references are fine.
        y = []
        x = [y, y]
        # ensure that the marker is cleared
        json.dumps(x)
    def test_dictrecursion(self):
        # A dict containing itself must be rejected.
        x = {}
        x["test"] = x
        try:
            json.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on dict recursion")
        # Shared (non-cyclic) references are fine.
        x = {}
        y = {"a": x, "b": x}
        # ensure that the marker is cleared
        json.dumps(y)
    def test_defaultrecursion(self):
        enc = RecursiveJSONEncoder()
        self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"')
        # With recurse enabled, default() keeps producing the sentinel,
        # so the encoder must detect the infinite regress.
        enc.recurse = True
        try:
            enc.encode(JSONTestObject)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on default recursion")
| mit |
omazapa/root | interpreter/llvm/src/utils/lit/lit/formats/googletest.py | 20 | 4185 | from __future__ import absolute_import
import os
import sys
import lit.Test
import lit.TestRunner
import lit.util
from .base import TestFormat
kIsWindows = sys.platform in ['win32', 'cygwin']
class GoogleTest(TestFormat):
    """lit test format that discovers and runs GoogleTest executables.

    Tests are found by running each executable with --gtest_list_tests
    and parsing the indented listing into fully-qualified test names.
    """
    def __init__(self, test_sub_dir, test_suffix):
        # ';'-separated list of subdirectory names that hold test binaries.
        self.test_sub_dir = os.path.normcase(str(test_sub_dir)).split(';')
        self.test_suffix = str(test_suffix)
        # On Windows, assume tests will also end in '.exe'.
        if kIsWindows:
            self.test_suffix += '.exe'
    def getGTestTests(self, path, litConfig, localConfig):
        """getGTestTests(path) - [name]
        Return the tests available in gtest executable.
        Args:
          path: String path to a gtest executable
          litConfig: LitConfig instance
          localConfig: TestingConfig instance"""
        try:
            lines = lit.util.capture([path, '--gtest_list_tests'],
                                     env=localConfig.environment)
            if kIsWindows:
                lines = lines.replace('\r', '')
            lines = lines.split('\n')
        # NOTE(review): bare except also swallows KeyboardInterrupt; kept
        # as-is since litConfig.error reports the failure.  raise
        # StopIteration is a Python 2-era way to end this generator.
        except:
            litConfig.error("unable to discover google-tests in %r" % path)
            raise StopIteration
        # Stack of enclosing "Suite." prefixes, one per indent level.
        nested_tests = []
        for ln in lines:
            if not ln.strip():
                continue
            prefix = ''
            index = 0
            # Each nesting level is indented by two spaces.
            while ln[index*2:index*2+2] == '  ':
                index += 1
            while len(nested_tests) > index:
                nested_tests.pop()
            ln = ln[index*2:]
            if ln.endswith('.'):
                # A trailing '.' marks a suite name, not a test.
                nested_tests.append(ln)
            else:
                yield ''.join(nested_tests) + ln
    # Note: path_in_suite should not include the executable name.
    def getTestsInExecutable(self, testSuite, path_in_suite, execpath,
                             litConfig, localConfig):
        if not execpath.endswith(self.test_suffix):
            return
        (dirname, basename) = os.path.split(execpath)
        # Discover the tests in this executable.
        for testname in self.getGTestTests(execpath, litConfig, localConfig):
            testPath = path_in_suite + (basename, testname)
            yield lit.Test.Test(testSuite, testPath, localConfig, file_path=execpath)
    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
        source_path = testSuite.getSourcePath(path_in_suite)
        for filename in os.listdir(source_path):
            filepath = os.path.join(source_path, filename)
            if os.path.isdir(filepath):
                # Iterate over executables in a directory.
                if not os.path.normcase(filename) in self.test_sub_dir:
                    continue
                dirpath_in_suite = path_in_suite + (filename, )
                for subfilename in os.listdir(filepath):
                    execpath = os.path.join(filepath, subfilename)
                    for test in self.getTestsInExecutable(
                            testSuite, dirpath_in_suite, execpath,
                            litConfig, localConfig):
                        yield test
            elif ('.' in self.test_sub_dir):
                # '.' in test_sub_dir means binaries may live at the top level.
                for test in self.getTestsInExecutable(
                        testSuite, path_in_suite, filepath,
                        litConfig, localConfig):
                    yield test
    def execute(self, test, litConfig):
        # Split the lit test path back into (executable path, gtest name).
        testPath,testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = os.path.join(namePrefix, testName)
        cmd = [testPath, '--gtest_filter=' + testName]
        if litConfig.useValgrind:
            cmd = litConfig.valgrindArgs + cmd
        if litConfig.noExecute:
            return lit.Test.PASS, ''
        out, err, exitCode = lit.util.executeCommand(
            cmd, env=test.config.environment)
        if not exitCode:
            return lit.Test.PASS,''
        return lit.Test.FAIL, out + err
| lgpl-2.1 |
henryr/Impala | thirdparty/hive-1.1.0-cdh5.5.0-SNAPSHOT/lib/py/fb303/FacebookBase.py | 173 | 1917 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import time
import FacebookService
import thrift.reflection.limited
from ttypes import fb_status
class FacebookBase(FacebookService.Iface):
  """Minimal in-memory implementation of the fb303 base service.

  Tracks the service name, the startup timestamp, and a dict of named
  integer counters.  Options, reflection and profiling hooks are stubs.
  """

  def __init__(self, name):
    self.name = name
    # aliveSince() reports this startup time (seconds since the epoch).
    self.alive = int(time.time())
    self.counters = {}

  def getName(self):
    return self.name

  def getVersion(self):
    return ''

  def getStatus(self):
    return fb_status.ALIVE

  def getCounters(self):
    return self.counters

  def resetCounter(self, key):
    self.counters[key] = 0

  def getCounter(self, key):
    # dict.get replaces the py2-only has_key() check and works on py2 and py3.
    return self.counters.get(key, 0)

  def incrementCounter(self, key):
    self.counters[key] = self.getCounter(key) + 1

  def setOption(self, key, value):
    pass

  def getOption(self, key):
    return ""

  # Note: the original defined getOptions twice with identical bodies; the
  # duplicate definition has been removed (the second silently shadowed the
  # first, so behavior is unchanged).
  def getOptions(self):
    return {}

  def aliveSince(self):
    return self.alive

  def getCpuProfile(self, duration):
    return ""

  def getLimitedReflection(self):
    return thrift.reflection.limited.Service()

  def reinitialize(self):
    pass

  def shutdown(self):
    pass
| apache-2.0 |
naterh/ironic | ironic/db/sqlalchemy/alembic/versions/1e1d5ace7dc6_add_inspection_started_at_and_.py | 10 | 1348 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""add inspection_started_at and inspection_finished_at
Revision ID: 1e1d5ace7dc6
Revises: 3ae36a5f5131
Create Date: 2015-02-26 10:46:46.861927
"""
# revision identifiers, used by Alembic.
revision = '1e1d5ace7dc6'
down_revision = '3ae36a5f5131'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable inspection timestamp columns to the nodes table."""
    for column_name in ('inspection_started_at', 'inspection_finished_at'):
        op.add_column('nodes', sa.Column(column_name,
                                         sa.DateTime(),
                                         nullable=True))
def downgrade():
    """Remove the inspection timestamp columns added by upgrade()."""
    for column_name in ('inspection_started_at', 'inspection_finished_at'):
        op.drop_column('nodes', column_name)
| apache-2.0 |
jeenalee/servo | tests/wpt/css-tests/tools/html5lib/html5lib/tests/performance/concatenation.py | 451 | 1145 | from __future__ import absolute_import, division, unicode_literals
def f1():
    """Benchmark variant 1: augmented assignment (x += y + z)."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x += y + z
def f2():
    """Benchmark variant 2: plain rebinding concatenation (x = x + y + z)."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = x + y + z
def f3():
    """Benchmark variant 3: str.join over a tuple of the parts."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "".join((x, y, z))
def f4():
    """Benchmark variant 4: %-style string formatting."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "%s%s%s" % (x, y, z)
import timeit
# Time each concatenation strategy: 3 repeats of 1,000,000 calls, printing
# all repeat times plus the minimum (the most reliable of the three).
for x in range(4):
    statement = "f%s" % (x + 1)
    # The setup string imports fN from this script when run as __main__.
    t = timeit.Timer(statement, "from __main__ import " + statement)
    r = t.repeat(3, 1000000)
    print(r, min(r))
| mpl-2.0 |
mezz64/home-assistant | homeassistant/auth/mfa_modules/notify.py | 13 | 11911 | """HMAC-based One-time Password auth module.
Sending HOTP through notify service
"""
import asyncio
from collections import OrderedDict
import logging
from typing import Any, Dict, List, Optional
import attr
import voluptuous as vol
from homeassistant.const import CONF_EXCLUDE, CONF_INCLUDE
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ServiceNotFound
from homeassistant.helpers import config_validation as cv
from . import (
MULTI_FACTOR_AUTH_MODULE_SCHEMA,
MULTI_FACTOR_AUTH_MODULES,
MultiFactorAuthModule,
SetupFlow,
)
REQUIREMENTS = ["pyotp==2.3.0"]
CONF_MESSAGE = "message"
CONFIG_SCHEMA = MULTI_FACTOR_AUTH_MODULE_SCHEMA.extend(
    {
        # Optional allow/deny lists of notify services usable for MFA codes.
        vol.Optional(CONF_INCLUDE): vol.All(cv.ensure_list, [cv.string]),
        vol.Optional(CONF_EXCLUDE): vol.All(cv.ensure_list, [cv.string]),
        # Message template; '{}' is replaced by the one-time code.
        vol.Optional(CONF_MESSAGE, default="{} is your Home Assistant login code"): str,
    },
    extra=vol.PREVENT_EXTRA,
)
# Keys used in the persisted auth-module store.
STORAGE_VERSION = 1
STORAGE_KEY = "auth_module.notify"
STORAGE_USERS = "users"
STORAGE_USER_ID = "user_id"
# Field name the login flow expects the entered code under.
INPUT_FIELD_CODE = "code"
_LOGGER = logging.getLogger(__name__)
def _generate_secret() -> str:
    """Return a fresh random base32 secret for HOTP generation."""
    import pyotp  # pylint: disable=import-outside-toplevel
    return str(pyotp.random_base32())
def _generate_random() -> int:
    """Generate a random HOTP counter from 8 digit characters.

    Note: int() drops leading zeros, so the result can have fewer than
    8 digits when the generated string starts with '0'.
    """
    import pyotp  # pylint: disable=import-outside-toplevel
    return int(pyotp.random_base32(length=8, chars=list("1234567890")))
def _generate_otp(secret: str, count: int) -> str:
    """Return the HOTP value for *secret* at counter position *count*."""
    import pyotp  # pylint: disable=import-outside-toplevel
    hotp = pyotp.HOTP(secret)
    return str(hotp.at(count))
def _verify_otp(secret: str, otp: str, count: int) -> bool:
    """Check *otp* against the HOTP value expected at position *count*."""
    import pyotp  # pylint: disable=import-outside-toplevel
    hotp = pyotp.HOTP(secret)
    return bool(hotp.verify(otp, count))
@attr.s(slots=True)
class NotifySetting:
    """Store notify setting for one user."""

    # secret and counter are regenerated per login and deliberately excluded
    # from persistence (see the attr filter in NotifyAuthModule._async_save).
    secret: str = attr.ib(factory=_generate_secret)  # not persistent
    counter: int = attr.ib(factory=_generate_random)  # not persistent
    notify_service: Optional[str] = attr.ib(default=None)
    target: Optional[str] = attr.ib(default=None)

# Mapping of user_id -> that user's NotifySetting.
_UsersDict = Dict[str, NotifySetting]
@MULTI_FACTOR_AUTH_MODULES.register("notify")
class NotifyAuthModule(MultiFactorAuthModule):
    """Auth module send hmac-based one time password by notify service."""
    DEFAULT_TITLE = "Notify One-Time Password"
    def __init__(self, hass: HomeAssistant, config: Dict[str, Any]) -> None:
        """Initialize the user data store."""
        super().__init__(hass, config)
        # Lazily populated from storage by _async_load(); None = not loaded yet.
        self._user_settings: Optional[_UsersDict] = None
        self._user_store = hass.helpers.storage.Store(
            STORAGE_VERSION, STORAGE_KEY, private=True
        )
        # Allow/deny lists restricting which notify services may be offered.
        self._include = config.get(CONF_INCLUDE, [])
        self._exclude = config.get(CONF_EXCLUDE, [])
        self._message_template = config[CONF_MESSAGE]
        # Serializes the first storage load between concurrent coroutines.
        self._init_lock = asyncio.Lock()
    @property
    def input_schema(self) -> vol.Schema:
        """Validate login flow input data."""
        return vol.Schema({INPUT_FIELD_CODE: str})
    async def _async_load(self) -> None:
        """Load stored data."""
        async with self._init_lock:
            # Another coroutine may have finished loading while we waited.
            if self._user_settings is not None:
                return
            data = await self._user_store.async_load()
            if data is None:
                data = {STORAGE_USERS: {}}
            self._user_settings = {
                user_id: NotifySetting(**setting)
                for user_id, setting in data.get(STORAGE_USERS, {}).items()
            }
    async def _async_save(self) -> None:
        """Save data."""
        if self._user_settings is None:
            return
        await self._user_store.async_save(
            {
                STORAGE_USERS: {
                    # secret/counter are session-only; filter them out of the
                    # persisted dict.
                    user_id: attr.asdict(
                        notify_setting,
                        filter=attr.filters.exclude(
                            attr.fields(NotifySetting).secret,
                            attr.fields(NotifySetting).counter,
                        ),
                    )
                    for user_id, notify_setting in self._user_settings.items()
                }
            }
        )
    @callback
    def aync_get_available_notify_services(self) -> List[str]:
        """Return list of notify services.

        NOTE(review): method name is missing an 's' ("aync_"); it is part of
        the public surface, so confirm callers before renaming.
        """
        unordered_services = set()
        for service in self.hass.services.async_services().get("notify", {}):
            if service not in self._exclude:
                unordered_services.add(service)
        if self._include:
            unordered_services &= set(self._include)
        return sorted(unordered_services)
    async def async_setup_flow(self, user_id: str) -> SetupFlow:
        """Return a data entry flow handler for setup module.
        Mfa module should extend SetupFlow
        """
        return NotifySetupFlow(
            self, self.input_schema, user_id, self.aync_get_available_notify_services()
        )
    async def async_setup_user(self, user_id: str, setup_data: Any) -> Any:
        """Set up auth module for user."""
        if self._user_settings is None:
            await self._async_load()
        assert self._user_settings is not None
        self._user_settings[user_id] = NotifySetting(
            notify_service=setup_data.get("notify_service"),
            target=setup_data.get("target"),
        )
        await self._async_save()
    async def async_depose_user(self, user_id: str) -> None:
        """Depose auth module for user."""
        if self._user_settings is None:
            await self._async_load()
        assert self._user_settings is not None
        # Only persist when the user actually had a setting to remove.
        if self._user_settings.pop(user_id, None):
            await self._async_save()
    async def async_is_user_setup(self, user_id: str) -> bool:
        """Return whether user is setup."""
        if self._user_settings is None:
            await self._async_load()
        assert self._user_settings is not None
        return user_id in self._user_settings
    async def async_validate(self, user_id: str, user_input: Dict[str, Any]) -> bool:
        """Return True if validation passed."""
        if self._user_settings is None:
            await self._async_load()
        assert self._user_settings is not None
        notify_setting = self._user_settings.get(user_id)
        if notify_setting is None:
            return False
        # user_input has been validated in the caller
        return await self.hass.async_add_executor_job(
            _verify_otp,
            notify_setting.secret,
            user_input.get(INPUT_FIELD_CODE, ""),
            notify_setting.counter,
        )
    async def async_initialize_login_mfa_step(self, user_id: str) -> None:
        """Generate code and notify user."""
        if self._user_settings is None:
            await self._async_load()
        assert self._user_settings is not None
        notify_setting = self._user_settings.get(user_id)
        if notify_setting is None:
            raise ValueError("Cannot find user_id")
        def generate_secret_and_one_time_password() -> str:
            """Generate and send one time password."""
            assert notify_setting
            # secret and counter are not persistent; a fresh pair is created
            # for every login attempt.
            notify_setting.secret = _generate_secret()
            notify_setting.counter = _generate_random()
            return _generate_otp(notify_setting.secret, notify_setting.counter)
        # pyotp work is blocking, so it runs in the executor.
        code = await self.hass.async_add_executor_job(
            generate_secret_and_one_time_password
        )
        await self.async_notify_user(user_id, code)
    async def async_notify_user(self, user_id: str, code: str) -> None:
        """Send code by user's notify service."""
        if self._user_settings is None:
            await self._async_load()
        assert self._user_settings is not None
        notify_setting = self._user_settings.get(user_id)
        if notify_setting is None:
            _LOGGER.error("Cannot find user %s", user_id)
            return
        await self.async_notify(
            code,
            notify_setting.notify_service,  # type: ignore
            notify_setting.target,
        )
    async def async_notify(
        self, code: str, notify_service: str, target: Optional[str] = None
    ) -> None:
        """Send code by notify service."""
        data = {"message": self._message_template.format(code)}
        if target:
            data["target"] = [target]
        await self.hass.services.async_call("notify", notify_service, data)
class NotifySetupFlow(SetupFlow):
    """Handler for the setup flow.

    Step 1 (init) lets the user pick a notify service/target; step 2 (setup)
    sends a one-time code over that service and asks the user to echo it back.
    """
    def __init__(
        self,
        auth_module: NotifyAuthModule,
        setup_schema: vol.Schema,
        user_id: str,
        available_notify_services: List[str],
    ) -> None:
        """Initialize the setup flow."""
        super().__init__(auth_module, setup_schema, user_id)
        # to fix typing complaint
        self._auth_module: NotifyAuthModule = auth_module
        self._available_notify_services = available_notify_services
        # Populated during async_step_init, consumed by async_step_setup.
        self._secret: Optional[str] = None
        self._count: Optional[int] = None
        self._notify_service: Optional[str] = None
        self._target: Optional[str] = None
    async def async_step_init(
        self, user_input: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Let user select available notify services."""
        errors: Dict[str, str] = {}
        hass = self._auth_module.hass
        if user_input:
            self._notify_service = user_input["notify_service"]
            self._target = user_input.get("target")
            # Fresh secret/counter for the verification code sent in step 2.
            self._secret = await hass.async_add_executor_job(_generate_secret)
            self._count = await hass.async_add_executor_job(_generate_random)
            return await self.async_step_setup()
        if not self._available_notify_services:
            return self.async_abort(reason="no_available_service")
        schema: Dict[str, Any] = OrderedDict()
        schema["notify_service"] = vol.In(self._available_notify_services)
        schema["target"] = vol.Optional(str)
        return self.async_show_form(
            step_id="init", data_schema=vol.Schema(schema), errors=errors
        )
    async def async_step_setup(
        self, user_input: Optional[Dict[str, str]] = None
    ) -> Dict[str, Any]:
        """Verify user can receive one-time password."""
        errors: Dict[str, str] = {}
        hass = self._auth_module.hass
        if user_input:
            verified = await hass.async_add_executor_job(
                _verify_otp, self._secret, user_input["code"], self._count
            )
            if verified:
                await self._auth_module.async_setup_user(
                    self._user_id,
                    {"notify_service": self._notify_service, "target": self._target},
                )
                return self.async_create_entry(title=self._auth_module.name, data={})
            errors["base"] = "invalid_code"
        # generate code every time, no retry logic
        assert self._secret and self._count
        code = await hass.async_add_executor_job(
            _generate_otp, self._secret, self._count
        )
        assert self._notify_service
        try:
            await self._auth_module.async_notify(
                code, self._notify_service, self._target
            )
        except ServiceNotFound:
            return self.async_abort(reason="notify_service_not_exist")
        return self.async_show_form(
            step_id="setup",
            data_schema=self._setup_schema,
            description_placeholders={"notify_service": self._notify_service},
            errors=errors,
        )
| apache-2.0 |
talpor/recipe-search-hackathon | recipe-search/recipe/migrations/0002_auto_20150221_2013.py | 1 | 2060 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the Category model, link recipes to categories, and extend the
    Recipe/IngredientEntry fields (description, name, quantity, units)."""
    dependencies = [
        ('recipe', '0001_initial'),
    ]
    operations = [
        # New Category model with a unique name and optional description.
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(unique=True, max_length=128)),
                ('description', models.TextField(null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # A recipe can belong to any number of categories.
        migrations.AddField(
            model_name='recipe',
            name='categories',
            field=models.ManyToManyField(to='recipe.Category'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='recipe',
            name='description',
            field=models.TextField(null=True),
            preserve_default=True,
        ),
        # 'default_name' only backfills existing rows; preserve_default=False
        # drops it from the final field definition.
        migrations.AddField(
            model_name='recipe',
            name='name',
            field=models.CharField(default='default_name', unique=True, max_length=128),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='ingrediententry',
            name='quantity',
            field=models.FloatField(default=0),
            preserve_default=True,
        ),
        # Widen the allowed unit choices (weights, volumes, fractions, spoons...).
        migrations.AlterField(
            model_name='ingrediententry',
            name='unit',
            field=models.CharField(max_length=16, choices=[(b'kg', b'kilogram'), (b'gr', b'gram'), (b'mg', b'miligrams'), (b'lb', b'pound'), (b'oz', b'ounce'), (b'gr', b'gram'), (b'lt', b'liter'), (b'ml', b'mililiter'), (b'gal', b'gallon'), (b'1', b'one'), (b'2', b'half'), (b'3', b'third'), (b'4', b'quarter'), (b'tsp', b'teaspoon'), (b'tbsp', b'tablespoon'), (b'cup', b'cup'), (b'scp', b'scoop'), (b'pch', b'pinch'), (b'dsh', b'dash')]),
            preserve_default=True,
        ),
    ]
| bsd-3-clause |
phektus/Django-Google-AppEngine-OpenId-Auth | django/template/loaders/filesystem.py | 229 | 2358 | """
Wrapper for loading templates from the filesystem.
"""
from django.conf import settings
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
class Loader(BaseLoader):
    """Load templates from the directories in settings.TEMPLATE_DIRS."""
    is_usable = True

    def get_template_sources(self, template_name, template_dirs=None):
        """
        Returns the absolute paths to "template_name", when appended to each
        directory in "template_dirs". Any paths that don't lie inside one of the
        template dirs are excluded from the result set, for security reasons.
        """
        dirs = template_dirs or settings.TEMPLATE_DIRS
        for template_dir in dirs:
            try:
                yield safe_join(template_dir, template_name)
            except UnicodeDecodeError:
                # The template dir name was a bytestring that wasn't valid UTF-8.
                raise
            except ValueError:
                # The joined path escaped this particular template_dir (it
                # might still live inside another one), so skip it quietly.
                pass

    def load_template_source(self, template_name, template_dirs=None):
        """Return (template source, file path) for the first readable match."""
        tried = []
        for filepath in self.get_template_sources(template_name, template_dirs):
            try:
                template_file = open(filepath)
                try:
                    return (template_file.read().decode(settings.FILE_CHARSET), filepath)
                finally:
                    template_file.close()
            except IOError:
                tried.append(filepath)
        if tried:
            error_msg = "Tried %s" % tried
        else:
            error_msg = "Your TEMPLATE_DIRS setting is empty. Change it to point to at least one template directory."
        raise TemplateDoesNotExist(error_msg)
    load_template_source.is_usable = True
# Shared module-level instance backing the deprecated function below.
_loader = Loader()

def load_template_source(template_name, template_dirs=None):
    """Deprecated module-level shim; delegates to a shared Loader instance."""
    # For backwards compatibility
    import warnings
    warnings.warn(
        "'django.template.loaders.filesystem.load_template_source' is deprecated; use 'django.template.loaders.filesystem.Loader' instead.",
        DeprecationWarning
    )
    return _loader.load_template_source(template_name, template_dirs)
# The template machinery checks this flag before using the loader.
load_template_source.is_usable = True
ramanajee/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/model/queuepropertymixin_unittest.py | 125 | 2208 | # Copyright (C) 2010 Google, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Research in Motion Ltd. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from model.queuepropertymixin import QueuePropertyMixin
from model.queues import Queue
class ObjectWithQueueName(QueuePropertyMixin):
    """Minimal fixture: an object exposing queue_name for the mixin to sync."""
    def __init__(self):
        self.queue_name = None
class QueuePropertyMixinTest(unittest.TestCase):
    def test_queue_property(self):
        """Setting .queue should keep .queue_name in sync, including None."""
        obj = ObjectWithQueueName()
        queue = Queue("mac-ews")
        obj.queue = queue
        self.assertEqual(obj.queue.name(), "mac-ews")
        self.assertEqual(obj.queue_name, "mac-ews")
        obj.queue = None
        self.assertEqual(obj.queue_name, None)
if __name__ == '__main__':
    unittest.main()
fedspendingtransparency/data-act-broker-backend | tests/unit/dataactvalidator/test_fabs32_detached_award_financial_assistance_2.py | 1 | 2326 | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabs32_detached_award_financial_assistance_2'
def test_column_headers(database):
    """The rule's output must expose exactly these error-reporting columns."""
    expected_subset = {'row_number', 'period_of_performance_star', 'uniqueid_AssistanceTransactionUniqueKey'}
    actual = set(query_columns(_FILE, database))
    assert expected_subset == actual
def test_success(database):
    """ When provided, PeriodOfPerformanceStartDate must be a valid date between 19991001 and 20991231.
        (i.e., a date between 10/01/1999 and 12/31/2099)
    """
    det_award_1 = DetachedAwardFinancialAssistanceFactory(period_of_performance_star='20120725',
                                                          correction_delete_indicatr='c')
    # Empty/None values are out of scope for this rule ("when provided").
    det_award_2 = DetachedAwardFinancialAssistanceFactory(period_of_performance_star=None,
                                                          correction_delete_indicatr=None)
    # NOTE(review): '5' passing suggests non-date formats are caught by a
    # separate rule; confirm against the rule's SQL.
    det_award_3 = DetachedAwardFinancialAssistanceFactory(period_of_performance_star='5', correction_delete_indicatr='')
    det_award_4 = DetachedAwardFinancialAssistanceFactory(period_of_performance_star='', correction_delete_indicatr='C')
    # Ignore correction delete indicator of D
    det_award_5 = DetachedAwardFinancialAssistanceFactory(period_of_performance_star='19990131',
                                                          correction_delete_indicatr='d')
    errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2, det_award_3, det_award_4, det_award_5])
    assert errors == 0
def test_failure(database):
    """ When provided, PeriodOfPerformanceStartDate must be a valid date between 19991001 and 20991231.
        (i.e., a date between 10/01/1999 and 12/31/2099)
    """
    # One date before the valid window, one after; both must be flagged.
    det_award_1 = DetachedAwardFinancialAssistanceFactory(period_of_performance_star='19990131',
                                                          correction_delete_indicatr='')
    det_award_2 = DetachedAwardFinancialAssistanceFactory(period_of_performance_star='21000101',
                                                          correction_delete_indicatr='c')
    errors = number_of_errors(_FILE, database, models=[det_award_1, det_award_2])
    assert errors == 2
| cc0-1.0 |
turbomanage/training-data-analyst | blogs/goes16/maria/create_image.py | 2 | 4697 | #!/usr/bin/env python
"""
Copyright Google Inc. 2017
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
def create_local_snapshots(outdir, hurricane_file):
    """Render GOES IR snapshots for the track points in *hurricane_file*.

    Recreates *outdir* from scratch; currently only the first track point is
    processed (see the `break` below).
    """
    import shutil,os
    import hurricanes.goes_to_jpeg as g2j
    shutil.rmtree(outdir, ignore_errors=True)
    os.mkdir(outdir)
    with open(hurricane_file, 'r') as ifp:
        for line in ifp:
            dt, lat, lon = g2j.parse_line(line)
            objectId = g2j.get_objectId_at(dt)
            # NOTE(review): the last {:02d} is filled with dt.second, not
            # dt.minute -- looks unintended; confirm the filename format.
            outfilename = os.path.join(
                outdir,
                'ir_{}{:02d}{:02d}{:02d}{:02d}.jpg'.format(
                    dt.year, dt.month, dt.day, dt.hour, dt.second))
            jpgfile = g2j.goes_to_jpeg(objectId, lat, lon, None, outfilename)
            break # take out this to process all the timestamps ...
def create_query(opts):
    """Build the BigQuery SQL that selects track points for one season.

    Requires opts.year plus either opts.hurricane (name match) or
    opts.basin; raises ValueError when neither is supplied.  Values are
    interpolated directly into the SQL text; they come from the command
    line, not from untrusted end users.
    """
    base = """
    SELECT
      name,
      latitude,
      longitude,
      iso_time,
      dist2land
    FROM
      `bigquery-public-data.noaa_hurricanes.hurricanes`
    """
    where = "WHERE season = '{}' ".format(opts.year)
    if opts.hurricane:
        where += " AND name LIKE '%{}%' ".format(opts.hurricane.upper())
    elif opts.basin:
        where += " AND basin = '{}' ".format(opts.basin.upper())
    else:
        raise ValueError("Need to specify either a hurricane or a basin")
    return base + where
def create_snapshots_on_cloud(bucket, project, runner, opts):
   """Run a Beam pipeline rendering a GOES JPEG for every track point.

   Reads the hurricane track from BigQuery (see create_query), then maps
   each (time, name, lat, lon) record to a JPEG written under
   gs://<bucket>/hurricane/images/<name>/.

   Fix: the original 'to_jpg' step used a Python-2-only tuple-parameter
   lambda (``lambda (dt,name,lat,lon): ...``), which is a SyntaxError on
   Python 3; it is now a named inner function that unpacks the record.
   """
   import datetime, os
   import apache_beam as beam
   import hurricanes.goes_to_jpeg as g2j
   query = create_query(opts)
   OUTPUT_DIR = 'gs://{}/hurricane/'.format(bucket)
   options = {
      'staging_location': os.path.join(OUTPUT_DIR, 'tmp', 'staging'),
      'temp_location': os.path.join(OUTPUT_DIR, 'tmp'),
      'job_name': 'maria-' + datetime.datetime.now().strftime('%y%m%d-%H%M%S'),
      'project': project,
      'max_num_workers': 12,
      'setup_file': './setup.py',
      'teardown_policy': 'TEARDOWN_ALWAYS',
      'no_save_main_session': True
   }
   # Distinct name so the 'opts' argument is no longer clobbered.
   pipeline_opts = beam.pipeline.PipelineOptions(flags=[], **options)
   p = beam.Pipeline(runner, options=pipeline_opts)

   def _loc_at_time(rowdict):
      """Convert one BigQuery row into a (datetime, name, lat, lon) tuple."""
      return (g2j.parse_timestamp(rowdict['iso_time']),
              rowdict['name'].lower(),
              rowdict['latitude'],
              rowdict['longitude'])

   def _to_jpeg(record):
      """Render one track point to a JPEG on GCS; returns the output path."""
      dt, name, lat, lon = record
      # NOTE(review): the final field is dt.second where dt.minute looks
      # intended; kept as-is to preserve existing output names -- confirm.
      out_path = 'hurricane/images/{}/ir_{}{:02d}{:02d}{:02d}{:02d}.jpg'.format(
          name, dt.year, dt.month, dt.day, dt.hour, dt.second)
      return g2j.goes_to_jpeg(g2j.get_objectId_at(dt), lat, lon, bucket, out_path)

   (p
      | 'get_tracks' >> beam.io.Read(beam.io.BigQuerySource(query=query, use_standard_sql=True))
      | 'loc_at_time' >> beam.Map(_loc_at_time)
      | 'to_jpg' >> beam.Map(_to_jpeg)
   )
   job = p.run()
   if runner == 'DirectRunner':
      job.wait_until_finish()
if __name__ == '__main__':
    import argparse, logging
    parser = argparse.ArgumentParser(description='Plot the landfall of Hurricane Maria')
    parser.add_argument('--bucket', default='', help='Specify GCS bucket to run on cloud')
    parser.add_argument('--project', default='', help='Specify GCP project to bill')
    parser.add_argument('--outdir', default='', help='output dir if local')
    parser.add_argument('--hurricane', default='', help='name of hurricane; if empty, uses basin')
    parser.add_argument('--basin', default='', help='name of basin, e.g NA for North-Atlantic')
    parser.add_argument('--year', required=True, help='year of named hurricane, e.g. 2017')
    opts = parser.parse_args()
    # Cloud runs always use Dataflow; flip the commented lines to debug
    # the Beam pipeline locally while still writing outputs to GCS.
    runner = 'DataflowRunner' # run on Cloud
    #runner = 'DirectRunner' # run Beam on local machine, but write outputs to cloud
    logging.basicConfig(level=getattr(logging, 'INFO', None))
    # --bucket selects the cloud path; otherwise --outdir selects local mode.
    if len(opts.bucket) > 0:
        if len(opts.project) == 0:
            raise ValueError("Please specify billed project")
        logging.info('Running on cloud ...')
        create_snapshots_on_cloud(opts.bucket, opts.project, runner, opts)
    elif len(opts.outdir) > 0:
        create_local_snapshots(opts.outdir, 'MARIA.csv')
    else:
        raise ValueError("Need to specify either outdir or bucket")
| apache-2.0 |
kkoksvik/FreeCAD | src/Mod/Cam/InitGui.py | 57 | 2807 | # Cam gui init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
class CamWorkbench ( Workbench ):
    "Cam workbench object"
    # Icon is an inline XPM image shown in the workbench selector.
    Icon = """
    /* XPM */
    static const char *Cam_Box[]={
    "16 16 3 1",
    ". c None",
    "# c #000000",
    "a c #c6c642",
    "................",
    ".......#######..",
    "......#aaaaa##..",
    ".....#aaaaa###..",
    "....#aaaaa##a#..",
    "...#aaaaa##aa#..",
    "..#aaaaa##aaa#..",
    ".########aaaa#..",
    ".#aaaaa#aaaaa#..",
    ".#aaaaa#aaaa##..",
    ".#aaaaa#aaa##...",
    ".#aaaaa#aa##....",
    ".#aaaaa#a##... .",
    ".#aaaaa###......",
    ".########.......",
    "................"};
    """
    MenuText = "Cam design"
    ToolTip = "Cam"
    def Initialize(self):
        # Importing the modules registers their commands with FreeCAD.
        import CamGui
        import Cam
    def GetClassName(self):
        # Name of the C++ workbench class this Python object wraps.
        return "CamGui::Workbench"
# No Workbench at the moment
Gui.addWorkbench(CamWorkbench())
| lgpl-2.1 |
msebire/intellij-community | python/lib/Lib/dummy_thread.py | 86 | 4494 | """Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import thread
except ImportError:
import dummy_thread as thread
"""
__author__ = "Brett Cannon"
__email__ = "brett@python.org"
# Exports only things specified by thread documentation
# (skipping obsolete synonyms allocate(), start_new(), exit_thread())
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
import traceback as _traceback
import warnings
class error(Exception):
    """Dummy implementation of thread.error."""
    def __init__(self, *args):
        # Mirror thread.error: expose the constructor args on .args.
        self.args = args
def start_new_thread(function, args, kwargs={}):
    """Dummy implementation of thread.start_new_thread().

    Compatibility is maintained by making sure that ``args`` is a
    tuple and ``kwargs`` is a dictionary. If an exception is raised
    and it is SystemExit (which can be done by thread.exit()) it is
    caught and nothing is done; all other exceptions are printed out
    by using traceback.print_exc().

    If the executed function calls interrupt_main the KeyboardInterrupt will be
    raised when the function returns.

    The mutable default for ``kwargs`` is safe: it is never mutated here.
    """
    # isinstance replaces the exact-type checks, so tuple/dict subclasses
    # (e.g. namedtuples) are accepted as well; everything previously
    # accepted still is.
    if not isinstance(args, tuple):
        raise TypeError("2nd arg must be a tuple")
    if not isinstance(kwargs, dict):
        raise TypeError("3rd arg must be a dict")
    global _main
    _main = False
    try:
        function(*args, **kwargs)
    except SystemExit:
        pass
    except:
        # Deliberately broad: a real thread would print any uncaught error.
        _traceback.print_exc()
    _main = True
    global _interrupt
    if _interrupt:
        _interrupt = False
        raise KeyboardInterrupt
def exit():
    """Dummy implementation of thread.exit()."""
    # SystemExit is swallowed by start_new_thread, matching thread.exit().
    raise SystemExit
def get_ident():
    """Dummy implementation of thread.get_ident().

    Since this module should only be used when threadmodule is not
    available, it is safe to assume that the current process is the
    only thread.  Thus a constant can be safely returned.
    """
    # Any constant works; -1 cannot collide with a real thread id.
    return -1
def allocate_lock():
    """Dummy implementation of thread.allocate_lock()."""
    # Each call hands out an independent dummy lock object.
    return LockType()
def stack_size(size=None):
    """Dummy implementation of thread.stack_size()."""
    # Querying (size=None) returns the platform default marker 0; any
    # attempt to actually set a size is unsupported here.
    if size is not None:
        raise error("setting thread stack size not supported")
    return 0
class LockType(object):
    """Dummy lock standing in for thread.LockType.

    All state lives in the public ``locked_status`` boolean so that code
    poking at it directly (e.g. threading.Condition's asserts) keeps
    working.  Instances should not be pickled and shared, since the
    methods are not atomic.
    """

    def __init__(self):
        self.locked_status = False

    def acquire(self, waitflag=None):
        """Acquire the dummy lock.

        A blocking call (waitflag omitted/None or truthy) always succeeds,
        even if the lock is already held.  A non-blocking call succeeds
        only when the lock is currently free.  Returns True on success,
        False otherwise.
        """
        if waitflag is not None and not waitflag:
            # Non-blocking mode: fail fast if already held.
            if self.locked_status:
                return False
        self.locked_status = True
        return True

    __enter__ = acquire

    def __exit__(self, typ, val, tb):
        self.release()

    def release(self):
        """Release the dummy lock; raises error if it was not held."""
        if not self.locked_status:
            raise error
        self.locked_status = False
        return True

    def locked(self):
        """Return whether the dummy lock is currently held."""
        return self.locked_status
# Module-level flags shared with start_new_thread():
# _interrupt -- set when interrupt_main() was called inside a "thread".
# _main -- True whenever we are not executing inside a "thread".
_interrupt = False
_main = True

def interrupt_main():
    """Set _interrupt flag to True to have start_new_thread raise
    KeyboardInterrupt upon exiting."""
    global _interrupt
    if _main:
        # Not inside a dummy thread: behave like a real Ctrl-C immediately.
        raise KeyboardInterrupt
    _interrupt = True
| apache-2.0 |
duckinator/boreutils | test/test_basename.py | 1 | 2725 | """
Tests for POSIX-compatible `basename`.
https://pubs.opengroup.org/onlinepubs/9699919799/utilities/basename.html
"""
from helpers import check, check_version, run
def test_version():
    """Check that we're using Boreutil's implementation."""
    # check_version() presumably inspects --version output; see helpers.py.
    assert check_version("basename")
def test_missing_args():
    """No args => error of the form `basename: ...`"""
    # Run the command once and check both properties of that single
    # invocation (the original spawned the subprocess twice).
    result = run(["basename"])
    assert result.stderr.startswith("basename:")
    assert result.returncode > 0
def test_extra_args():
    """Too many args => error of the form `basename: ...`"""
    # Single invocation instead of re-running the command per assertion.
    result = run(["basename", "a", "b", "c"])
    assert result.stderr.startswith("basename:")
    assert result.returncode > 0
def test_help():
    """Passing -h or --help => print help text."""
    for flag in ("-h", "--help"):
        result = run(["basename", flag])
        assert result.stdout.split(' ')[0] == 'Usage:'
        assert result.returncode > 0
def test_main():
    """This was split into the test_step[1-6] functions below."""
    # Kept as a placeholder so the historical test name still exists.
    pass
# Test the various steps:
# (The step numbers follow the POSIX basename() algorithm.)

def test_step1():
    """1. Empty string results in an empty string."""
    assert check(["basename", ""]).stdout == "\n"


def test_step2():
    """2. We _do not_ skip steps 3-6 if given "//", so this should return "/".
    If we do skip step 3-6, this should return "//"!
    """
    assert check(["basename", "//"]).stdout == "/\n"


def test_step3():
    """3. If string is entirely slash characters, we get a single slash."""
    assert check(["basename", "///"]).stdout == "/\n"


def test_step4():
    """4. Remove trailing slash characters."""
    assert check(["basename", "owo/"]).stdout == "owo\n"
    # ! Potential edge case if we change behavior for step 2.
    assert check(["basename", "owo//"]).stdout == "owo\n"
    assert check(["basename", "owo///"]).stdout == "owo\n"


def test_step5():
    """5. If there are remaining slash characters, remove everything up to
    and including the last slash."""
    assert check(["basename", "/a/b/c/d/owo"]).stdout == "owo\n"
    # ! Potential edge case exercising steps 4+5 together.
    assert check(["basename", "/a/b/c/d/owo///"]).stdout == "owo\n"
    assert check(["basename", "///a/b/c/d/owo///"]).stdout == "owo\n"


def test_step6():
    """6. Remove suffix if it exists and isn't the whole string."""
    # A suffix equal to the entire remaining name must NOT be removed.
    assert check(["basename", "///a/b/owo.ext//", ".ext"]).stdout == "owo\n"
    assert check(["basename", "///a/b/owo.ext2//", ".ext"]).stdout == "owo.ext2\n"
    assert check(["basename", "///a/b/owo.ext", ".ext"]).stdout == "owo\n"
    assert check(["basename", "///a/b/owo.ex", ".ext"]).stdout == "owo.ex\n"
flavour/eden | modules/feedparser/datetimes/iso8601.py | 6 | 5630 | # Copyright 2010-2019 Kurt McKee <contactme@kurtmckee.org>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import unicode_literals
import re
import time
# ISO-8601 date parsing routines written by Fazal Majid.
# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601
# parser is beyond the scope of feedparser and would be a worthwhile addition
# to the Python library.
# A single regular expression cannot parse ISO 8601 date formats into groups
# as the standard is highly irregular (for instance is 030104 2003-01-04 or
# 0301-04-01), so we use templates instead.
# Please note the order in templates is significant because we need a
# greedy match.
# Date-part templates, most specific first (order matters: greedy match).
_iso8601_tmpl = [
    'YYYY-?MM-?DD',
    'YYYY-0MM?-?DD',
    'YYYY-MM',
    'YYYY-?OOO',
    'YY-?MM-?DD',
    'YY-?OOO',
    'YYYY',
    '-YY-?MM',
    '-OOO',
    '-YY',
    '--MM-?DD',
    '--MM',
    '---DD',
    'CC',
    '',
]

# Expand each template into a real regex: the date placeholders become
# named groups, and an optional time + timezone tail is appended.
_iso8601_re = [
    tmpl.replace(
        'YYYY', r'(?P<year>\d{4})').replace(
        'YY', r'(?P<year>\d\d)').replace(
        'MM', r'(?P<month>[01]\d)').replace(
        'DD', r'(?P<day>[0123]\d)').replace(
        'OOO', r'(?P<ordinal>[0123]\d\d)').replace(
        'CC', r'(?P<century>\d\d$)')
    + r'(T?(?P<hour>\d{2}):(?P<minute>\d{2})'
    + r'(:(?P<second>\d{2}))?'
    + r'(\.(?P<fracsecond>\d+))?'
    + r'(?P<tz>[+-](?P<tzhour>\d{2})(:(?P<tzmin>\d{2}))?|Z)?)?'
    for tmpl in _iso8601_tmpl]

# On Python 2 the comprehension variable leaks into module scope; on
# Python 3 it does not, hence the NameError guard.
try:
    del tmpl
except NameError:
    pass

# Pre-bind the compiled patterns' match methods so the parser below
# just iterates over callables.
_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re]
try:
    del regex
except NameError:
    pass
def _parse_date_iso8601(date_string):
    """Parse a variety of ISO-8601-compatible formats like 20040105.

    Returns a time.struct_time in local time, or None when no template
    matches or the timezone designator is malformed.  Missing fields
    default to the current UTC date's components.
    """
    m = None
    for _iso8601_match in _iso8601_matches:
        m = _iso8601_match(date_string)
        if m:
            break
    if not m:
        return
    if m.span() == (0, 0):
        # Only the empty template matched (zero-width match): not a date.
        return
    params = m.groupdict()
    ordinal = params.get('ordinal', 0)
    if ordinal:
        ordinal = int(ordinal)
    else:
        ordinal = 0
    year = params.get('year', '--')
    if not year or year == '--':
        year = time.gmtime()[0]
    elif len(year) == 2:
        # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993
        year = 100 * int(time.gmtime()[0] / 100) + int(year)
    else:
        year = int(year)
    month = params.get('month', '-')
    if not month or month == '-':
        # ordinals are NOT normalized by mktime, we simulate them
        # by setting month=1, day=ordinal
        if ordinal:
            month = 1
        else:
            month = time.gmtime()[1]
    month = int(month)
    day = params.get('day', 0)
    if not day:
        # see above
        if ordinal:
            day = ordinal
        elif params.get('century', 0) or \
                params.get('year', 0) or params.get('month', 0):
            day = 1
        else:
            day = time.gmtime()[2]
    else:
        day = int(day)
    # special case of the century - is the first year of the 21st century
    # 2000 or 2001 ? The debate goes on...
    if 'century' in params:
        year = (int(params['century']) - 1) * 100 + 1
    # in ISO 8601 most fields are optional
    for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']:
        if not params.get(field, None):
            params[field] = 0
    hour = int(params.get('hour', 0))
    minute = int(params.get('minute', 0))
    # fracsecond (if present) is truncated, not rounded.
    second = int(float(params.get('second', 0)))
    # weekday is normalized by mktime(), we can ignore it
    weekday = 0
    daylight_savings_flag = -1
    tm = [year, month, day, hour, minute, second, weekday,
          ordinal, daylight_savings_flag]
    # ISO 8601 time zone adjustments: shift the wall-clock reading to
    # UTC ('-' offsets are added back, '+' offsets subtracted); mktime
    # normalizes any resulting out-of-range hour/minute values.
    tz = params.get('tz')
    if tz and tz != 'Z':
        if tz[0] == '-':
            tm[3] += int(params.get('tzhour', 0))
            tm[4] += int(params.get('tzmin', 0))
        elif tz[0] == '+':
            tm[3] -= int(params.get('tzhour', 0))
            tm[4] -= int(params.get('tzmin', 0))
        else:
            return None
    # Python's time.mktime() is a wrapper around the ANSI C mktime(3c)
    # which is guaranteed to normalize d/m/y/h/m/s.
    # Many implementations have bugs, but we'll pretend they don't.
    return time.localtime(time.mktime(tuple(tm)))
| mit |
slayerjain/servo | tests/wpt/web-platform-tests/tools/webdriver/webdriver/webelement.py | 251 | 1846 | """Element-level WebDriver operations."""
import searchcontext
class WebElement(searchcontext.SearchContext):
    """Corresponds to a DOM element in the current page."""

    def __init__(self, driver, id):
        self._driver = driver
        self._id = id
        # Set value of mode used by SearchContext
        self.mode = driver.mode

    @property
    def id(self):
        """The opaque WebDriver reference string for this element."""
        return self._id

    def execute(self, method, path, name, body=None):
        """Execute a command against this WebElement."""
        return self._driver.execute(
            method, '/element/%s%s' % (self._id, path), name, body)

    def is_displayed(self):
        """Is this element displayed?"""
        return self.execute('GET', '/displayed', 'isDisplayed')

    def is_selected(self):
        """Is this checkbox, radio button, or option selected?"""
        return self.execute('GET', '/selected', 'isSelected')

    def get_attribute(self, name):
        """Get the value of an element property or attribute."""
        return self.execute('GET', '/attribute/%s' % name, 'getElementAttribute')

    @property
    def text(self):
        """Get the visible text for this element."""
        return self.execute('GET', '/text', 'text')

    @property
    def tag_name(self):
        """Get the tag name for this element."""
        return self.execute('GET', '/name', 'getElementTagName')

    def click(self):
        """Click on this element."""
        return self.execute('POST', '/click', 'click')

    def clear(self):
        """Clear the contents of this text input."""
        self.execute('POST', '/clear', 'clear')

    def send_keys(self, keys):
        """Send keys to this text input or body element."""
        if isinstance(keys, str):
            keys = [keys]
        self.execute('POST', '/value', 'sendKeys', {'value': keys})

    def to_json(self):
        """Serialize to the JSON wire protocol element representation."""
        # BUG FIX: this previously read self.id, which was never defined
        # anywhere in this class (__init__ only sets self._id), so
        # to_json() raised AttributeError.  The `id` property added
        # above makes both this method and external callers work.
        return {'ELEMENT': self.id}
| mpl-2.0 |
PureNexusProject/android_kernel_asus_flo | tools/perf/scripts/python/net_dropmonitor.py | 4235 | 1554 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
    """Load /proc/kallsyms into the module-level `kallsyms` list.

    Each entry is a dict {'loc': <start address>, 'name': <symbol>}.
    Silently returns (leaving the table empty) if the file cannot be
    opened, e.g. when not running as root.
    """
    global kallsyms
    try:
        f = open("/proc/kallsyms", "r")
        # First pass only counts lines, for the progress display below.
        linecount = 0
        for line in f:
            linecount = linecount+1
        f.seek(0)
    except:
        return
    j = 0
    for line in f:
        # kallsyms format: "<hex address> <type> <symbol name> [module]"
        loc = int(line.split()[0], 16)
        name = line.split()[2]
        j = j +1
        if ((j % 100) == 0):
            print "\r" + str(j) + "/" + str(linecount),
        kallsyms.append({ 'loc': loc, 'name' : name})
    print "\r" + str(j) + "/" + str(linecount)
    # NOTE(review): sorting a list of dicts relies on Python 2 dict
    # comparison semantics; confirm entries end up ordered by 'loc'.
    kallsyms.sort()
    return
def get_sym(sloc):
    """Resolve an address to (symbol name, offset into that symbol).

    `kallsyms` is sorted by start address.  The symbol containing `loc`
    is the *last* entry whose start address is <= loc.  The previous
    implementation returned the first entry with loc >= address, i.e.
    the symbol *after* the drop site, and computed the offset backwards
    (symbol start minus address).  Returns (None, 0) when the address
    lies before every known symbol or the table is empty.
    """
    loc = int(sloc)
    containing = None
    for entry in kallsyms:
        if entry['loc'] > loc:
            break
        containing = entry
    if containing is not None:
        return (containing['name'], loc - containing['loc'])
    return (None, 0)
def print_drop_table():
    """Print one line per drop site: symbol, offset and hit count."""
    print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
    for i in drop_log.keys():
        (sym, off) = get_sym(i)
        if sym == None:
            # Unresolvable address: fall back to the raw address string.
            sym = i
        print "%25s %25s %25s" % (sym, off, drop_log[i])


def trace_begin():
    print "Starting trace (Ctrl-C to dump results)"


def trace_end():
    # Symbol resolution is deferred to the end so tracing stays cheap.
    print "Gathering kallsyms data"
    get_kallsyms_table()
    print_drop_table()


# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    # Tally one drop per kernel text address; keys are stringified
    # because that is how they are compared/printed later.
    slocation = str(location)
    try:
        drop_log[slocation] = drop_log[slocation] + 1
    except:
        drop_log[slocation] = 1
| gpl-2.0 |
Yukarumya/Yukarum-Redfoxes | testing/marionette/harness/marionette_harness/tests/unit/test_errors.py | 1 | 2549 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
from marionette_driver import errors
from marionette_harness import marionette_test
def fake_cause():
    """Return the exc_info() triple for a freshly raised ValueError."""
    try:
        raise ValueError("bar")
    except ValueError:
        return sys.exc_info()
# Shared fixtures used by the construction/str tests below.
message = "foo"
cause = fake_cause()
stacktrace = "first\nsecond"
class TestErrors(marionette_test.MarionetteTestCase):
    # Uses assertEqual throughout: assertEquals is a deprecated alias
    # in unittest, and TestLookup below already uses assertEqual.

    def test_defaults(self):
        """A bare MarionetteException has no message/cause/stacktrace."""
        exc = errors.MarionetteException()
        self.assertIsNone(exc.message)
        self.assertIsNone(exc.cause)
        self.assertIsNone(exc.stacktrace)

    def test_construction(self):
        """Keyword arguments are stored verbatim on the exception."""
        exc = errors.MarionetteException(
            message=message, cause=cause, stacktrace=stacktrace)
        self.assertEqual(exc.message, message)
        self.assertEqual(exc.cause, cause)
        self.assertEqual(exc.stacktrace, stacktrace)

    def test_str(self):
        """str() includes message, cause type and indented stacktrace."""
        exc = errors.MarionetteException(
            message=message, cause=cause, stacktrace=stacktrace)
        r = str(exc)
        self.assertIn(message, r)
        self.assertIn(", caused by {0!r}".format(cause[0]), r)
        self.assertIn("\nstacktrace:\n\tfirst\n\tsecond", r)

    def test_cause_string(self):
        """A plain-string cause is kept and rendered as-is."""
        exc = errors.MarionetteException(cause="foo")
        self.assertEqual(exc.cause, "foo")
        r = str(exc)
        self.assertIn(", caused by foo", r)

    def test_cause_tuple(self):
        """An exc_info-style tuple cause renders its exception type."""
        exc = errors.MarionetteException(cause=cause)
        self.assertEqual(exc.cause, cause)
        r = str(exc)
        self.assertIn(", caused by {0!r}".format(cause[0]), r)
class TestLookup(marionette_test.MarionetteTestCase):
    # errors.lookup() maps wire-protocol status codes/strings to
    # exception classes, falling back to MarionetteException.

    def test_by_unknown_number(self):
        self.assertEqual(errors.MarionetteException, errors.lookup(123456))

    def test_by_known_string(self):
        self.assertEqual(errors.NoSuchElementException,
                         errors.lookup("no such element"))

    def test_by_unknown_string(self):
        self.assertEqual(errors.MarionetteException, errors.lookup("barbera"))

    def test_by_known_unicode_string(self):
        self.assertEqual(errors.NoSuchElementException,
                         errors.lookup(u"no such element"))
class TestAllErrors(marionette_test.MarionetteTestCase):

    def test_properties(self):
        # NOTE(review): errors.es_ is presumably the registry of all
        # error classes exported by the errors module — confirm the
        # attribute name against that module.
        for exc in errors.es_:
            self.assertTrue(hasattr(exc, "status"),
                            "expected exception to have attribute `status'")
| mpl-2.0 |
FederatedAI/FATE | python/federatedml/protobuf/generated/poisson_model_meta_pb2.py | 1 | 6300 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: poisson-model-meta.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='poisson-model-meta.proto',
package='com.webank.ai.fate.core.mlmodel.buffer',
syntax='proto3',
serialized_options=_b('B\025PoissonModelMetaProto'),
serialized_pb=_b('\n\x18poisson-model-meta.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\xd4\x01\n\x10PoissonModelMeta\x12\x0f\n\x07penalty\x18\x01 \x01(\t\x12\x0b\n\x03tol\x18\x02 \x01(\x01\x12\r\n\x05\x61lpha\x18\x03 \x01(\x01\x12\x11\n\toptimizer\x18\x04 \x01(\t\x12\x12\n\nbatch_size\x18\x05 \x01(\x03\x12\x15\n\rlearning_rate\x18\x06 \x01(\x01\x12\x10\n\x08max_iter\x18\x07 \x01(\x03\x12\x12\n\nearly_stop\x18\x08 \x01(\t\x12\x15\n\rfit_intercept\x18\t \x01(\x08\x12\x18\n\x10\x65xposure_colname\x18\n \x01(\tB\x17\x42\x15PoissonModelMetaProtob\x06proto3')
)
_POISSONMODELMETA = _descriptor.Descriptor(
name='PoissonModelMeta',
full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='penalty', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.penalty', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='tol', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.tol', index=1,
number=2, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='alpha', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.alpha', index=2,
number=3, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='optimizer', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.optimizer', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batch_size', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.batch_size', index=4,
number=5, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='learning_rate', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.learning_rate', index=5,
number=6, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='max_iter', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.max_iter', index=6,
number=7, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='early_stop', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.early_stop', index=7,
number=8, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='fit_intercept', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.fit_intercept', index=8,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='exposure_colname', full_name='com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta.exposure_colname', index=9,
number=10, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=69,
serialized_end=281,
)
DESCRIPTOR.message_types_by_name['PoissonModelMeta'] = _POISSONMODELMETA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
PoissonModelMeta = _reflection.GeneratedProtocolMessageType('PoissonModelMeta', (_message.Message,), dict(
DESCRIPTOR = _POISSONMODELMETA,
__module__ = 'poisson_model_meta_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.PoissonModelMeta)
))
_sym_db.RegisterMessage(PoissonModelMeta)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
sergio-teruel/bank-payment | __unported__/account_banking/wizard/banking_transaction_wizard.py | 13 | 19538 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2009 EduSense BV (<http://www.edusense.nl>).
# (C) 2011 - 2013 Therp BV (<http://therp.nl>).
# (C) 2011 Smile (<http://smile.fr>).
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.tools.translate import _
"""
The banking transaction wizard is linked to a button in the statement line
tree view. It allows the user to undo the duplicate flag, select between
multiple matches or select a manual match.
"""
class banking_transaction_wizard(orm.TransientModel):
_name = 'banking.transaction.wizard'
_description = 'Match transaction'
    def create(self, cr, uid, vals, context=None):
        """
        Make sure that the statement line has an import transaction
        """
        res = super(banking_transaction_wizard, self).create(
            cr, uid, vals, context=context)
        # Lazily create the banking.import.transaction linked to the
        # statement line so the matching logic always has one to use.
        if res and vals.get('statement_line_id'):
            line_pool = self.pool.get('account.bank.statement.line')
            line_pool.create_instant_transaction(
                cr, uid, vals['statement_line_id'], context=context)
        return res
    def create_act_window(self, cr, uid, ids, nodestroy=True, context=None):
        """
        Return a popup window for this model
        """
        if isinstance(ids, (int, long)):
            ids = [ids]
        # ir.actions.act_window descriptor that reopens the first wizard
        # record in a modal form view ('target': 'new').
        return {
            'name': self._description,
            'view_type': 'form',
            'view_mode': 'form',
            'res_model': self._name,
            'domain': [],
            'context': context,
            'type': 'ir.actions.act_window',
            'target': 'new',
            'res_id': ids[0],
            'nodestroy': nodestroy,
        }
    def trigger_match(self, cr, uid, ids, context=None):
        """
        Call the automatic matching routine for one or
        more bank transactions
        """
        if isinstance(ids, (int, long)):
            ids = [ids]
        import_transaction_obj = self.pool.get('banking.import.transaction')
        # Only the first wizard id is considered; read() returns the
        # many2one field as an (id, name) tuple, hence the [0].
        trans_id = self.read(
            cr, uid, ids[0], ['import_transaction_id'],
            context=context)['import_transaction_id'][0]  # many2one tuple
        import_transaction_obj.match(cr, uid, [trans_id], context=context)
        return self.create_act_window(cr, uid, ids, context=None)
def write(self, cr, uid, ids, vals, context=None):
"""
Implement a trigger to retrieve the corresponding move line
when the invoice_id changes
"""
statement_line_obj = self.pool.get('account.bank.statement.line')
transaction_obj = self.pool.get('banking.import.transaction')
if not vals or not ids:
return True
wiz = self.browse(cr, uid, ids[0], context=context)
# The following fields get never written
# they are just triggers for manual matching
# which populates regular fields on the transaction
manual_invoice_ids = vals.pop('manual_invoice_ids', [])
manual_move_line_ids = vals.pop('manual_move_line_ids', [])
res = super(banking_transaction_wizard, self).write(
cr, uid, ids, vals, context=context)
wiz.refresh()
# Process the logic of the written values
# An invoice is selected from multiple candidates
if vals and 'invoice_id' in vals:
if (wiz.import_transaction_id.match_type == 'invoice' and
wiz.import_transaction_id.invoice_id):
found = False
# the current value might apply
if (wiz.move_line_id and wiz.move_line_id.invoice and
wiz.move_line_id.invoice == wiz.invoice_id):
found = True
else:
# Otherwise, retrieve the move line for this invoice
# Given the arity of the relation, there is are always
# multiple possibilities but the move lines here are
# prefiltered for having account_id.type payable/receivable
# and the regular invoice workflow should only come up with
# one of those only.
for move_line in wiz.import_transaction_id.move_line_ids:
if (move_line.invoice ==
wiz.import_transaction_id.invoice_id):
transaction_obj.write(
cr, uid, wiz.import_transaction_id.id,
{'move_line_id': move_line.id, },
context=context
)
statement_line_obj.write(
cr, uid,
wiz.import_transaction_id.statement_line_id.id,
{
'partner_id': (
move_line.partner_id.id or False),
'account_id': move_line.account_id.id,
}, context=context)
found = True
break
# Cannot match the invoice
if not found:
orm.except_orm(
_("No entry found for the selected invoice"),
_("No entry found for the selected invoice. " +
"Try manual reconciliation."))
if manual_move_line_ids or manual_invoice_ids:
move_line_obj = self.pool.get('account.move.line')
invoice_obj = self.pool.get('account.invoice')
statement_line_obj = self.pool.get('account.bank.statement.line')
# Rewrite *2many directive notation
if manual_invoice_ids:
manual_invoice_ids = (
[i[1] for i in manual_invoice_ids if i[0] == 4] +
[j for i in manual_invoice_ids if i[0] == 6 for j in i[2]])
if manual_move_line_ids:
manual_move_line_ids = (
[i[1] for i in manual_move_line_ids if i[0] == 4] +
[j for i in manual_move_line_ids
if i[0] == 6 for j in i[2]])
for wiz in self.browse(cr, uid, ids, context=context):
# write can be called multiple times for the same values
# that doesn't hurt above, but it does here
if wiz.match_type and (
len(manual_move_line_ids) > 1 or
len(manual_invoice_ids) > 1):
continue
todo = []
for invoice in invoice_obj.browse(
cr, uid, manual_invoice_ids, context=context):
found_move_line = False
if invoice.move_id:
for line in invoice.move_id.line_id:
if line.account_id.type in ('receivable',
'payable'):
todo.append((invoice.id, line.id))
found_move_line = True
break
if not found_move_line:
raise orm.except_orm(
_("Cannot select for reconcilion"),
_("No entry found for the selected invoice. "))
for move_line_id in manual_move_line_ids:
todo_entry = [False, move_line_id]
move_line = move_line_obj.read(
cr,
uid,
move_line_id,
['invoice'],
context=context
)
if move_line['invoice']:
todo_entry[0] = move_line['invoice'][0]
todo.append(todo_entry)
while todo:
todo_entry = todo.pop()
move_line = move_line_obj.browse(
cr, uid, todo_entry[1], context)
transaction_id = wiz.import_transaction_id.id
statement_line_id = wiz.statement_line_id.id
if len(todo) > 0:
statement_line_id = wiz.statement_line_id.split_off(
move_line.debit or -move_line.credit)[0]
transaction_id = statement_line_obj.browse(
cr,
uid,
statement_line_id,
context=context
).import_transaction_id.id
vals = {
'move_line_id': todo_entry[1],
'move_line_ids': [(6, 0, [todo_entry[1]])],
'invoice_id': todo_entry[0],
'invoice_ids': [
(6, 0, [todo_entry[0]] if todo_entry[0] else [])
],
'match_type': 'manual',
}
transaction_obj.clear_and_write(
cr, uid, transaction_id, vals, context=context)
st_line_vals = {
'account_id': move_line_obj.read(
cr, uid, todo_entry[1],
['account_id'], context=context)['account_id'][0],
}
if todo_entry[0]:
st_line_vals['partner_id'] = invoice_obj.browse(
cr, uid, todo_entry[0], context=context
).partner_id.commercial_partner_id.id
statement_line_obj.write(
cr, uid, statement_line_id,
st_line_vals, context=context)
return res
    def trigger_write(self, cr, uid, ids, context=None):
        """
        Just a button that triggers a write.
        """
        # Reopen the wizard so that side effects of write() are shown.
        return self.create_act_window(cr, uid, ids, context=None)
def disable_match(self, cr, uid, ids, context=None):
"""
Clear manual and automatic match information
"""
settings_pool = self.pool.get('account.banking.account.settings')
statement_pool = self.pool.get('account.bank.statement.line')
if isinstance(ids, (int, long)):
ids = [ids]
for wiz in self.browse(cr, uid, ids, context=context):
# Get the bank account setting record, to reset the account
account_id = False
journal_id = wiz.statement_line_id.statement_id.journal_id.id
setting_ids = settings_pool.find(
cr, uid, journal_id, context=context
)
# Restore partner id from the bank account or else reset
partner_id = False
if (wiz.statement_line_id.partner_bank_id and
wiz.statement_line_id.partner_bank_id.partner_id):
partner_id = (
wiz.statement_line_id.partner_bank_id.partner_id.id
)
wiz.write({'partner_id': partner_id})
bank_partner = False
if partner_id:
bank_partner = wiz.statement_line_id.partner_bank_id.partner_id
if wiz.amount < 0:
if bank_partner:
account_id = bank_partner.\
def_journal_account_bank_decr()[bank_partner.id]
elif setting_ids:
account_id = settings_pool.browse(
cr, uid, setting_ids[0],
context=context).default_credit_account_id.id
else:
if bank_partner:
account_id = bank_partner.\
def_journal_account_bank_incr()[bank_partner.id]
elif setting_ids:
account_id = settings_pool.browse(
cr, uid, setting_ids[0],
context=context).default_debit_account_id.id
if account_id:
wiz.statement_line_id.write({'account_id': account_id})
if wiz.statement_line_id:
# delete splits causing an unsplit if this is a split
# transaction
statement_pool.unlink(
cr,
uid,
statement_pool.search(
cr, uid,
[('parent_id', '=', wiz.statement_line_id.id)],
context=context
),
context=context
)
if wiz.import_transaction_id:
wiz.import_transaction_id.clear_and_write()
return self.create_act_window(cr, uid, ids, context=None)
    def reverse_duplicate(self, cr, uid, ids, context=None):
        """Toggle the duplicate flag on each underlying import transaction."""
        if isinstance(ids, (int, long)):
            ids = [ids]
        transaction_obj = self.pool.get('banking.import.transaction')
        for wiz in self.read(
                cr, uid, ids, ['duplicate', 'import_transaction_id'],
                context=context):
            transaction_obj.write(
                cr, uid, wiz['import_transaction_id'][0],
                {'duplicate': not wiz['duplicate']}, context=context)
        return self.create_act_window(cr, uid, ids, context=None)
    def button_done(self, cr, uid, ids, context=None):
        # Close the wizard popup.
        return {'type': 'ir.actions.act_window_close'}
_columns = {
'name': fields.char('Name', size=64),
'statement_line_id': fields.many2one(
'account.bank.statement.line', 'Statement line',
),
'amount': fields.related(
'statement_line_id', 'amount', type='float',
string="Amount", readonly=True),
'date': fields.related(
'statement_line_id', 'date', type='date',
string="Date", readonly=True),
'ref': fields.related(
'statement_line_id', 'ref', type='char', size=32,
string="Reference", readonly=True),
'message': fields.related(
'statement_line_id', 'import_transaction_id', 'message',
type='char', size=1024,
string="Message", readonly=True),
'partner_id': fields.related(
'statement_line_id', 'partner_id',
type='many2one', relation='res.partner',
string="Partner", readonly=True),
'statement_line_parent_id': fields.related(
'statement_line_id', 'parent_id', type='many2one',
relation='account.bank.statement.line', readonly=True),
'import_transaction_id': fields.related(
'statement_line_id', 'import_transaction_id',
string="Import transaction",
type='many2one', relation='banking.import.transaction'),
'residual': fields.related(
'import_transaction_id', 'residual', type='float',
string='Residual', readonly=True),
'writeoff_account_id': fields.related(
'import_transaction_id', 'writeoff_account_id',
type='many2one', relation='account.account',
string='Write-off account'),
'invoice_ids': fields.related(
'import_transaction_id', 'invoice_ids', string="Matching invoices",
type='many2many', relation='account.invoice'),
'invoice_id': fields.related(
'import_transaction_id',
'invoice_id',
string="Invoice to reconcile",
type='many2one',
relation='account.invoice',
),
'move_line_ids': fields.related(
'import_transaction_id', 'move_line_ids', string="Entry lines",
type='many2many', relation='account.move.line'),
'move_line_id': fields.related(
'import_transaction_id', 'move_line_id', string="Entry line",
type='many2one', relation='account.move.line'),
'duplicate': fields.related(
'import_transaction_id',
'duplicate',
string='Flagged as duplicate',
type='boolean',
),
'match_multi': fields.related(
'import_transaction_id', 'match_multi',
type="boolean", string='Multiple matches'),
'match_type': fields.related(
'import_transaction_id',
'match_type',
type='selection',
selection=[
('move', 'Move'),
('invoice', 'Invoice'),
('payment', 'Payment line'),
('payment_order', 'Payment order'),
('storno', 'Storno'),
('manual', 'Manual'),
('payment_manual', 'Payment line (manual)'),
('payment_order_manual', 'Payment order (manual)'),
],
string='Match type',
readonly=True,
),
'manual_invoice_ids': fields.many2many(
'account.invoice',
'banking_transaction_wizard_account_invoice_rel',
'wizard_id', 'invoice_id', string='Match one or more invoices',
domain=[('reconciled', '=', False)]),
'manual_move_line_ids': fields.many2many(
'account.move.line',
'banking_transaction_wizard_account_move_line_rel',
'wizard_id', 'move_line_id', string='Or match one or more entries',
domain=[('account_id.reconcile', '=', True),
('reconcile_id', '=', False)]),
'payment_option': fields.related(
'import_transaction_id',
'payment_option',
string='Payment Difference',
type='selection',
required=True,
selection=[
('without_writeoff', 'Keep Open'),
('with_writeoff', 'Reconcile Payment Balance')
],
),
'writeoff_analytic_id': fields.related(
'import_transaction_id', 'writeoff_analytic_id',
type='many2one', relation='account.analytic.account',
string='Write-off analytic account'),
'analytic_account_id': fields.related(
'statement_line_id', 'analytic_account_id',
type='many2one', relation='account.analytic.account',
string="Analytic Account"),
'move_currency_amount': fields.related(
'import_transaction_id',
'move_currency_amount',
type='float',
string='Match Currency Amount',
readonly=True,
),
}
| agpl-3.0 |
mvesper/invenio | modules/bibcirculation/lib/bibcirculation_cern_ldap.py | 3 | 3885 | # This file is part of Invenio.
# Copyright (C) 2009, 2010, 2011, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio LDAP interface for BibCirculation at CERN. """
from time import sleep
from thread import get_ident
from invenio.config import CFG_CERN_SITE
try:
import ldap
import ldap.filter
#from invenio.external_authentication_cern_wrapper import _cern_nice_soap_auth
CFG_BIBCIRCULATION_HAS_LDAP = CFG_CERN_SITE
except (ImportError, IOError):
CFG_BIBCIRCULATION_HAS_LDAP = False
# from base64 import decodestring
# This is the old configuration
# CFG_CERN_LDAP_URI = "ldaps://ldap.cern.ch:636"
# CFG_CERN_LDAP_BIND = "n=%s,ou=users,o=cern,c=ch"
# CFG_CERN_LDAP_BASE = "O=CERN,C=CH"
CFG_CERN_LDAP_URI = "ldap://xldap.cern.ch:389"
#CFG_CERN_LDAP_BASE = "ou=users,ou=organic units,dc=cern,dc=ch"
CFG_CERN_LDAP_BASE = "dc=cern,dc=ch"
# This one also works but the previous one is recommended
# CFG_CERN_LDAP_URI = "ldap://ldap.cern.ch"
# CFG_CERN_LDAP_BIND = "cn=%s,ou=users,ou=organic units,dc=cern,dc=ch"
# CFG_CERN_LDAP_BASE = "O=CERN,C=CH"
_ldap_connection_pool = {}
def _cern_ldap_login():
    """Open an anonymous connection to the CERN LDAP server.

    Authenticated binding is currently disabled; the commented-out lines
    preserve how a simple bind used to be performed.
    """
    # user, password = decodestring(_cern_nice_soap_auth).split(':', 1)
    # connection.simple_bind(CFG_CERN_LDAP_BIND % user, password)
    return ldap.initialize(CFG_CERN_LDAP_URI)
def get_user_info_from_ldap(nickname="", email="", ccid=""):
    """Query the CERN LDAP server for information about a user.

    Exactly one of the three identifiers is used, checked in this order:
    nickname (displayName), email (mail), ccid (employeeID).

    :param nickname: CERN display name to look up.
    :param email: e-mail address to look up.
    :param ccid: CERN computing-centre id (converted to str).
    :return: the LDAP attribute dictionary of the best match, or {} when
        no identifier was given or nothing matched.
    """
    # One LDAP connection is cached per thread; get_ident() keys the pool.
    try:
        connection = _ldap_connection_pool[get_ident()]
    except KeyError:
        connection = _ldap_connection_pool[get_ident()] = _cern_ldap_login()
    # Build the inner query from whichever identifier was supplied,
    # escaping it to avoid LDAP-filter injection.
    if nickname:
        query = '(displayName=%s)' % ldap.filter.escape_filter_chars(nickname)
    elif email:
        query = '(mail=%s)' % ldap.filter.escape_filter_chars(email)
    elif ccid:
        query = '(employeeID=%s)' % ldap.filter.escape_filter_chars(str(ccid))
    else:
        return {}
    # Restrict matches to primary, external or ex-CERN accounts.
    query_filter = "(& %s (| (employeetype=primary) (employeetype=external) (employeetype=ExCern) ) )" % query
    try:
        results = connection.search_st(CFG_CERN_LDAP_BASE, ldap.SCOPE_SUBTREE,
                                       query_filter, timeout=5)
    except ldap.LDAPError:
        ## Connection error? Reconnect once and retry the same search.
        sleep(1)
        connection = _ldap_connection_pool[get_ident()] = _cern_ldap_login()
        results = connection.search_st(CFG_CERN_LDAP_BASE, ldap.SCOPE_SUBTREE,
                                       query_filter, timeout=5)
    if len(results) > 1:
        ## Multiple matches (e.g. one ExCern and one Primary account).
        ## Precedence: an enabled Primary account (userAccountControl 512)
        ## wins outright; otherwise ExCern beats Primary.
        types = {}
        for result in results:
            if result[1]['employeeType'][0] == 'Primary' and result[1]['userAccountControl'][0] == '512':
                return result[1]
            types[result[1]['employeeType'][0]] = result[1]
        if 'ExCern' in types and 'Primary' in types:
            return types['ExCern']
        if 'Primary' in types:
            return types['Primary']
        ## Ok otherwise we just pick up something :-)
    # Single (or arbitrary first) match, or nothing at all.
    if results:
        return results[0][1]
    else:
        return {}
| gpl-2.0 |
samsu/neutron | plugins/vmware/dbexts/servicerouter.py | 14 | 1025 | # Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.db import l3_dvr_db
from neutron.plugins.vmware.extensions import servicerouter
class ServiceRouter_mixin(l3_dvr_db.L3_NAT_with_dvr_db_mixin):
    """Mixin class to enable service router support.

    Extends the DVR-capable L3 NAT mixin by registering the
    ``service_router`` extension attribute (default False) so routers can
    be flagged as service routers via the API.
    """
    # Append the service-router flag to the attributes the base mixin
    # already exposes on router resources.
    extra_attributes = (
        l3_dvr_db.L3_NAT_with_dvr_db_mixin.extra_attributes + [{
            'name': servicerouter.SERVICE_ROUTER,
            'default': False
        }])
| apache-2.0 |
frreiss/tensorflow-fred | tensorflow/python/eager/memory_tests/memory_test.py | 15 | 3925 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for memory leaks in eager execution.
It is possible that this test suite will eventually become flaky due to taking
too long to run (since the tests iterate many times), but for now they are
helpful for finding memory leaks since not all PyObject leaks are found by
introspection (test_util decorators). Please be careful adding new tests here.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import backprop
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.eager.memory_tests import memory_test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients as gradient_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.variables import Variable
class MemoryTest(test.TestCase):
  """Leak-regression tests for TensorFlow eager execution.

  Each test runs a small workload many times through
  ``memory_test_util.assert_no_leak`` and fails when resident memory grows
  past the given absolute threshold (MB).
  """

  def testMemoryLeakAnonymousVariable(self):
    """Creating and dropping a Variable repeatedly must not leak."""
    if not memory_test_util.memory_profiler_is_available():
      self.skipTest("memory_profiler required to run this test")
    def f():
      # Create a variable and immediately drop the only reference to it.
      inputs = Variable(array_ops.zeros([32, 100], dtypes.float32))
      del inputs
    memory_test_util.assert_no_leak(f, num_iters=10000)

  def testMemoryLeakInFunction(self):
    """Defining and tracing a tf.function per iteration must not leak."""
    if not memory_test_util.memory_profiler_is_available():
      self.skipTest("memory_profiler required to run this test")
    def f():
      # A fresh function object is defined and traced on every call.
      @def_function.function
      def graph(x):
        return x * x + x
      graph(constant_op.constant(42))
    memory_test_util.assert_no_leak(
        f, num_iters=1000, increase_threshold_absolute_mb=30)

  @test_util.assert_no_new_pyobjects_executing_eagerly
  def testNestedFunctionsDeleted(self):
    """Nested tf.functions must be collectable; checks second-order grads."""
    @def_function.function
    def f(x):
      @def_function.function
      def my_sin(x):
        return math_ops.sin(x)
      return my_sin(x)
    x = constant_op.constant(1.)
    # Nested tapes give the first and second derivative of sin at x=1.
    with backprop.GradientTape() as t1:
      t1.watch(x)
      with backprop.GradientTape() as t2:
        t2.watch(x)
        y = f(x)
      dy_dx = t2.gradient(y, x)
    dy2_dx2 = t1.gradient(dy_dx, x)
    self.assertAllClose(0.84147096, y.numpy())  # sin(1.)
    self.assertAllClose(0.54030230, dy_dx.numpy())  # cos(1.)
    self.assertAllClose(-0.84147096, dy2_dx2.numpy())  # -sin(1.)

  def testMemoryLeakInGlobalGradientRegistry(self):
    # Past leak: b/139819011
    if not memory_test_util.memory_profiler_is_available():
      self.skipTest("memory_profiler required to run this test")
    def f():
      @def_function.function(autograph=False)
      def graph(x):
        @def_function.function(autograph=False)
        def cubed(a):
          return a * a * a
        y = cubed(x)
        # To ensure deleting the function does not affect the gradient
        # computation.
        del cubed
        return gradient_ops.gradients(gradient_ops.gradients(y, x), x)
      return graph(constant_op.constant(1.5))[0].numpy()
    memory_test_util.assert_no_leak(
        f, num_iters=300, increase_threshold_absolute_mb=50)
if __name__ == "__main__":
test.main()
| apache-2.0 |
albertomurillo/ansible | lib/ansible/modules/remote_management/redfish/redfish_config.py | 2 | 5834 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: redfish_config
version_added: "2.7"
short_description: Manages Out-Of-Band controllers using Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
set or update a configuration attribute.
- Manages BIOS configuration settings.
- Manages OOB controller configuration settings.
options:
category:
required: true
description:
- Category to execute on OOB controller
command:
required: true
description:
- List of commands to execute on OOB controller
baseuri:
required: true
description:
- Base URI of OOB controller
username:
required: true
description:
- User for authentication with OOB controller
version_added: "2.8"
password:
required: true
description:
- Password for authentication with OOB controller
bios_attribute_name:
required: false
description:
- name of BIOS attribute to update
default: 'null'
version_added: "2.8"
bios_attribute_value:
required: false
description:
- value of BIOS attribute to update
default: 'null'
version_added: "2.8"
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
version_added: "2.8"
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Set BootMode to UEFI
redfish_config:
category: Systems
command: SetBiosAttributes
bios_attribute_name: BootMode
bios_attribute_value: Uefi
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Set BootMode to Legacy BIOS
redfish_config:
category: Systems
command: SetBiosAttributes
bios_attribute_name: BootMode
bios_attribute_value: Bios
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Enable PXE Boot for NIC1
redfish_config:
category: Systems
command: SetBiosAttributes
bios_attribute_name: PxeDev1EnDis
bios_attribute_value: Enabled
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Set BIOS default settings with a timeout of 20 seconds
redfish_config:
category: Systems
command: SetBiosDefaultSettings
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
timeout: 20
'''
RETURN = '''
msg:
description: Message with action result or error description
returned: always
type: str
sample: "Action was successful"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils
from ansible.module_utils._text import to_native
# More will be added as module features are expanded
CATEGORY_COMMANDS_ALL = {
"Systems": ["SetBiosDefaultSettings", "SetBiosAttributes"]
}
def main():
    """Entry point of the redfish_config Ansible module.

    Validates the requested category/commands, talks to the Redfish
    out-of-band controller via RedfishUtils and exits through
    module.exit_json / module.fail_json.
    """
    result = {}
    module = AnsibleModule(
        argument_spec=dict(
            category=dict(required=True),
            command=dict(required=True, type='list'),
            baseuri=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            bios_attribute_name=dict(default='null'),
            bios_attribute_value=dict(default='null'),
            timeout=dict(type='int', default=10)
        ),
        supports_check_mode=False
    )
    category = module.params['category']
    command_list = module.params['command']
    # admin credentials used for authentication with the OOB controller
    creds = {'user': module.params['username'],
             'pswd': module.params['password']}
    # per-request timeout (seconds) for URL calls to the controller
    timeout = module.params['timeout']
    # BIOS attribute name/value pair to update (defaults are the string 'null')
    bios_attributes = {'bios_attr_name': module.params['bios_attribute_name'],
                       'bios_attr_value': module.params['bios_attribute_value']}
    # Build root URI of the Redfish service
    root_uri = "https://" + module.params['baseuri']
    rf_utils = RedfishUtils(creds, root_uri, timeout, module)
    # Check that Category is valid
    if category not in CATEGORY_COMMANDS_ALL:
        module.fail_json(msg=to_native("Invalid Category '%s'. Valid Categories = %s" % (category, CATEGORY_COMMANDS_ALL.keys())))
    # Check that all commands are valid
    for cmd in command_list:
        # Fail if even one command given is invalid
        if cmd not in CATEGORY_COMMANDS_ALL[category]:
            module.fail_json(msg=to_native("Invalid Command '%s'. Valid Commands = %s" % (cmd, CATEGORY_COMMANDS_ALL[category])))
    # Organize by Categories / Commands
    if category == "Systems":
        # execute only if we find a System resource
        result = rf_utils._find_systems_resource()
        if result['ret'] is False:
            module.fail_json(msg=to_native(result['msg']))
        for command in command_list:
            if command == "SetBiosDefaultSettings":
                result = rf_utils.set_bios_default_settings()
            elif command == "SetBiosAttributes":
                result = rf_utils.set_bios_attributes(bios_attributes)
    # Return data back or fail with proper message
    if result['ret'] is True:
        module.exit_json(changed=result['changed'], msg=to_native(result['msg']))
    else:
        module.fail_json(msg=to_native(result['msg']))


if __name__ == '__main__':
    main()
| gpl-3.0 |
frossigneux/blazar | climate/openstack/common/fileutils.py | 2 | 3855 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
import tempfile
from climate.openstack.common import excutils
from climate.openstack.common import log as logging
LOG = logging.getLogger(__name__)
_FILE_CACHE = {}
def ensure_tree(path):
    """Create a directory, including any missing ancestor directories.

    Behaves like ``mkdir -p``: an already-existing directory is silently
    accepted.

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Tolerate only EEXIST, and only when the existing entry really is
        # a directory; every other failure is re-raised unchanged.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def read_cached_file(filename, force_reload=False):
    """Return the contents of *filename*, re-reading only when it changed.

    A module-level cache keyed by filename stores the data together with
    the file's mtime; the file is read again when its mtime has advanced.

    :param force_reload: Whether to reload the file.
    :returns: A tuple ``(reloaded, data)`` where ``reloaded`` tells
        whether the data was freshly read from disk.
    """
    global _FILE_CACHE

    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    entry = _FILE_CACHE.setdefault(filename, {})
    current_mtime = os.path.getmtime(filename)
    fresh = False
    if not entry or current_mtime > entry.get('mtime', 0):
        LOG.debug("Reloading cached file %s" % filename)
        with open(filename) as fap:
            entry['data'] = fap.read()
        entry['mtime'] = current_mtime
        fresh = True
    return (fresh, entry['data'])
def delete_if_exists(path, remove=os.unlink):
    """Delete a file, treating an already-missing file as success.

    :param path: File to delete
    :param remove: Optional function used to remove the path
    """
    try:
        remove(path)
    except OSError as err:
        # A path that is already gone is the desired end state; any other
        # failure (permissions, is-a-directory, ...) propagates.
        if err.errno == errno.ENOENT:
            return
        raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
    """Protect code that wants to operate on PATH atomically.

    Any exception raised inside the ``with`` block causes PATH to be
    removed before the original exception is re-raised.

    :param path: File to work with
    :param remove: Optional function to remove passed path
    """
    try:
        yield
    except Exception:
        # save_and_reraise_exception re-raises the original exception
        # after the cleanup callback has run.
        with excutils.save_and_reraise_exception():
            remove(path)
def file_open(*args, **kwargs):
    """Open file

    see built-in file() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    # NOTE(review): the file() builtin exists only in Python 2; this
    # module as a whole targets Python 2 — confirm before porting.
    return file(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
    """Create a temporary file holding *content* and return its path.

    When *path* is given the file is placed inside it (the directory tree
    is created first if missing); otherwise the platform default
    temporary directory is used. Useful e.g. for writing configuration
    files in tests.

    :param content: bytes written to the new file
    :param path: same as parameter 'dir' for mkstemp
    :param suffix: same as parameter 'suffix' for mkstemp
    :param prefix: same as parameter 'prefix' for mkstemp
    """
    if path:
        ensure_tree(path)

    fd, abspath = tempfile.mkstemp(suffix=suffix, dir=path, prefix=prefix)
    try:
        os.write(fd, content)
    finally:
        # Close the descriptor even when the write fails so it never leaks.
        os.close(fd)
    return abspath
| apache-2.0 |
msmolens/VTK | IO/EnSight/Testing/Python/EnSightGoldElements.py | 20 | 1238 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# create a rendering window and renderer
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
renWin.StereoCapableWindowOn()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# read the EnSight Gold test dataset
reader = vtk.vtkGenericEnSightReader()
# Make sure all algorithms use the composite data pipeline
cdp = vtk.vtkCompositeDataPipeline()
reader.SetDefaultExecutivePrototype(cdp)
reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/elements.case")
# extract polygonal geometry from the (composite) dataset
geom0 = vtk.vtkGeometryFilter()
geom0.SetInputConnection(reader.GetOutputPort())
# color the geometry by the "pointScalars" point-data array, component 0
mapper0 = vtk.vtkHierarchicalPolyDataMapper()
mapper0.SetInputConnection(geom0.GetOutputPort())
mapper0.SetColorModeToMapScalars()
mapper0.SetScalarModeToUsePointFieldData()
mapper0.ColorByArrayComponent("pointScalars",0)
mapper0.SetScalarRange(0,112)
actor0 = vtk.vtkActor()
actor0.SetMapper(mapper0)
# assign our actor to the renderer
ren1.AddActor(actor0)
# enable user interface interactor
iren.Initialize()
# reset the executive prototype so later readers use the default pipeline;
# prevent the tk window from showing up then start the event loop
reader.SetDefaultExecutivePrototype(None)
# --- end of script --
| bsd-3-clause |
stackforge/networking-mlnx | networking_mlnx/db/models/sdn_maintenance_db.py | 2 | 1213 | # Copyright 2016 Mellanox Technologies, Ltd
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.db import model_base
import sqlalchemy as sa
from networking_mlnx.plugins.ml2.drivers.sdn import constants as sdn_const
class SdnMaintenance(model_base.BASEV2, model_base.HasId):
    """Row used to coordinate SDN journal maintenance work.

    A single record acts as a lock/state marker: ``state`` toggles
    between PENDING and PROCESSING and ``lock_updated`` records the last
    time the lock changed hands.
    """
    __tablename__ = 'sdn_maintenance'
    # Current maintenance state; restricted to the two journal states.
    state = sa.Column(sa.Enum(sdn_const.PENDING, sdn_const.PROCESSING),
                      nullable=False)
    # Name of the maintenance operation currently being processed.
    processing_operation = sa.Column(sa.String(70))
    # Timestamp of the last state change; refreshed automatically on update.
    lock_updated = sa.Column(sa.TIMESTAMP, nullable=False,
                             server_default=sa.func.now(),
                             onupdate=sa.func.now())
| apache-2.0 |
Lafaiet/Drop_Pobre | client/pynotify.py | 2 | 2373 | import sys
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from client_config import *
from xmlrpc import *
from threading import Thread
import os
def get_dir(path):
    """Split a '/'-separated path into (parent directory name, file name)."""
    parts = path.split('/')
    return parts[-2], parts[-1]
class custom_handler(FileSystemEventHandler):
    """Watchdog handler that forwards filesystem events to the server.

    Each callback reports the event type together with the (parent
    directory, file name) pair of the affected path via notify_server.
    """

    def on_created(self, event):
        folder, name = get_dir(event.src_path)
        notify_server("created", folder, name)

    def on_deleted(self, event):
        folder, name = get_dir(event.src_path)
        notify_server("deleted", folder, name)

    def on_modified(self, event):
        # Fix: the previous os.stat(event.src_path) call was unused and
        # could raise OSError when the file disappeared before the event
        # was handled; it has been removed.
        folder, name = get_dir(event.src_path)
        notify_server("modified", folder, name)
def sync(client, password,observer):
    """Poll the server forever and apply remote file changes locally.

    Every time_to_sync seconds the server is asked for a pending change
    record r. When one exists, the local watchdog observer is stopped
    (so the locally applied change does not echo back), the change is
    applied, and a fresh observer is started.

    Expected record layout (inferred from use -- TODO confirm against the
    server's client_sync implementation):
    r[0] change type "D"/"C"/"M", r[1] directory, r[2] file name,
    r[3..5] symmetric key, IV and file hash for transfers.
    """
    # NOTE(review): server endpoint is hard-coded; presumably should come
    # from client_config like the other settings -- confirm.
    server2 = xmlrpclib.Server('https://localhost:8443')
    while True:
        time.sleep(time_to_sync)
        r = server2.client_sync(client, password)
        #print r
        if len(r) > 1:
            observer.stop()
            f=path + "/" + r[1] + "/" + r[2]
            if r[0] == "D":
                print "Deleted!"
                # HACK: shell deletion; f is interpolated unescaped into a
                # shell command -- a hostile file name could inject commands.
                os.system("rm %s" % (f))
                pass
            if r[0] == "C":
                print "Created!"
                # Fetch the encrypted file, then verify it against the hash.
                sim_key,iv,f_h=r[3],r[4],r[5]
                f_server(f, sim_key, iv)
                if test_integrity(f, f_h):
                    print "Successfully Transfered!"
                else:
                    print "An error has occurred!"
            if r[0] == "M":
                print "Modified!!"
                sim_key,iv,f_h=r[3],r[4],r[5]
                f_server(f, sim_key, iv)
                if test_integrity(f, f_h):
                    print "Successfully Transfered!"
                else:
                    print "An error has occurred!"
            # Restart watching now that the remote change has been applied.
            observer = Observer()
            observer.schedule(custom_handler(),path,recursive=True)
            observer.start()
        else:
            print "No changes!"
def run_observer():
observer = Observer()
observer.schedule(custom_handler(),path,recursive=True)
observer.start()
print "Client Daemon running... "
th=Thread( target=sync, args = (client,password,observer ) )
th.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
| gpl-2.0 |
chosak/fdic-call-reports | web/settings/dev.py | 1 | 1865 | import os, socket
# Django development settings. Not for production: DEBUG is on, the
# secret key is checked in, and ALLOWED_HOSTS is empty.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Hard-coded key is acceptable only because these are dev-only settings.
SECRET_KEY = '4h=6tp37*3c&92f$y00%!r4+s!l*w*iij07n-cjqk&tuf=%3wa'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.staticfiles',
    'debug_toolbar',
    'reports',
)
MIDDLEWARE_CLASSES = (
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'reports.urls'
WSGI_APPLICATION = 'wsgi.application'
# MySQL credentials come from the environment; a missing variable raises
# KeyError at import time, failing fast on misconfiguration.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'django',
        'HOST': os.environ['DJANGO_RDS_HOST'],
        'USER': os.environ['DJANGO_RDS_USER'],
        'PASSWORD': os.environ['DJANGO_RDS_PASSWORD'],
        'PORT': 3306,
    },
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
# Console-only logging for the 'reports' app at DEBUG level.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': True,
    'formatters': {
        'standard': {
            'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
        },
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'standard',
        },
    },
    'loggers': {
        'reports': {
            'handlers': ['console'],
            'level': 'DEBUG',
        }
    },
}
# Probe for a local memcached on the default port at import time: use it
# when reachable, otherwise fall back to the no-op dummy cache.
try:
    socket.create_connection(('localhost', 11211))
except socket.error:
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
        }
    }
else:
    CACHES = {
        'default': {
            'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
            'LOCATION': ['localhost:11211',]
        }
    }
vishnugonela/boto | boto/cloudhsm/layer1.py | 135 | 16187 | # Copyright (c) 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.cloudhsm import exceptions
class CloudHSMConnection(AWSQueryConnection):
    """
    Connection to the AWS CloudHSM service.

    Each public method builds a JSON request body and posts it through
    ``make_request`` with the matching CloudHSM API action name.
    """
    # CloudHSM API version and default endpoint information.
    APIVersion = "2014-05-30"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "cloudhsm.us-east-1.amazonaws.com"
    ServiceName = "CloudHSM"
    # X-Amz-Target header prefix used by the JSON-over-HTTP protocol.
    TargetPrefix = "CloudHsmFrontendService"
    ResponseError = JSONResponseError

    # Maps service fault names to the exception classes raised locally.
    _faults = {
        "InvalidRequestException": exceptions.InvalidRequestException,
        "CloudHsmServiceException": exceptions.CloudHsmServiceException,
        "CloudHsmInternalException": exceptions.CloudHsmInternalException,
    }
    def __init__(self, **kwargs):
        """Create a CloudHSM connection.

        Accepts the usual AWSQueryConnection keyword arguments plus an
        optional ``region`` (RegionInfo); when absent, the default
        region/endpoint defined on the class is used.
        """
        region = kwargs.pop('region', None)
        if not region:
            region = RegionInfo(self, self.DefaultRegionName,
                                self.DefaultRegionEndpoint)
        # Derive the host from the region unless the caller set one.
        if 'host' not in kwargs or kwargs['host'] is None:
            kwargs['host'] = region.endpoint
        super(CloudHSMConnection, self).__init__(**kwargs)
        self.region = region
    def _required_auth_capability(self):
        # CloudHSM requests are signed with AWS Signature Version 4.
        return ['hmac-v4']
def create_hapg(self, label):
"""
Creates a high-availability partition group. A high-
availability partition group is a group of partitions that
spans multiple physical HSMs.
:type label: string
:param label: The label of the new high-availability partition group.
"""
params = {'Label': label, }
return self.make_request(action='CreateHapg',
body=json.dumps(params))
def create_hsm(self, subnet_id, ssh_key, iam_role_arn, subscription_type,
eni_ip=None, external_id=None, client_token=None,
syslog_ip=None):
"""
Creates an uninitialized HSM instance. Running this command
provisions an HSM appliance and will result in charges to your
AWS account for the HSM.
:type subnet_id: string
:param subnet_id: The identifier of the subnet in your VPC in which to
place the HSM.
:type ssh_key: string
:param ssh_key: The SSH public key to install on the HSM.
:type eni_ip: string
:param eni_ip: The IP address to assign to the HSM's ENI.
:type iam_role_arn: string
:param iam_role_arn: The ARN of an IAM role to enable the AWS CloudHSM
service to allocate an ENI on your behalf.
:type external_id: string
:param external_id: The external ID from **IamRoleArn**, if present.
:type subscription_type: string
:param subscription_type: The subscription type.
:type client_token: string
:param client_token: A user-defined token to ensure idempotence.
Subsequent calls to this action with the same token will be
ignored.
:type syslog_ip: string
:param syslog_ip: The IP address for the syslog monitoring server.
"""
params = {
'SubnetId': subnet_id,
'SshKey': ssh_key,
'IamRoleArn': iam_role_arn,
'SubscriptionType': subscription_type,
}
if eni_ip is not None:
params['EniIp'] = eni_ip
if external_id is not None:
params['ExternalId'] = external_id
if client_token is not None:
params['ClientToken'] = client_token
if syslog_ip is not None:
params['SyslogIp'] = syslog_ip
return self.make_request(action='CreateHsm',
body=json.dumps(params))
def create_luna_client(self, certificate, label=None):
"""
Creates an HSM client.
:type label: string
:param label: The label for the client.
:type certificate: string
:param certificate: The contents of a Base64-Encoded X.509 v3
certificate to be installed on the HSMs used by this client.
"""
params = {'Certificate': certificate, }
if label is not None:
params['Label'] = label
return self.make_request(action='CreateLunaClient',
body=json.dumps(params))
def delete_hapg(self, hapg_arn):
"""
Deletes a high-availability partition group.
:type hapg_arn: string
:param hapg_arn: The ARN of the high-availability partition group to
delete.
"""
params = {'HapgArn': hapg_arn, }
return self.make_request(action='DeleteHapg',
body=json.dumps(params))
def delete_hsm(self, hsm_arn):
"""
Deletes an HSM. Once complete, this operation cannot be undone
and your key material cannot be recovered.
:type hsm_arn: string
:param hsm_arn: The ARN of the HSM to delete.
"""
params = {'HsmArn': hsm_arn, }
return self.make_request(action='DeleteHsm',
body=json.dumps(params))
def delete_luna_client(self, client_arn):
"""
Deletes a client.
:type client_arn: string
:param client_arn: The ARN of the client to delete.
"""
params = {'ClientArn': client_arn, }
return self.make_request(action='DeleteLunaClient',
body=json.dumps(params))
def describe_hapg(self, hapg_arn):
"""
Retrieves information about a high-availability partition
group.
:type hapg_arn: string
:param hapg_arn: The ARN of the high-availability partition group to
describe.
"""
params = {'HapgArn': hapg_arn, }
return self.make_request(action='DescribeHapg',
body=json.dumps(params))
def describe_hsm(self, hsm_arn=None, hsm_serial_number=None):
"""
Retrieves information about an HSM. You can identify the HSM
by its ARN or its serial number.
:type hsm_arn: string
:param hsm_arn: The ARN of the HSM. Either the HsmArn or the
SerialNumber parameter must be specified.
:type hsm_serial_number: string
:param hsm_serial_number: The serial number of the HSM. Either the
HsmArn or the HsmSerialNumber parameter must be specified.
"""
params = {}
if hsm_arn is not None:
params['HsmArn'] = hsm_arn
if hsm_serial_number is not None:
params['HsmSerialNumber'] = hsm_serial_number
return self.make_request(action='DescribeHsm',
body=json.dumps(params))
def describe_luna_client(self, client_arn=None,
certificate_fingerprint=None):
"""
Retrieves information about an HSM client.
:type client_arn: string
:param client_arn: The ARN of the client.
:type certificate_fingerprint: string
:param certificate_fingerprint: The certificate fingerprint.
"""
params = {}
if client_arn is not None:
params['ClientArn'] = client_arn
if certificate_fingerprint is not None:
params['CertificateFingerprint'] = certificate_fingerprint
return self.make_request(action='DescribeLunaClient',
body=json.dumps(params))
def get_config(self, client_arn, client_version, hapg_list):
"""
Gets the configuration files necessary to connect to all high
availability partition groups the client is associated with.
:type client_arn: string
:param client_arn: The ARN of the client.
:type client_version: string
:param client_version: The client version.
:type hapg_list: list
:param hapg_list: A list of ARNs that identify the high-availability
partition groups that are associated with the client.
"""
params = {
'ClientArn': client_arn,
'ClientVersion': client_version,
'HapgList': hapg_list,
}
return self.make_request(action='GetConfig',
body=json.dumps(params))
def list_available_zones(self):
"""
Lists the Availability Zones that have available AWS CloudHSM
capacity.
"""
params = {}
return self.make_request(action='ListAvailableZones',
body=json.dumps(params))
def list_hapgs(self, next_token=None):
"""
Lists the high-availability partition groups for the account.
This operation supports pagination with the use of the
NextToken member. If more results are available, the NextToken
member of the response contains a token that you pass in the
next call to ListHapgs to retrieve the next set of items.
:type next_token: string
:param next_token: The NextToken value from a previous call to
ListHapgs. Pass null if this is the first call.
"""
params = {}
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListHapgs',
body=json.dumps(params))
def list_hsms(self, next_token=None):
"""
Retrieves the identifiers of all of the HSMs provisioned for
the current customer.
This operation supports pagination with the use of the
NextToken member. If more results are available, the NextToken
member of the response contains a token that you pass in the
next call to ListHsms to retrieve the next set of items.
:type next_token: string
:param next_token: The NextToken value from a previous call to
ListHsms. Pass null if this is the first call.
"""
params = {}
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListHsms',
body=json.dumps(params))
def list_luna_clients(self, next_token=None):
"""
Lists all of the clients.
This operation supports pagination with the use of the
NextToken member. If more results are available, the NextToken
member of the response contains a token that you pass in the
next call to ListLunaClients to retrieve the next set of
items.
:type next_token: string
:param next_token: The NextToken value from a previous call to
ListLunaClients. Pass null if this is the first call.
"""
params = {}
if next_token is not None:
params['NextToken'] = next_token
return self.make_request(action='ListLunaClients',
body=json.dumps(params))
def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None):
    """
    Modify an existing high-availability partition group.

    :type hapg_arn: string
    :param hapg_arn: The ARN of the high-availability partition group to
        modify.

    :type label: string
    :param label: The new label for the high-availability partition group.

    :type partition_serial_list: list
    :param partition_serial_list: The list of partition serial numbers to
        make members of the high-availability partition group.
    """
    params = {'HapgArn': hapg_arn}
    # Optional members are included only when supplied by the caller.
    optional = {'Label': label, 'PartitionSerialList': partition_serial_list}
    params.update({k: v for k, v in optional.items() if v is not None})
    return self.make_request(action='ModifyHapg', body=json.dumps(params))
def modify_hsm(self, hsm_arn, subnet_id=None, eni_ip=None,
               iam_role_arn=None, external_id=None, syslog_ip=None):
    """
    Modify an HSM.

    :type hsm_arn: string
    :param hsm_arn: The ARN of the HSM to modify.

    :type subnet_id: string
    :param subnet_id: The new identifier of the subnet that the HSM is in.

    :type eni_ip: string
    :param eni_ip: The new IP address for the elastic network interface
        attached to the HSM.

    :type iam_role_arn: string
    :param iam_role_arn: The new IAM role ARN.

    :type external_id: string
    :param external_id: The new external ID.

    :type syslog_ip: string
    :param syslog_ip: The new IP address for the syslog monitoring server.
    """
    params = {'HsmArn': hsm_arn}
    # Only the fields the caller actually set are sent to the service.
    optional = (
        ('SubnetId', subnet_id),
        ('EniIp', eni_ip),
        ('IamRoleArn', iam_role_arn),
        ('ExternalId', external_id),
        ('SyslogIp', syslog_ip),
    )
    params.update((key, value) for key, value in optional if value is not None)
    return self.make_request(action='ModifyHsm', body=json.dumps(params))
def modify_luna_client(self, client_arn, certificate):
    """
    Modify the certificate used by the client.

    This action can potentially start a workflow to install the new
    certificate on the client's HSMs.

    :type client_arn: string
    :param client_arn: The ARN of the client.

    :type certificate: string
    :param certificate: The new certificate for the client.
    """
    return self.make_request(
        action='ModifyLunaClient',
        body=json.dumps({'ClientArn': client_arn, 'Certificate': certificate}),
    )
def make_request(self, action, body):
    """POST a JSON payload to the CloudHSM service and decode the reply.

    :param action: service operation name; combined with ``TargetPrefix``
        to form the ``X-Amz-Target`` header.
    :param body: JSON-encoded request parameters.
    :returns: the decoded JSON response body, or None for an empty
        200 response.
    :raises: the fault class mapped from the response's ``__type`` field
        in ``self._faults`` (``ResponseError`` when unmapped) for any
        non-200 status.
    """
    headers = {
        'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
        'Host': self.region.endpoint,
        'Content-Type': 'application/x-amz-json-1.1',
        'Content-Length': str(len(body)),
    }
    http_request = self.build_base_http_request(
        method='POST', path='/', auth_path='/', params={},
        headers=headers, data=body)
    # Retries (up to 10) are delegated to the underlying request executor.
    response = self._mexe(http_request, sender=None,
                          override_num_retries=10)
    response_body = response.read().decode('utf-8')
    boto.log.debug(response_body)
    if response.status == 200:
        if response_body:
            return json.loads(response_body)
        # NOTE(review): an empty 200 body falls through and returns None.
    else:
        # Map the service fault name onto a Python exception class.
        json_body = json.loads(response_body)
        fault_name = json_body.get('__type', None)
        exception_class = self._faults.get(fault_name, self.ResponseError)
        raise exception_class(response.status, response.reason,
                              body=json_body)
| mit |
pnichols104/python-koans | python2/koans/about_scope.py | 100 | 2871 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import jims
import joes
counter = 0 # Global
class AboutScope(Koan):
    # Teaching exercise: the blanks (__, ____, _____) are intentional and
    # must be filled in by the student; do not "fix" them.
    #
    # NOTE:
    # Look in jims.py and joes.py to see definitions of Dog used
    # for this set of tests
    #

    def test_dog_is_not_available_in_the_current_scope(self):
        # 'Dog' is only defined inside the jims/joes modules, so the bare
        # reference raises an exception whose message is asserted on.
        try:
            fido = Dog()
        except Exception as ex:
            self.assertMatch(__, ex[0])

    def test_you_can_reference_nested_classes_using_the_scope_operator(self):
        fido = jims.Dog()
        # name 'jims' module name is taken from jims.py filename
        rover = joes.Dog()
        self.assertEqual(__, fido.identify())
        self.assertEqual(__, rover.identify())

        self.assertEqual(____, type(fido) == type(rover))
        self.assertEqual(____, jims.Dog == joes.Dog)

    # ------------------------------------------------------------------

    class str(object):
        # Deliberately shadows the builtin 'str' within this class body.
        pass

    def test_bare_bones_class_names_do_not_assume_the_current_scope(self):
        self.assertEqual(____, AboutScope.str == str)

    def test_nested_string_is_not_the_same_as_the_system_string(self):
        self.assertEqual(____, self.str == type("HI"))

    def test_str_without_self_prefix_stays_in_the_global_scope(self):
        self.assertEqual(____, str == type("HI"))

    # ------------------------------------------------------------------

    PI = 3.1416

    def test_constants_are_defined_with_an_initial_uppercase_letter(self):
        self.assertAlmostEqual(_____, self.PI)
        # Note, floating point numbers in python are not precise.
        # assertAlmostEqual will check that it is 'close enough'

    def test_constants_are_assumed_by_convention_only(self):
        self.PI = "rhubarb"
        self.assertEqual(_____, self.PI)
        # There aren't any real constants in python. Its up to the developer
        # to keep to the convention and not modify them.

    # ------------------------------------------------------------------

    def increment_using_local_counter(self, counter):
        # Rebinds only the local parameter; the module-level 'counter'
        # is untouched.
        counter = counter + 1

    def increment_using_global_counter(self):
        global counter
        counter = counter + 1

    def test_incrementing_with_local_counter(self):
        global counter
        start = counter
        self.increment_using_local_counter(start)
        self.assertEqual(____, counter == start + 1)

    def test_incrementing_with_global_counter(self):
        global counter
        start = counter
        self.increment_using_global_counter()
        self.assertEqual(____, counter == start + 1)

    # ------------------------------------------------------------------

    global deadly_bingo
    deadly_bingo = [4, 8, 15, 16, 23, 42]

    def test_global_attributes_can_be_created_in_the_middle_of_a_class(self):
        self.assertEqual(__, deadly_bingo[5])
| mit |
CristianBB/SickRage | sickbeard/providers/hounddawgs.py | 2 | 7436 | # Author: Idan Gutman
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
import re
import traceback
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.bs4_parser import BS4Parser
from sickbeard.providers import generic
class HoundDawgsProvider(generic.TorrentProvider):
    """Torrent provider for the HoundDawgs private (Danish) tracker."""

    def __init__(self):

        generic.TorrentProvider.__init__(self, "HoundDawgs")

        # User-configurable settings, populated by SickRage's config layer.
        self.username = None
        self.password = None
        self.ratio = None
        self.minseed = None
        self.minleech = None

        self.cache = HoundDawgsCache(self)

        self.urls = {'base_url': 'https://hounddawgs.org/',
                     'search': 'https://hounddawgs.org/torrents.php',
                     'login': 'https://hounddawgs.org/login.php'}

        self.url = self.urls['base_url']

        # Restrict searches to the site's TV categories, sorted by
        # seeders descending.
        self.search_params = {
            "filter_cat[85]": 1,
            "filter_cat[58]": 1,
            "filter_cat[57]": 1,
            "filter_cat[74]": 1,
            "filter_cat[92]": 1,
            "filter_cat[93]": 1,
            "order_by": "s3",
            "order_way": "desc",
            "type": '',
            "userid": '',
            "searchstr": '',
            "searchimdb": '',
            "searchtags": ''
        }

    def _doLogin(self):
        """Log in to the tracker. Returns True on success, False otherwise."""

        login_params = {'username': self.username,
                        'password': self.password,
                        'keeplogged': 'on',
                        'login': 'Login'}

        # Prime the session cookies before posting the credentials.
        self.getURL(self.urls['base_url'], timeout=30)

        response = self.getURL(self.urls['login'], post_data=login_params, timeout=30)
        if not response:
            logger.log(u"Unable to connect to provider", logger.WARNING)
            return False

        # The site answers with a Danish error page (or the login page
        # again) when the credentials or cookies are rejected.
        if re.search('Dit brugernavn eller kodeord er forkert.', response) \
                or re.search('<title>Login :: HoundDawgs</title>', response) \
                or re.search('Dine cookies er ikke aktiveret.', response):
            logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
            return False

        return True

    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):
        """Search the tracker and return result tuples.

        :param search_strings: dict mapping mode ('Season'/'Episode'/'RSS')
            to a list of query strings.
        :returns: list of (title, url, size, seeders, leechers) tuples.
        """

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        if not self._doLogin():
            return results

        for mode in search_strings.keys():
            logger.log(u"Search Mode: %s" % mode, logger.DEBUG)
            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    logger.log(u"Search string: %s " % search_string, logger.DEBUG)

                self.search_params['searchstr'] = search_string

                data = self.getURL(self.urls['search'], params=self.search_params)
                # FIX: getURL returns a falsy value when the request fails;
                # the old code called data.find() unconditionally and raised
                # AttributeError on connection problems.
                if not data:
                    continue

                # Parse only from the results table onward to keep the
                # HTML parser fast.
                strTableStart = "<table class=\"torrent_table"
                startTableIndex = data.find(strTableStart)
                trimmedData = data[startTableIndex:]
                if not trimmedData:
                    continue

                try:
                    with BS4Parser(trimmedData, features=["html5lib", "permissive"]) as html:
                        result_table = html.find('table', {'id': 'torrent_table'})

                        if not result_table:
                            logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                            continue

                        result_tbody = result_table.find('tbody')
                        entries = result_tbody.contents
                        # Drop the whitespace text nodes interleaved with rows.
                        del entries[1::2]

                        # Skip the header row.
                        for result in entries[1:]:

                            torrent = result.find_all('td')
                            if len(torrent) <= 1:
                                break

                            allAs = (torrent[1]).find_all('a')

                            try:
                                title = allAs[2].string
                                # Strip custom/dk/subs markers so the name
                                # passes SickRage's scene-name checks
                                # (feature requested in the forum).
                                for junk in ("custom.", "CUSTOM.", "Custom.",
                                             "dk", "DK", "Dk",
                                             "subs.", "SUBS.", "Subs."):
                                    title = title.replace(junk, "")

                                download_url = self.urls['base_url'] + allAs[0].attrs['href']
                                # FIXME: size/seeders/leechers are not parsed
                                # from the page yet, so the min-seed/leech
                                # filter cannot be applied.
                                size = -1
                                seeders = 1
                                leechers = 0
                            except (AttributeError, TypeError):
                                continue

                            if not title or not download_url:
                                continue

                            item = title, download_url, size, seeders, leechers
                            if mode != 'RSS':
                                logger.log(u"Found result: %s " % title, logger.DEBUG)

                            items[mode].append(item)

                except Exception:
                    logger.log(u"Failed parsing provider. Traceback: %s" % traceback.format_exc(), logger.ERROR)

            # For each search mode sort all the items by seeders if available
            items[mode].sort(key=lambda tup: tup[3], reverse=True)

            results += items[mode]

        return results

    def seedRatio(self):
        """Return the user-configured seed ratio for this provider."""
        return self.ratio
class HoundDawgsCache(tvcache.TVCache):
    """RSS result cache for the HoundDawgs provider."""

    def __init__(self, provider_obj):
        tvcache.TVCache.__init__(self, provider_obj)
        self.minTime = 20  # only poll HoundDawgs every 20 minutes max

    def _getRSSData(self):
        """Fetch the latest torrents via an empty RSS-mode search."""
        return {'entries': self.provider._doSearch({'RSS': ['']})}
provider = HoundDawgsProvider()
| gpl-3.0 |
venmo/ansible | lib/ansible/plugins/action/group_by.py | 172 | 1401 | # Copyright 2012, Jeroen Hoekx <jeroen@hoekx.be>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import *
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
    ''' Create inventory groups based on variables '''

    ### We need to be able to modify the inventory
    BYPASS_HOST_LOOP = True
    TRANSFERS_FILES = False

    def run(self, tmp=None, task_vars=dict()):
        """Build a sanitized group name from the 'key' task argument.

        Returns a result dict telling the executor to add the host to
        that group, or a failure dict when 'key' is missing.
        """
        # NOTE: the mutable default for task_vars is kept for signature
        # compatibility with the ActionBase.run contract; it is never
        # mutated here.
        if 'key' not in self._task.args:
            return dict(failed=True, msg="the 'key' param is required when using group_by")

        # Group names cannot contain spaces; normalize them to dashes.
        group_name = self._task.args.get('key')
        group_name = group_name.replace(' ', '-')

        return dict(changed=True, add_group=group_name)
| gpl-3.0 |
mrquim/mrquimrepo | script.module.pycryptodome/lib/Crypto/SelfTest/PublicKey/test_ElGamal.py | 5 | 8650 | # -*- coding: utf-8 -*-
#
# SelfTest/PublicKey/test_ElGamal.py: Self-test for the ElGamal primitive
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.PublicKey.ElGamal"""
__revision__ = "$Id$"
import unittest
from Crypto.SelfTest.st_common import list_test_cases, a2b_hex, b2a_hex
from Crypto import Random
from Crypto.PublicKey import ElGamal
from Crypto.Util.number import bytes_to_long
from Crypto.Util.py3compat import *
class ElGamalTest(unittest.TestCase):
#
# Test vectors
#
# There seem to be no real ElGamal test vectors available in the
# public domain. The following test vectors have been generated
# with libgcrypt 1.5.0.
#
# Encryption
tve=[
{
# 256 bits
'p' :'BA4CAEAAED8CBE952AFD2126C63EB3B345D65C2A0A73D2A3AD4138B6D09BD933',
'g' :'05',
'y' :'60D063600ECED7C7C55146020E7A31C4476E9793BEAED420FEC9E77604CAE4EF',
'x' :'1D391BA2EE3C37FE1BA175A69B2C73A11238AD77675932',
'k' :'F5893C5BAB4131264066F57AB3D8AD89E391A0B68A68A1',
'pt' :'48656C6C6F207468657265',
'ct1':'32BFD5F487966CEA9E9356715788C491EC515E4ED48B58F0F00971E93AAA5EC7',
'ct2':'7BE8FBFF317C93E82FCEF9BD515284BA506603FEA25D01C0CB874A31F315EE68'
},
{
# 512 bits
'p' :'F1B18AE9F7B4E08FDA9A04832F4E919D89462FD31BF12F92791A93519F75076D6CE3942689CDFF2F344CAFF0F82D01864F69F3AECF566C774CBACF728B81A227',
'g' :'07',
'y' :'688628C676E4F05D630E1BE39D0066178CA7AA83836B645DE5ADD359B4825A12B02EF4252E4E6FA9BEC1DB0BE90F6D7C8629CABB6E531F472B2664868156E20C',
'x' :'14E60B1BDFD33436C0DA8A22FDC14A2CCDBBED0627CE68',
'k' :'38DBF14E1F319BDA9BAB33EEEADCAF6B2EA5250577ACE7',
'pt' :'48656C6C6F207468657265',
'ct1':'290F8530C2CC312EC46178724F196F308AD4C523CEABB001FACB0506BFED676083FE0F27AC688B5C749AB3CB8A80CD6F7094DBA421FB19442F5A413E06A9772B',
'ct2':'1D69AAAD1DC50493FB1B8E8721D621D683F3BF1321BE21BC4A43E11B40C9D4D9C80DE3AAC2AB60D31782B16B61112E68220889D53C4C3136EE6F6CE61F8A23A0'
}
]
# Signature
tvs=[
{
# 256 bits
'p' :'D2F3C41EA66530838A704A48FFAC9334F4701ECE3A97CEE4C69DD01AE7129DD7',
'g' :'05',
'y' :'C3F9417DC0DAFEA6A05C1D2333B7A95E63B3F4F28CC962254B3256984D1012E7',
'x' :'165E4A39BE44D5A2D8B1332D416BC559616F536BC735BB',
'k' :'C7F0C794A7EAD726E25A47FF8928013680E73C51DD3D7D99BFDA8F492585928F',
'h' :'48656C6C6F207468657265',
'sig1':'35CA98133779E2073EF31165AFCDEB764DD54E96ADE851715495F9C635E1E7C2',
'sig2':'0135B88B1151279FE5D8078D4FC685EE81177EE9802AB123A73925FC1CB059A7',
},
{
# 512 bits
'p' :'E24CF3A4B8A6AF749DCA6D714282FE4AABEEE44A53BB6ED15FBE32B5D3C3EF9CC4124A2ECA331F3C1C1B667ACA3766825217E7B5F9856648D95F05330C6A19CF',
'g' :'0B',
'y' :'2AD3A1049CA5D4ED207B2431C79A8719BB4073D4A94E450EA6CEE8A760EB07ADB67C0D52C275EE85D7B52789061EE45F2F37D9B2AE522A51C28329766BFE68AC',
'x' :'16CBB4F46D9ECCF24FF9F7E63CAA3BD8936341555062AB',
'k' :'8A3D89A4E429FD2476D7D717251FB79BF900FFE77444E6BB8299DC3F84D0DD57ABAB50732AE158EA52F5B9E7D8813E81FD9F79470AE22F8F1CF9AEC820A78C69',
'h' :'48656C6C6F207468657265',
'sig1':'BE001AABAFFF976EC9016198FBFEA14CBEF96B000CCC0063D3324016F9E91FE80D8F9325812ED24DDB2B4D4CF4430B169880B3CE88313B53255BD4EC0378586F',
'sig2':'5E266F3F837BA204E3BBB6DBECC0611429D96F8C7CE8F4EFDF9D4CB681C2A954468A357BF4242CEC7418B51DFC081BCD21299EF5B5A0DDEF3A139A1817503DDE',
}
]
def test_generate_180(self):
self._test_random_key(180)
def test_encryption(self):
for tv in self.tve:
d = self.convert_tv(tv, True)
key = ElGamal.construct(d['key'])
ct = key._encrypt(d['pt'], d['k'])
self.assertEquals(ct[0], d['ct1'])
self.assertEquals(ct[1], d['ct2'])
def test_decryption(self):
for tv in self.tve:
d = self.convert_tv(tv, True)
key = ElGamal.construct(d['key'])
pt = key._decrypt((d['ct1'], d['ct2']))
self.assertEquals(pt, d['pt'])
def test_signing(self):
for tv in self.tvs:
d = self.convert_tv(tv, True)
key = ElGamal.construct(d['key'])
sig1, sig2 = key._sign(d['h'], d['k'])
self.assertEquals(sig1, d['sig1'])
self.assertEquals(sig2, d['sig2'])
def test_verification(self):
for tv in self.tvs:
d = self.convert_tv(tv, True)
key = ElGamal.construct(d['key'])
# Positive test
res = key._verify( d['h'], (d['sig1'],d['sig2']) )
self.failUnless(res)
# Negative test
res = key._verify( d['h'], (d['sig1']+1,d['sig2']) )
self.failIf(res)
def test_bad_key3(self):
    """construct() must reject an invalid public key tuple (p, g, y).

    Each case starts from a fresh copy of the valid key so the three
    corruptions are tested independently.  (The old code bound ``tup``
    and ``tup0`` to the *same* list object, so ``tup = tup0`` did not
    reset anything and later cases ran against an already-corrupted key.)
    """
    good = list(self.convert_tv(self.tvs[0], 1)['key'])[:3]

    tup = list(good)
    tup[0] += 1  # p += 1 (not prime)
    self.assertRaises(ValueError, ElGamal.construct, tup)

    tup = list(good)
    tup[1] = 1  # g = 1
    self.assertRaises(ValueError, ElGamal.construct, tup)

    tup = list(good)
    tup[2] = tup[0] * 2  # y = 2*p
    self.assertRaises(ValueError, ElGamal.construct, tup)
def test_bad_key4(self):
    """construct() must reject a private key tuple with inconsistent x.

    Only one corruption is applied, so a single fresh copy suffices
    (the redundant ``tup = tup0 =`` alias of the old code is removed,
    matching the fixed test_bad_key3).
    """
    tup = list(self.convert_tv(self.tvs[0], 1)['key'])
    tup[3] += 1  # x += 1
    self.assertRaises(ValueError, ElGamal.construct, tup)
def convert_tv(self, tv, as_longs=0):
    """Convert a test vector from textual form (hexadecimal ascii
    to either integers or byte strings.

    Key components ('p', 'g', 'y', 'x') and signature halves are always
    converted to long integers; every value is when *as_longs* is true.
    The key components are moved out of the dict into an ordered list
    under the 'key' entry, ready for ElGamal.construct()."""
    key_comps = 'p','g','y','x'
    tv2 = {}
    for c in tv.keys():
        tv2[c] = a2b_hex(tv[c])
        if as_longs or c in key_comps or c in ('sig1','sig2'):
            tv2[c] = bytes_to_long(tv2[c])
    # Collect (p, g, y, x) in construct() order and drop the individual
    # entries from the dict.
    tv2['key']=[]
    for c in key_comps:
        tv2['key'] += [tv2[c]]
        del tv2[c]
    return tv2
def _test_random_key(self, bits):
elgObj = ElGamal.generate(bits, Random.new().read)
self._check_private_key(elgObj)
self._exercise_primitive(elgObj)
pub = elgObj.publickey()
self._check_public_key(pub)
self._exercise_public_primitive(elgObj)
def _check_private_key(self, elgObj):
# Check capabilities
self.failUnless(elgObj.has_private())
# Sanity check key data
self.failUnless(1<elgObj.g<(elgObj.p-1))
self.assertEquals(pow(elgObj.g, elgObj.p-1, elgObj.p), 1)
self.failUnless(1<elgObj.x<(elgObj.p-1))
self.assertEquals(pow(elgObj.g, elgObj.x, elgObj.p), elgObj.y)
def _check_public_key(self, elgObj):
# Check capabilities
self.failIf(elgObj.has_private())
# Sanity check key data
self.failUnless(1<elgObj.g<(elgObj.p-1))
self.assertEquals(pow(elgObj.g, elgObj.p-1, elgObj.p), 1)
def _exercise_primitive(self, elgObj):
# Test encryption/decryption
plaintext = 127218
ciphertext = elgObj._encrypt(plaintext, 123456789L)
plaintextP = elgObj._decrypt(ciphertext)
self.assertEquals(plaintext, plaintextP)
# Test signature/verification
signature = elgObj._sign(plaintext, 987654321L)
elgObj._verify(plaintext, signature)
def _exercise_public_primitive(self, elgObj):
plaintext = 92987276
ciphertext = elgObj._encrypt(plaintext, 123456789L)
def get_tests(config={}):
tests = []
tests += list_test_cases(ElGamalTest)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| gpl-2.0 |
pbaesse/Sissens | lib/python2.7/site-packages/sqlalchemy/ext/horizontal_shard.py | 32 | 4814 | # ext/horizontal_shard.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Horizontal sharding support.
Defines a rudimental 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.
For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.
"""
from .. import util
from ..orm.session import Session
from ..orm.query import Query
__all__ = ['ShardedSession', 'ShardedQuery']
class ShardedQuery(Query):
    """Query subclass that routes execution across multiple shards."""

    def __init__(self, *args, **kwargs):
        super(ShardedQuery, self).__init__(*args, **kwargs)
        # Chooser callables are supplied by the owning ShardedSession.
        self.id_chooser = self.session.id_chooser
        self.query_chooser = self.session.query_chooser
        # None means "query all shards selected by query_chooser".
        self._shard_id = None

    def set_shard(self, shard_id):
        """return a new query, limited to a single shard ID.

        all subsequent operations with the returned query will
        be against the single shard regardless of other state.
        """

        q = self._clone()
        q._shard_id = shard_id
        return q

    def _execute_and_instances(self, context):
        # Runs this query's statement on one shard and yields mapped
        # instances; the shard id is recorded on the execution context.
        def iter_for_shard(shard_id):
            context.attributes['shard_id'] = shard_id
            result = self._connection_from_session(
                mapper=self._mapper_zero(),
                shard_id=shard_id).execute(
                context.statement,
                self._params)
            return self.instances(result, context)

        if self._shard_id is not None:
            # Pinned to one shard via set_shard().
            return iter_for_shard(self._shard_id)
        else:
            # Fan out to every shard the chooser selects and concatenate.
            partial = []
            for shard_id in self.query_chooser(self):
                partial.extend(iter_for_shard(shard_id))

            # if some kind of in memory 'sorting'
            # were done, this is where it would happen
            return iter(partial)

    def get(self, ident, **kwargs):
        if self._shard_id is not None:
            # NOTE(review): **kwargs are not forwarded on this branch --
            # confirm whether that is intended.
            return super(ShardedQuery, self).get(ident)
        else:
            ident = util.to_list(ident)
            # Probe candidate shards in id_chooser order; first hit wins.
            for shard_id in self.id_chooser(self, ident):
                o = self.set_shard(shard_id).get(ident, **kwargs)
                if o is not None:
                    return o
            else:
                # for/else: no shard contained the identity.
                return None
class ShardedSession(Session):
    def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None,
                 query_cls=ShardedQuery, **kwargs):
        """Construct a ShardedSession.

        :param shard_chooser: A callable which, passed a Mapper, a mapped
          instance, and possibly a SQL clause, returns a shard ID. This id
          may be based off of the attributes present within the object, or on
          some round-robin scheme. If the scheme is based on a selection, it
          should set whatever state on the instance to mark it in the future as
          participating in that shard.

        :param id_chooser: A callable, passed a query and a tuple of identity
          values, which should return a list of shard ids where the ID might
          reside.  The databases will be queried in the order of this listing.

        :param query_chooser: For a given Query, returns the list of shard_ids
          where the query should be issued.  Results from all shards returned
          will be combined together into a single listing.

        :param shards: A dictionary of string shard names
          to :class:`~sqlalchemy.engine.Engine` objects.

        """
        super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs)
        self.shard_chooser = shard_chooser
        self.id_chooser = id_chooser
        self.query_chooser = query_chooser
        # Maps shard id -> Engine (or other bind); populated by bind_shard().
        self.__binds = {}
        self.connection_callable = self.connection
        if shards is not None:
            for k in shards:
                self.bind_shard(k, shards[k])

    def connection(self, mapper=None, instance=None, shard_id=None, **kwargs):
        """Return a connection for the given shard (chosen if not given)."""
        if shard_id is None:
            shard_id = self.shard_chooser(mapper, instance)

        if self.transaction is not None:
            # Inside an active transaction, reuse its per-shard connection.
            return self.transaction.connection(mapper, shard_id=shard_id)
        else:
            return self.get_bind(
                mapper,
                shard_id=shard_id,
                instance=instance
            ).contextual_connect(**kwargs)

    def get_bind(self, mapper, shard_id=None,
                 instance=None, clause=None, **kw):
        """Return the Engine bound to the (possibly chosen) shard id."""
        if shard_id is None:
            shard_id = self.shard_chooser(mapper, instance, clause=clause)
        return self.__binds[shard_id]

    def bind_shard(self, shard_id, bind):
        """Register *bind* (an Engine) under the given shard id."""
        self.__binds[shard_id] = bind
| gpl-3.0 |
adrienbrault/home-assistant | tests/components/smartthings/test_config_flow.py | 6 | 30216 | """Tests for the SmartThings config flow module."""
from unittest.mock import AsyncMock, Mock, patch
from uuid import uuid4
from aiohttp import ClientResponseError
from pysmartthings import APIResponseError
from pysmartthings.installedapp import format_install_url
from homeassistant import data_entry_flow
from homeassistant.components.smartthings import smartapp
from homeassistant.components.smartthings.const import (
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_LOCATION_ID,
DOMAIN,
)
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
HTTP_FORBIDDEN,
HTTP_NOT_FOUND,
HTTP_UNAUTHORIZED,
)
from tests.common import MockConfigEntry
async def test_import_shows_user_step(hass):
    """Test import source shows the user form.

    Starting the flow with source 'import' should land on the same
    'user' confirmation step, with the webhook URL exposed as a
    description placeholder.
    """
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "import"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
async def test_entry_created(hass, app, app_oauth_client, location, smartthings_mock):
"""Test local webhook, new app, install event creates entry."""
token = str(uuid4())
installed_app_id = str(uuid4())
refresh_token = str(uuid4())
smartthings_mock.apps.return_value = []
smartthings_mock.create_app.return_value = (app, app_oauth_client)
smartthings_mock.locations.return_value = [location]
request = Mock()
request.installed_app_id = installed_app_id
request.auth_token = token
request.location_id = location.location_id
request.refresh_token = refresh_token
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token and advance to location screen
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_location"
# Select location and advance to external auth
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_LOCATION_ID: location.location_id}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "authorize"
assert result["url"] == format_install_url(app.app_id, location.location_id)
# Complete external auth and advance to install
await smartapp.smartapp_install(hass, request, None, app)
# Finish
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"]["app_id"] == app.app_id
assert result["data"]["installed_app_id"] == installed_app_id
assert result["data"]["location_id"] == location.location_id
assert result["data"]["access_token"] == token
assert result["data"]["refresh_token"] == request.refresh_token
assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
assert result["title"] == location.name
entry = next(
(entry for entry in hass.config_entries.async_entries(DOMAIN)),
None,
)
assert entry.unique_id == smartapp.format_unique_id(
app.app_id, location.location_id
)
async def test_entry_created_from_update_event(
hass, app, app_oauth_client, location, smartthings_mock
):
"""Test local webhook, new app, update event creates entry."""
token = str(uuid4())
installed_app_id = str(uuid4())
refresh_token = str(uuid4())
smartthings_mock.apps.return_value = []
smartthings_mock.create_app.return_value = (app, app_oauth_client)
smartthings_mock.locations.return_value = [location]
request = Mock()
request.installed_app_id = installed_app_id
request.auth_token = token
request.location_id = location.location_id
request.refresh_token = refresh_token
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token and advance to location screen
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_location"
# Select location and advance to external auth
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_LOCATION_ID: location.location_id}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "authorize"
assert result["url"] == format_install_url(app.app_id, location.location_id)
# Complete external auth and advance to install
await smartapp.smartapp_update(hass, request, None, app)
# Finish
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"]["app_id"] == app.app_id
assert result["data"]["installed_app_id"] == installed_app_id
assert result["data"]["location_id"] == location.location_id
assert result["data"]["access_token"] == token
assert result["data"]["refresh_token"] == request.refresh_token
assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
assert result["title"] == location.name
entry = next(
(entry for entry in hass.config_entries.async_entries(DOMAIN)),
None,
)
assert entry.unique_id == smartapp.format_unique_id(
app.app_id, location.location_id
)
async def test_entry_created_existing_app_new_oauth_client(
hass, app, app_oauth_client, location, smartthings_mock
):
"""Test entry is created with an existing app and generation of a new oauth client."""
token = str(uuid4())
installed_app_id = str(uuid4())
refresh_token = str(uuid4())
smartthings_mock.apps.return_value = [app]
smartthings_mock.generate_app_oauth.return_value = app_oauth_client
smartthings_mock.locations.return_value = [location]
request = Mock()
request.installed_app_id = installed_app_id
request.auth_token = token
request.location_id = location.location_id
request.refresh_token = refresh_token
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token and advance to location screen
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_location"
# Select location and advance to external auth
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_LOCATION_ID: location.location_id}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "authorize"
assert result["url"] == format_install_url(app.app_id, location.location_id)
# Complete external auth and advance to install
await smartapp.smartapp_install(hass, request, None, app)
# Finish
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"]["app_id"] == app.app_id
assert result["data"]["installed_app_id"] == installed_app_id
assert result["data"]["location_id"] == location.location_id
assert result["data"]["access_token"] == token
assert result["data"]["refresh_token"] == request.refresh_token
assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
assert result["title"] == location.name
entry = next(
(entry for entry in hass.config_entries.async_entries(DOMAIN)),
None,
)
assert entry.unique_id == smartapp.format_unique_id(
app.app_id, location.location_id
)
async def test_entry_created_existing_app_copies_oauth_client(
hass, app, location, smartthings_mock
):
"""Test entry is created with an existing app and copies the oauth client from another entry."""
token = str(uuid4())
installed_app_id = str(uuid4())
refresh_token = str(uuid4())
oauth_client_id = str(uuid4())
oauth_client_secret = str(uuid4())
smartthings_mock.apps.return_value = [app]
smartthings_mock.locations.return_value = [location]
request = Mock()
request.installed_app_id = installed_app_id
request.auth_token = token
request.location_id = location.location_id
request.refresh_token = refresh_token
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_APP_ID: app.app_id,
CONF_CLIENT_ID: oauth_client_id,
CONF_CLIENT_SECRET: oauth_client_secret,
CONF_LOCATION_ID: str(uuid4()),
CONF_INSTALLED_APP_ID: str(uuid4()),
CONF_ACCESS_TOKEN: token,
},
)
entry.add_to_hass(hass)
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Assert access token is defaulted to an existing entry for convenience.
assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
# Enter token and advance to location screen
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_location"
# Select location and advance to external auth
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_LOCATION_ID: location.location_id}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "authorize"
assert result["url"] == format_install_url(app.app_id, location.location_id)
# Complete external auth and advance to install
await smartapp.smartapp_install(hass, request, None, app)
# Finish
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"]["app_id"] == app.app_id
assert result["data"]["installed_app_id"] == installed_app_id
assert result["data"]["location_id"] == location.location_id
assert result["data"]["access_token"] == token
assert result["data"]["refresh_token"] == request.refresh_token
assert result["data"][CONF_CLIENT_SECRET] == oauth_client_secret
assert result["data"][CONF_CLIENT_ID] == oauth_client_id
assert result["title"] == location.name
entry = next(
(
entry
for entry in hass.config_entries.async_entries(DOMAIN)
if entry.data[CONF_INSTALLED_APP_ID] == installed_app_id
),
None,
)
assert entry.unique_id == smartapp.format_unique_id(
app.app_id, location.location_id
)
async def test_entry_created_with_cloudhook(
hass, app, app_oauth_client, location, smartthings_mock
):
"""Test cloud, new app, install event creates entry."""
hass.config.components.add("cloud")
# Unload the endpoint so we can reload it under the cloud.
await smartapp.unload_smartapp_endpoint(hass)
token = str(uuid4())
installed_app_id = str(uuid4())
refresh_token = str(uuid4())
smartthings_mock.apps.return_value = []
smartthings_mock.create_app = AsyncMock(return_value=(app, app_oauth_client))
smartthings_mock.locations = AsyncMock(return_value=[location])
request = Mock()
request.installed_app_id = installed_app_id
request.auth_token = token
request.location_id = location.location_id
request.refresh_token = refresh_token
with patch.object(
hass.components.cloud, "async_active_subscription", Mock(return_value=True)
), patch.object(
hass.components.cloud,
"async_create_cloudhook",
AsyncMock(return_value="http://cloud.test"),
) as mock_create_cloudhook:
await smartapp.setup_smartapp_endpoint(hass)
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
assert mock_create_cloudhook.call_count == 1
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token and advance to location screen
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "select_location"
# Select location and advance to external auth
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_LOCATION_ID: location.location_id}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["step_id"] == "authorize"
assert result["url"] == format_install_url(app.app_id, location.location_id)
# Complete external auth and advance to install
await smartapp.smartapp_install(hass, request, None, app)
# Finish
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"]["app_id"] == app.app_id
assert result["data"]["installed_app_id"] == installed_app_id
assert result["data"]["location_id"] == location.location_id
assert result["data"]["access_token"] == token
assert result["data"]["refresh_token"] == request.refresh_token
assert result["data"][CONF_CLIENT_SECRET] == app_oauth_client.client_secret
assert result["data"][CONF_CLIENT_ID] == app_oauth_client.client_id
assert result["title"] == location.name
entry = next(
(entry for entry in hass.config_entries.async_entries(DOMAIN)),
None,
)
assert entry.unique_id == smartapp.format_unique_id(
app.app_id, location.location_id
)
async def test_invalid_webhook_aborts(hass):
    """Test flow aborts if webhook is invalid."""
    # An http:// (non-https) external URL yields a webhook URL SmartThings
    # cannot call — presumably why the flow aborts; confirm against the
    # component's webhook validation logic.
    await async_process_ha_core_config(
        hass,
        {"external_url": "http://example.local:8123"},
    )
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    # Flow aborts immediately instead of showing the confirmation form.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "invalid_webhook_url"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    assert "component_url" in result["description_placeholders"]
async def test_invalid_token_shows_error(hass):
    """Test an error is shown for invalid token formats."""
    # Not a UUID-formatted token (valid tokens elsewhere in these tests are
    # uuid4 strings).
    token = "123456789"
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    # Flow stays on the PAT step, re-populating the entered token and
    # reporting the format error.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {CONF_ACCESS_TOKEN: "token_invalid_format"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_unauthorized_token_shows_error(hass, smartthings_mock):
"""Test an error is shown for unauthorized token formats."""
token = str(uuid4())
request_info = Mock(real_url="http://example.com")
smartthings_mock.apps.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_UNAUTHORIZED
)
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
assert result["errors"] == {CONF_ACCESS_TOKEN: "token_unauthorized"}
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
async def test_forbidden_token_shows_error(hass, smartthings_mock):
"""Test an error is shown for forbidden token formats."""
token = str(uuid4())
request_info = Mock(real_url="http://example.com")
smartthings_mock.apps.side_effect = ClientResponseError(
request_info=request_info, history=None, status=HTTP_FORBIDDEN
)
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
assert result["errors"] == {CONF_ACCESS_TOKEN: "token_forbidden"}
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
async def test_webhook_problem_shows_error(hass, smartthings_mock):
"""Test an error is shown when there's an problem with the webhook endpoint."""
token = str(uuid4())
data = {"error": {}}
request_info = Mock(real_url="http://example.com")
error = APIResponseError(
request_info=request_info, history=None, data=data, status=422
)
error.is_target_error = Mock(return_value=True)
smartthings_mock.apps.side_effect = error
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
assert result["errors"] == {"base": "webhook_error"}
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
async def test_api_error_shows_error(hass, smartthings_mock):
"""Test an error is shown when other API errors occur."""
token = str(uuid4())
data = {"error": {}}
request_info = Mock(real_url="http://example.com")
error = APIResponseError(
request_info=request_info, history=None, data=data, status=400
)
smartthings_mock.apps.side_effect = error
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
assert result["errors"] == {"base": "app_setup_error"}
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
async def test_unknown_response_error_shows_error(hass, smartthings_mock):
"""Test an error is shown when there is an unknown API error."""
token = str(uuid4())
request_info = Mock(real_url="http://example.com")
error = ClientResponseError(
request_info=request_info, history=None, status=HTTP_NOT_FOUND
)
smartthings_mock.apps.side_effect = error
# Webhook confirmation shown
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["description_placeholders"][
"webhook_url"
] == smartapp.get_webhook_url(hass)
# Advance to PAT screen
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
# Enter token
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {CONF_ACCESS_TOKEN: token}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "pat"
assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
assert result["errors"] == {"base": "app_setup_error"}
assert "token_url" in result["description_placeholders"]
assert "component_url" in result["description_placeholders"]
async def test_unknown_error_shows_error(hass, smartthings_mock):
    """Test an error is shown when an unexpected (non-HTTP) exception occurs."""
    token = str(uuid4())
    # Any plain exception from the API client (not a ClientResponseError)
    # is surfaced as a generic app setup error.
    smartthings_mock.apps.side_effect = Exception("Unknown error")
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    # Flow stays on the PAT step with the entered token re-populated and a
    # base-level error shown.
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert result["data_schema"]({}) == {CONF_ACCESS_TOKEN: token}
    assert result["errors"] == {"base": "app_setup_error"}
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
async def test_no_available_locations_aborts(
    hass, app, app_oauth_client, location, smartthings_mock
):
    """Test select location aborts if no available locations."""
    token = str(uuid4())
    smartthings_mock.apps.return_value = []
    smartthings_mock.create_app.return_value = (app, app_oauth_client)
    smartthings_mock.locations.return_value = [location]
    # An existing entry already claims the only location, so nothing remains
    # for a new config entry to select.
    entry = MockConfigEntry(
        domain=DOMAIN, data={CONF_LOCATION_ID: location.location_id}
    )
    entry.add_to_hass(hass)
    # Webhook confirmation shown
    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": "user"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "user"
    assert result["description_placeholders"][
        "webhook_url"
    ] == smartapp.get_webhook_url(hass)
    # Advance to PAT screen
    result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["step_id"] == "pat"
    assert "token_url" in result["description_placeholders"]
    assert "component_url" in result["description_placeholders"]
    # Enter token and advance to location screen
    result = await hass.config_entries.flow.async_configure(
        result["flow_id"], {CONF_ACCESS_TOKEN: token}
    )
    # With every location taken, the flow aborts instead of showing the
    # selection form.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "no_available_locations"
| mit |
Maximilian-Reuter/SickRage-1 | sickbeard/helpers.py | 1 | 59305 | # coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
# Git: https://github.com/SickRage/SickRage.git
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
# pylint:disable=too-many-lines
from __future__ import unicode_literals
import ast
import base64
import ctypes
import datetime
import hashlib
import io
import operator
import os
import platform
import random
import re
import shutil
import socket
import ssl
import stat
import time
import traceback
import urllib
import uuid
import xml.etree.ElementTree as ET
import zipfile
from contextlib import closing
from itertools import cycle, izip
import adba
import certifi
import cfscrape
import requests
from cachecontrol import CacheControl
from requests.utils import urlparse
import sickbeard
from sickbeard import classes, db, logger
from sickbeard.common import USER_AGENT
from sickrage.helper import MEDIA_EXTENSIONS, SUBTITLE_EXTENSIONS, episode_num, pretty_file_size
from sickrage.helper.encoding import ek
from sickrage.show.Show import Show
# pylint: disable=protected-access
# Access to a protected member of a client class
urllib._urlopener = classes.SickBeardURLopener()
def indentXML(elem, level=0):
    """
    Pretty-print an ElementTree element in place by rewriting the
    text/tail whitespace of ``elem`` and all of its descendants.

    :param elem: the element to indent (modified in place)
    :param level: current nesting depth, used to size the indent
    """
    pad = "\n" + level * " "
    if len(elem):
        # Parent element: open the body on a new, deeper-indented line.
        if not (elem.text and elem.text.strip()):
            elem.text = pad + " "
        if not (elem.tail and elem.tail.strip()):
            elem.tail = pad
        for child in elem:
            indentXML(child, level + 1)
        # Pull the closing tag of the parent back to this level by
        # adjusting the tail of the last child.
        if not (child.tail and child.tail.strip()):
            child.tail = pad
    elif level and not (elem.tail and elem.tail.strip()):
        # Leaf element below the root: just align its tail.
        elem.tail = pad
def remove_non_release_groups(name):
    """
    Strip known non-release-group tags (tracker names and junk suffixes)
    from a release name.

    :param name: Release name to clean; returned unchanged when falsy
    :return: The cleaned release name
    """
    if not name:
        return name

    # Do not remove all [....] suffixes, or it will break anime releases ## Need to verify this is true now
    # Check your database for funky release_names and add them here, to improve failed handling, archiving, and history.
    # select release_name from tv_episodes WHERE LENGTH(release_name);
    # [eSc], [SSG], [GWC] are valid release groups for non-anime
    removeWordsList = {
        r'\[rartv\]$': 'searchre',
        r'\[rarbg\]$': 'searchre',
        r'\.\[eztv\]$': 'searchre',
        r'\[eztv\]$': 'searchre',
        r'\[ettv\]$': 'searchre',
        r'\[cttv\]$': 'searchre',
        r'\.\[vtv\]$': 'searchre',
        r'\[vtv\]$': 'searchre',
        r'\[EtHD\]$': 'searchre',
        r'\[GloDLS\]$': 'searchre',
        r'\[silv4\]$': 'searchre',
        r'\[Seedbox\]$': 'searchre',
        r'\[PublicHD\]$': 'searchre',
        r'\.\[PublicHD\]$': 'searchre',
        r'\.\[NO.RAR\]$': 'searchre',
        r'\[NO.RAR\]$': 'searchre',
        r'-\=\{SPARROW\}\=-$': 'searchre',
        r'\=\{SPARR$': 'searchre',
        r'\.\[720P\]\[HEVC\]$': 'searchre',
        r'\[AndroidTwoU\]$': 'searchre',
        r'\[brassetv\]$': 'searchre',
        r'\[Talamasca32\]$': 'searchre',
        r'\(musicbolt\.com\)$': 'searchre',
        r'\.\(NLsub\)$': 'searchre',
        r'\(NLsub\)$': 'searchre',
        r'\.\[BT\]$': 'searchre',
        r' \[1044\]$': 'searchre',
        r'\.RiPSaLoT$': 'searchre',
        r'\.GiuseppeTnT$': 'searchre',
        r'\.Renc$': 'searchre',
        r'\.gz$': 'searchre',
        r'\.English$': 'searchre',
        r'\.German$': 'searchre',
        r'\.\.Italian$': 'searchre',
        r'\.Italian$': 'searchre',
        r'(?<![57])\.1$': 'searchre',
        r'-NZBGEEK$': 'searchre',
        r'-Siklopentan$': 'searchre',
        r'-Chamele0n$': 'searchre',
        r'-Obfuscated$': 'searchre',
        r'-BUYMORE$': 'searchre',
        r'-\[SpastikusTV\]$': 'searchre',
        r'-RP$': 'searchre',
        r'-20-40$': 'searchre',
        r'\.\[www\.usabit\.com\]$': 'searchre',
        r'^\[www\.Cpasbien\.pe\] ': 'searchre',
        r'^\[www\.Cpasbien\.com\] ': 'searchre',
        r'^\[ www\.Cpasbien\.pw \] ': 'searchre',
        r'^\.www\.Cpasbien\.pw': 'searchre',
        r'^\[www\.newpct1\.com\]': 'searchre',
        r'^\[ www\.Cpasbien\.com \] ': 'searchre',
        r'- \{ www\.SceneTime\.com \}$': 'searchre',
        r'^\{ www\.SceneTime\.com \} - ': 'searchre',
        r'^\]\.\[www\.tensiontorrent.com\] - ': 'searchre',
        r'^\]\.\[ www\.tensiontorrent.com \] - ': 'searchre',
        r'- \[ www\.torrentday\.com \]$': 'searchre',
        r'^\[ www\.TorrentDay\.com \] - ': 'searchre',
        r'\[NO-RAR\] - \[ www\.torrentday\.com \]$': 'searchre',
    }

    _name = name
    # 'search' entries are removed as plain substrings, 'searchre' entries
    # as case-insensitive regular expressions.
    # NOTE: .items() instead of the Python-2-only .iteritems() so this also
    # runs under Python 3.
    for remove_string, remove_type in removeWordsList.items():
        if remove_type == 'search':
            _name = _name.replace(remove_string, '')
        elif remove_type == 'searchre':
            _name = re.sub(r'(?i)' + remove_string, '', _name)

    return _name
def isMediaFile(filename):
    """
    Check whether a filename looks like a playable media file.

    :param filename: Filename to check
    :return: True if this is a known media file, False if not
    """
    try:
        # Samples are never wanted media.
        if re.search(r'(^|[\W_])(?<!shomin.)(sample\d*)[\W_]', filename, re.I):
            return False

        # Neither is the RARBG release intro clip.
        if re.search(r'^RARBG\.(\w+\.)?(mp4|avi|txt)$', filename, re.I):
            return False

        # Mac OS "resource fork" companion files start with "._".
        if filename.startswith('._'):
            return False

        root, _, extension = filename.rpartition(".")
        if re.search('extras?$', root, re.I):
            return False

        return extension.lower() in MEDIA_EXTENSIONS
    except TypeError as error:  # Not a string
        logger.log('Invalid filename. Filename must be a string. {0}'.format(error), logger.DEBUG)  # pylint: disable=no-member
        return False
def isRarFile(filename):
    """
    Check if file is a RAR file, or part of a RAR set

    :param filename: Filename to check
    :return: True if this is RAR/Part file, False if not
    """
    # Matches "name.rar" and the first volume "name.part01.rar" (any number
    # of leading zeros), but not later volumes such as "name.part02.rar".
    archive_regex = r'(?P<file>^(?P<base>(?:(?!\.part\d+\.rar$).)*)\.(?:(?:part0*1\.)?rar)$)'
    # Return the match result directly instead of an if/return True/False.
    return re.search(archive_regex, filename) is not None
def isBeingWritten(filepath):
    """
    Check if file has been written in last 60 seconds

    :param filepath: Filename to check
    :return: True if file has been written recently, False if not
    """
    # Use the newer of creation/modification time; a file touched within
    # the last minute may still be open for writing.
    newest_change = max(ek(os.path.getctime, filepath), ek(os.path.getmtime, filepath))
    return newest_change > time.time() - 60
def remove_file_failed(failed_file):
    """
    Best-effort removal of a file from the filesystem.

    Failures are deliberately ignored: the caller only needs the file gone
    when it can be removed.

    :param failed_file: Path of the file to remove
    """
    try:
        ek(os.remove, failed_file)
    except OSError:
        # Narrowed from a bare ``except Exception``: only filesystem errors
        # (missing file, permissions) should be swallowed here.
        pass
def makeDir(path):
    """
    Make a directory on the filesystem

    :param path: directory to make
    :return: True if success, False if failure
    """
    # Nothing to do when the directory already exists.
    if ek(os.path.isdir, path):
        return True
    try:
        ek(os.makedirs, path)
        # do the library update for synoindex
        sickbeard.notifiers.synoindex_notifier.addFolder(path)
    except OSError:
        return False
    return True
def searchIndexerForShowID(regShowName, indexer=None, indexer_id=None, ui=None):
    """
    Contacts indexer to check for information on shows by showid

    :param regShowName: Name of show
    :param indexer: Which indexer to use
    :param indexer_id: Which indexer ID to look for
    :param ui: Custom UI for indexer use
    :return: (series name, indexer, indexer id) tuple, or (None, None, None)
    """
    showNames = [re.sub('[. -]', ' ', regShowName)]

    # When a specific indexer is requested, query only that one; otherwise
    # try them all.  BUG FIX: the old code did
    # ``for i in ... else int(indexer or [])`` which iterated over a bare
    # int and raised TypeError whenever an indexer was supplied.
    indexers = [int(indexer)] if indexer else sickbeard.indexerApi().indexers

    for i in indexers:
        # Query Indexers for each search term and build the list of results
        lINDEXER_API_PARMS = sickbeard.indexerApi(i).api_params.copy()
        if ui is not None:
            lINDEXER_API_PARMS['custom_ui'] = ui
        t = sickbeard.indexerApi(i).indexer(**lINDEXER_API_PARMS)

        for name in showNames:
            logger.log("Trying to find " + name + " on " + sickbeard.indexerApi(i).name, logger.DEBUG)

            try:
                # Look up by id when we have one, otherwise by name.
                search = t[indexer_id] if indexer_id else t[name]
            except Exception:
                continue

            try:
                seriesname = search[0][b'seriesname']
            except Exception:
                seriesname = None

            try:
                series_id = search[0][b'id']
            except Exception:
                series_id = None

            if not (seriesname and series_id):
                continue

            ShowObj = Show.find(sickbeard.showList, int(series_id))
            # Check if we can find the show in our list (if not, it's not the right show)
            if (indexer_id is None) and (ShowObj is not None) and (ShowObj.indexerid == int(series_id)):
                return seriesname, i, int(series_id)
            elif (indexer_id is not None) and (int(indexer_id) == int(series_id)):
                return seriesname, i, int(indexer_id)

    return None, None, None
def listMediaFiles(path):
    """
    Get a list of files possibly containing media in a path (recursive).

    :param path: Path to check for files
    :return: list of full paths of media files found
    """
    # BUG FIX: the old check was ``if not dir or ...`` which tested the
    # *builtin* ``dir`` (always truthy) instead of ``path``, so an empty
    # path was never short-circuited.
    if not path or not ek(os.path.isdir, path):
        return []

    files = []
    for curFile in ek(os.listdir, path):
        fullCurFile = ek(os.path.join, path, curFile)

        # if it's a folder do it recursively (skip hidden dirs and 'Extras')
        if ek(os.path.isdir, fullCurFile) and not curFile.startswith('.') and not curFile == 'Extras':
            files += listMediaFiles(fullCurFile)

        elif isMediaFile(curFile):
            files.append(fullCurFile)

    return files
def copyFile(srcFile, destFile):
    """
    Copy a file from source to destination

    :param srcFile: Path of source file
    :param destFile: Path of destination file
    """
    try:
        # SpecialFileError is not present in every supported shutil version;
        # alias it to Error so the except clause below is always valid.
        from shutil import SpecialFileError, Error
    except ImportError:
        from shutil import Error
        SpecialFileError = Error

    try:
        ek(shutil.copyfile, srcFile, destFile)
    except (SpecialFileError, Error) as error:
        # Expected copy failures (special files, same-file) -> warning only.
        logger.log('{0}'.format(error), logger.WARNING)
    except Exception as error:
        logger.log('{0}'.format(error), logger.ERROR)
    else:
        # Copy succeeded: best-effort attempt to mirror the permission bits.
        try:
            ek(shutil.copymode, srcFile, destFile)
        except OSError:
            pass
def moveFile(srcFile, destFile):
    """
    Move a file from source to destination

    :param srcFile: Path of source file
    :param destFile: Path of destination file
    """
    try:
        ek(shutil.move, srcFile, destFile)
        fixSetGroupID(destFile)
    except OSError:
        # shutil.move failed (e.g. cross-device rename): copy then delete
        # the source instead.
        copyFile(srcFile, destFile)
        ek(os.unlink, srcFile)
def link(src, dst):
    """
    Create a file link from source to destination.
    TODO: Make this unicode proof

    :param src: Source file
    :param dst: Destination file
    """
    if platform.system() == 'Windows':
        # Call the Win32 API directly (os.link is not usable on Windows
        # under Python 2); a zero return value signals failure.
        if ctypes.windll.kernel32.CreateHardLinkW(ctypes.c_wchar_p(unicode(dst)), ctypes.c_wchar_p(unicode(src)), None) == 0:
            raise ctypes.WinError()
    else:
        ek(os.link, src, dst)
def hardlinkFile(srcFile, destFile):
    """
    Create a hard-link (inside filesystem link) between source and destination

    :param srcFile: Source file
    :param destFile: Destination file
    """
    try:
        ek(link, srcFile, destFile)
        fixSetGroupID(destFile)
    except Exception as error:
        # Hard links can fail (e.g. across filesystems); degrade gracefully
        # to a plain copy.
        logger.log("Failed to create hardlink of {0} at {1}. Error: {2}. Copying instead".format
                   (srcFile, destFile, error), logger.WARNING)
        copyFile(srcFile, destFile)
def symlink(src, dst):
    """
    Create a soft/symlink between source and destination

    :param src: Source file
    :param dst: Destination file
    """
    if platform.system() == 'Windows':
        # CreateSymbolicLinkW flags: 1 = directory link, 0 = file link.
        # A return of 0 is failure; 1280 is likewise treated as failure
        # (NOTE(review): confirm which Win32 error code 1280 corresponds to).
        if ctypes.windll.kernel32.CreateSymbolicLinkW(ctypes.c_wchar_p(unicode(dst)), ctypes.c_wchar_p(unicode(src)), 1 if ek(os.path.isdir, src) else 0) in [0, 1280]:
            raise ctypes.WinError()
    else:
        ek(os.symlink, src, dst)
def moveAndSymlinkFile(srcFile, destFile):
    """
    Move a file from source to destination, then create a symlink back from destination from source. If this fails, copy
    the file from source to destination

    :param srcFile: Source file
    :param destFile: Destination file
    """
    try:
        moveFile(srcFile, destFile)
        # Leave a symlink at the old location pointing at the new one.
        symlink(destFile, srcFile)
    except Exception as error:
        logger.log("Failed to create symlink of {0} at {1}. Error: {2}. Copying instead".format
                   (srcFile, destFile, error), logger.WARNING)
        copyFile(srcFile, destFile)
def make_dirs(path):
    """
    Creates any folders that are missing and assigns them the permissions of their
    parents

    :param path: Absolute path of the directory tree to create
    :return: True on success (or if the path already exists), False on failure
    """
    logger.log("Checking if the path {0} already exists".format(path), logger.DEBUG)

    if not ek(os.path.isdir, path):
        # Windows, create all missing folders
        if platform.system() == 'Windows':
            try:
                logger.log("Folder {0} didn't exist, creating it".format(path), logger.DEBUG)
                ek(os.makedirs, path)
            except (OSError, IOError) as error:
                logger.log("Failed creating {0} : {1}".format(path, error), logger.ERROR)
                return False

        # not Windows, create all missing folders and set permissions
        else:
            sofar = ''
            folder_list = path.split(os.path.sep)

            # look through each subfolder and make sure they all exist
            for cur_folder in folder_list:
                sofar += cur_folder + os.path.sep

                # if it exists then just keep walking down the line
                if ek(os.path.isdir, sofar):
                    continue

                try:
                    logger.log("Folder {0} didn't exist, creating it".format(sofar), logger.DEBUG)
                    ek(os.mkdir, sofar)
                    # use normpath to remove end separator, otherwise checks permissions against itself
                    chmodAsParent(ek(os.path.normpath, sofar))
                    # do the library update for synoindex
                    sickbeard.notifiers.synoindex_notifier.addFolder(sofar)
                except (OSError, IOError) as error:
                    logger.log("Failed creating {0} : {1}".format(sofar, error), logger.ERROR)
                    return False

    return True
def rename_ep_file(cur_path, new_path, old_path_length=0):
    """
    Creates all folders needed to move a file to its new location, renames it, then cleans up any folders
    left that are now empty.

    :param cur_path: The absolute path to the file you want to move/rename
    :param new_path: The absolute path to the destination for the file WITHOUT THE EXTENSION
    :param old_path_length: The length of media file path (old name) WITHOUT THE EXTENSION
    :return: True on success, False when the underlying move fails
    """
    # new_dest_dir, new_dest_name = ek(os.path.split, new_path)  # @UnusedVariable
    if old_path_length == 0 or old_path_length > len(cur_path):
        # approach from the right: let splitext find the extension
        cur_file_name, cur_file_ext = ek(os.path.splitext, cur_path)  # @UnusedVariable
    else:
        # approach from the left: everything after old_path_length is the
        # extension (handles multi-part extensions like ".en.srt")
        cur_file_ext = cur_path[old_path_length:]
        cur_file_name = cur_path[:old_path_length]

    if cur_file_ext[1:] in SUBTITLE_EXTENSIONS:
        # Extract subtitle language from filename
        sublang = ek(os.path.splitext, cur_file_name)[1][1:]

        # Check if the language extracted from filename is a valid language
        if sublang in sickbeard.subtitles.subtitle_code_filter():
            # keep the language code in the renamed file, e.g. ".en.srt"
            cur_file_ext = '.' + sublang + cur_file_ext

    # put the extension on the incoming file
    new_path += cur_file_ext

    make_dirs(ek(os.path.dirname, new_path))

    # move the file
    try:
        logger.log("Renaming file from {0} to {1}".format(cur_path, new_path))
        ek(shutil.move, cur_path, new_path)
    except (OSError, IOError) as error:
        logger.log("Failed renaming {0} to {1} : {2}".format(cur_path, new_path, error), logger.ERROR)
        return False

    # clean up any old folders that are empty
    delete_empty_folders(ek(os.path.dirname, cur_path))

    return True
def delete_empty_folders(check_empty_dir, keep_dir=None):
    """
    Walks backwards up the path and deletes any empty folders found.

    :param check_empty_dir: The path to clean (absolute path to a folder)
    :param keep_dir: Clean until this path is reached
    """
    # treat check_empty_dir as empty when it only contains these items
    # (placeholder: currently empty, so only truly empty dirs are removed)
    ignore_items = []
    logger.log("Trying to clean any empty folders under " + check_empty_dir)
    # as long as the folder exists and doesn't contain any files, delete it
    while ek(os.path.isdir, check_empty_dir) and check_empty_dir != keep_dir:
        check_files = ek(os.listdir, check_empty_dir)
        if not check_files or (len(check_files) <= len(ignore_items) and all(
                check_file in ignore_items for check_file in check_files)):
            # directory is empty or contains only ignore_items
            try:
                logger.log("Deleting empty folder: " + check_empty_dir)
                # need shutil.rmtree when ignore_items is really implemented
                ek(os.rmdir, check_empty_dir)
                # do the library update for synoindex
                sickbeard.notifiers.synoindex_notifier.deleteFolder(check_empty_dir)
            except OSError as error:
                logger.log("Unable to delete {0}. Error: {1}".format(check_empty_dir, error), logger.WARNING)
                break
            # continue with the parent directory
            check_empty_dir = ek(os.path.dirname, check_empty_dir)
        else:
            break
def fileBitFilter(mode):
    """
    Strip special filesystem bits (exec, setuid, setgid) from a file mode.

    :param mode: mode to check and strip
    :return: required mode for media file
    """
    # clear all special bits in a single mask operation
    special_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH | stat.S_ISUID | stat.S_ISGID
    return mode & ~special_bits
def chmodAsParent(childPath):
    """
    Retain permissions of parent for childs
    (Does not work for Windows hosts)

    :param childPath: Child Path to change permissions to sync from parent
    """
    if platform.system() == 'Windows':
        return
    parentPath = ek(os.path.dirname, childPath)
    if not parentPath:
        logger.log("No parent path provided in " + childPath + ", unable to get permissions from it", logger.DEBUG)
        return
    # normalize to an absolute path under the parent
    childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))
    parentPathStat = ek(os.stat, parentPath)
    parentMode = stat.S_IMODE(parentPathStat[stat.ST_MODE])
    childPathStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
    childPath_mode = stat.S_IMODE(childPathStat[stat.ST_MODE])
    if ek(os.path.isfile, childPath):
        # files should never carry the parent dir's exec/special bits
        childMode = fileBitFilter(parentMode)
    else:
        childMode = parentMode
    # nothing to do when the mode already matches
    if childPath_mode == childMode:
        return
    childPath_owner = childPathStat.st_uid  # pylint: disable=no-member
    user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX
    if user_id not in (childPath_owner, 0):
        logger.log("Not running as root or owner of " + childPath + ", not trying to set permissions", logger.DEBUG)
        return
    try:
        ek(os.chmod, childPath, childMode)
    except OSError:
        logger.log("Failed to set permission for {0} to {1:o}, parent directory has {2:o}".format(childPath, childMode, parentMode), logger.DEBUG)
def fixSetGroupID(childPath):
    """
    Inherit the set-group-ID group from the parent directory
    (does not work on Windows hosts).

    :param childPath: Path to inherit SGID permissions from parent
    """
    if platform.system() == 'Windows':
        return
    parentPath = ek(os.path.dirname, childPath)
    parentStat = ek(os.stat, parentPath)
    parentMode = stat.S_IMODE(parentStat[stat.ST_MODE])
    childPath = ek(os.path.join, parentPath, ek(os.path.basename, childPath))
    # only act when the parent dir actually has the set-group-ID bit
    if parentMode & stat.S_ISGID:
        parentGID = parentStat[stat.ST_GID]
        childStat = ek(os.stat, childPath.encode(sickbeard.SYS_ENCODING))
        childGID = childStat[stat.ST_GID]
        if childGID == parentGID:
            return
        childPath_owner = childStat.st_uid  # pylint: disable=no-member
        user_id = os.geteuid()  # @UndefinedVariable - only available on UNIX
        if user_id != 0 and user_id != childPath_owner:
            logger.log("Not running as root or owner of " + childPath + ", not trying to set the set-group-ID",
                       logger.DEBUG)
            return
        try:
            # -1 for uid leaves the owner untouched, only the group changes
            ek(os.chown, childPath, -1, parentGID)  # @UndefinedVariable - only available on UNIX
            logger.log("Respecting the set-group-ID bit on the parent directory for {0}".format(childPath), logger.DEBUG)
        except OSError:
            logger.log(
                "Failed to respect the set-group-ID bit on the parent directory for {0} (setting group ID {1})".format(
                    childPath, parentGID), logger.ERROR)
def is_anime_in_show_list():
    """
    Check if any shows in list contain anime

    :return: True if global showlist contains Anime, False if not
    """
    return any(show.is_anime for show in sickbeard.showList)
def update_anime_support():
    """Check if we need to support anime, and if we do, enable the feature"""
    # cached flag, re-computed whenever the show list changes
    sickbeard.ANIMESUPPORT = is_anime_in_show_list()
def get_absolute_number_from_season_and_episode(show, season, episode):
    """
    Find the absolute number for a show episode

    :param show: Show object
    :param season: Season number
    :param episode: Episode number
    :return: The absolute number, or None when it cannot be determined
    """
    absolute_number = None
    # NOTE(review): truthiness check skips season 0 / episode 0 (specials) --
    # confirm that excluding them is intentional
    if season and episode:
        main_db_con = db.DBConnection()
        sql = "SELECT * FROM tv_episodes WHERE showid = ? and season = ? and episode = ?"
        sql_results = main_db_con.select(sql, [show.indexerid, season, episode])
        # only accept an unambiguous single-row match
        if len(sql_results) == 1:
            absolute_number = int(sql_results[0][b"absolute_number"])
            logger.log("Found absolute number {absolute} for show {show} {ep}".format
                       (absolute=absolute_number, show=show.name,
                        ep=episode_num(season, episode)), logger.DEBUG)
        else:
            logger.log("No entries for absolute number for show {show} {ep}".format
                       (show=show.name, ep=episode_num(season, episode)), logger.DEBUG)
    return absolute_number
def get_all_episodes_from_absolute_number(show, absolute_numbers, indexer_id=None):
    """
    Translate a list of absolute episode numbers into a season and episode list.

    :param show: Show object (may be None when indexer_id is supplied)
    :param absolute_numbers: list of absolute episode numbers to resolve
    :param indexer_id: indexer id used to look the show up when show is None
    :return: tuple (season, episodes) -- season is taken from the last resolved episode
    """
    episodes = []
    season = None
    if len(absolute_numbers):
        if not show and indexer_id:
            show = Show.find(sickbeard.showList, indexer_id)
        for absolute_number in absolute_numbers if show else []:
            ep = show.getEpisode(None, None, absolute_number=absolute_number)
            if ep:
                episodes.append(ep.episode)
                season = ep.season  # this will always take the last found season so eps that cross the season border are not handeled well
    return season, episodes
def sanitizeSceneName(name, anime=False):
    """
    Takes a show name and returns the "scenified" version of it.

    :param name: show name to sanitize
    :param anime: Some show have a ' in their name(Kuroko's Basketball) and is needed for search.
    :return: A string containing the scene version of the show name given.
    """
    # assert isinstance(name, unicode), name + ' is not unicode'
    if not name:
        return ''
    # punctuation that never appears in scene release names
    bad_chars = ',:()!?\u2019'
    if not anime:
        bad_chars += "'"
    for bad_char in bad_chars:
        name = name.replace(bad_char, "")
    # tidy up stuff that doesn't belong in scene names
    name = name.replace("&", "and")
    # collapse separators and repeated dots into single dots
    name = re.sub(r"[.]+", ".", re.sub(r"[- /]+", ".", name))
    return name[:-1] if name.endswith('.') else name
# Binary operators permitted by arithmeticEval, keyed by their ast node type.
# NOTE(review): operator.div exists on Python 2 only; Python 3 would need
# operator.truediv -- confirm the target interpreter before porting.
_binOps = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.div,
    ast.Mod: operator.mod
}
def arithmeticEval(s):
    """
    A safe eval supporting basic arithmetic operations.

    :param s: expression to evaluate
    :return: value
    :raises Exception: when the expression contains anything beyond
        numbers, strings and the operators whitelisted in _binOps
    """
    node = ast.parse(s, mode='eval')

    def _eval(node):
        # recursively fold the AST; only whitelisted node types are allowed
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        elif isinstance(node, ast.Str):
            return node.s
        elif isinstance(node, ast.Num):
            return node.n
        elif isinstance(node, ast.BinOp):
            return _binOps[type(node.op)](_eval(node.left), _eval(node.right))
        else:
            raise Exception('Unsupported type {0}'.format(node))

    return _eval(node.body)
def create_https_certificates(ssl_cert, ssl_key):
    """
    Create self-signed HTTPS certificates and store in paths 'ssl_cert' and 'ssl_key'

    :param ssl_cert: Path of SSL certificate file to write
    :param ssl_key: Path of SSL keyfile to write
    :return: True on success, False on failure
    """
    # assert isinstance(ssl_key, unicode)
    # assert isinstance(ssl_cert, unicode)
    try:
        from OpenSSL import crypto  # @UnresolvedImport
        from certgen import createKeyPair, createCertRequest, createCertificate, TYPE_RSA, \
            serial  # @UnresolvedImport
    except Exception:
        logger.log("pyopenssl module missing, please install for https access", logger.WARNING)
        return False
    # Create the CA Certificate
    cakey = createKeyPair(TYPE_RSA, 1024)
    careq = createCertRequest(cakey, CN='Certificate Authority')
    cacert = createCertificate(careq, (careq, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years
    cname = 'SickRage'
    pkey = createKeyPair(TYPE_RSA, 1024)
    req = createCertRequest(pkey, CN=cname)
    cert = createCertificate(req, (cacert, cakey), serial, (0, 60 * 60 * 24 * 365 * 10))  # ten years
    # Save the key and certificate to disk
    try:
        # pylint: disable=no-member
        # Module has no member
        # BUGFIX: use context managers so the handles are closed (and data
        # flushed) even if a write fails; previously the files were never closed
        with io.open(ssl_key, 'wb') as key_file:
            key_file.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
        with io.open(ssl_cert, 'wb') as cert_file:
            cert_file.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    except Exception:
        logger.log("Error creating SSL key and certificate", logger.ERROR)
        return False
    return True
def backupVersionedFile(old_file, version):
    """
    Back up an old version of a file

    :param old_file: Original file, to take a backup from
    :param version: Version of file to store in backup
    :return: True if success, False if failure
    """
    numTries = 0
    # backup name pattern: <old_file>.v<version>
    new_file = old_file + '.' + 'v' + str(version)
    while not ek(os.path.isfile, new_file):
        if not ek(os.path.isfile, old_file):
            logger.log("Not creating backup, {0} doesn't exist".format(old_file), logger.DEBUG)
            break
        try:
            logger.log("Trying to back up {0} to {1}".format(old_file, new_file), logger.DEBUG)
            shutil.copy(old_file, new_file)
            logger.log("Backup done", logger.DEBUG)
            break
        except Exception as error:
            logger.log("Error while trying to back up {0} to {1} : {2}".format(old_file, new_file, error), logger.WARNING)
            numTries += 1
            # retry with a short delay, up to 10 attempts
            time.sleep(1)
            logger.log("Trying again.", logger.DEBUG)
        if numTries >= 10:
            logger.log("Unable to back up {0} to {1} please do it manually.".format(old_file, new_file), logger.ERROR)
            return False
    return True
def restoreVersionedFile(backup_file, version):
    """
    Restore a file version to original state

    :param backup_file: File to restore
    :param version: Version of file to restore
    :return: True on success, False on failure
    """
    numTries = 0
    # strip the extension to get the live file name; the backup is <name>.v<version>
    new_file, ext_ = ek(os.path.splitext, backup_file)
    restore_file = new_file + '.' + 'v' + str(version)
    if not ek(os.path.isfile, new_file):
        logger.log("Not restoring, {0} doesn't exist".format(new_file), logger.DEBUG)
        return False
    try:
        # keep the current live file as <name>.r<version> before overwriting it
        logger.log("Trying to backup {0} to {1}.r{2} before restoring backup".format
                   (new_file, new_file, version), logger.DEBUG)
        shutil.move(new_file, new_file + '.' + 'r' + str(version))
    except Exception as error:
        logger.log("Error while trying to backup DB file {0} before proceeding with restore: {1}".format
                   (restore_file, error), logger.WARNING)
        return False
    while not ek(os.path.isfile, new_file):
        if not ek(os.path.isfile, restore_file):
            logger.log("Not restoring, {0} doesn't exist".format(restore_file), logger.DEBUG)
            break
        try:
            logger.log("Trying to restore file {0} to {1}".format(restore_file, new_file), logger.DEBUG)
            shutil.copy(restore_file, new_file)
            logger.log("Restore done", logger.DEBUG)
            break
        except Exception as error:
            logger.log("Error while trying to restore file {0}. Error: {1}".format(restore_file, error), logger.WARNING)
            numTries += 1
            # retry with a short delay, up to 10 attempts
            time.sleep(1)
            logger.log("Trying again. Attempt #: {0}".format(numTries), logger.DEBUG)
        if numTries >= 10:
            logger.log("Unable to restore file {0} to {1}".format(restore_file, new_file), logger.WARNING)
            return False
    return True
def get_lan_ip():
    """Returns IP of system"""
    try:
        # first non-loopback address reported for this host
        addresses = socket.gethostbyname_ex(socket.gethostname())[2]
        return next(ip for ip in addresses if not ip.startswith("127."))
    except Exception:
        # fall back to the bare hostname when resolution fails
        return socket.gethostname()
def check_url(url):
    """
    Check if a URL exists without downloading the whole file.
    We only check the URL header.

    :param url: URL to probe with a HEAD request
    :return: True when the request succeeds, False otherwise
    """
    # NOTE(review): verify=False disables TLS certificate verification for
    # this probe -- confirm this is intentional
    try:
        requests.head(url, verify=False).raise_for_status()
    except Exception as error:
        handle_requests_exception(error)
        return False
    return True
def anon_url(*url):
    """
    Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended.
    """
    # any None fragment invalidates the whole URL
    if None in url:
        return ''
    return '{0}{1}'.format(sickbeard.ANON_REDIRECT, ''.join(str(s) for s in url))
"""
Encryption
==========
By Pedro Jose Pereira Vieito <pvieito@gmail.com> (@pvieito)
* If encryption_version==0 then return data without encryption
* The keys should be unique for each device
To add a new encryption_version:
1) Code your new encryption_version
2) Update the last encryption_version available in webserve.py
3) Remember to maintain old encryption versions and key generators for retrocompatibility
"""
# Key Generators
unique_key1 = hex(uuid.getnode() ** 2) # Used in encryption v1
# Encryption Functions
def encrypt(data, encryption_version=0, _decrypt=False):
    """
    XOR-"encrypt" (or decrypt) data with a device/app specific key.

    :param data: plain text (or base64 cipher text when _decrypt is True)
    :param encryption_version: 0 = plain text passthrough,
        1 = XOR against unique_key1, 2 = XOR against sickbeard.ENCRYPTION_SECRET
    :param _decrypt: run the cipher in reverse
    :return: the encrypted/decrypted data
    """
    # Version 1: Simple XOR encryption (this is not very secure, but works)
    if encryption_version == 1:
        if _decrypt:
            return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(unique_key1)))
        else:
            return base64.encodestring(
                ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(unique_key1)))).strip()
    # Version 2: Simple XOR encryption (this is not very secure, but works)
    elif encryption_version == 2:
        if _decrypt:
            return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(base64.decodestring(data), cycle(sickbeard.ENCRYPTION_SECRET)))
        else:
            return base64.encodestring(
                ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(data, cycle(sickbeard.ENCRYPTION_SECRET)))).strip()
    # Version 0: Plain text
    else:
        return data
def decrypt(data, encryption_version=0):
    """Decrypt ``data`` by running the symmetric XOR cipher of encrypt() in reverse."""
    return encrypt(data, encryption_version, _decrypt=True)
def full_sanitizeSceneName(name):
    """Return the scene name lowercased with dots/dashes turned into spaces, for loose comparisons."""
    return re.sub('[. -]', ' ', sanitizeSceneName(name)).lower().strip()
def _check_against_names(nameInQuestion, show, season=-1):
    """Return True when nameInQuestion matches the show's name or one of its scene exceptions."""
    candidate_names = []
    # the show's own title only counts for season -1 (any) or season 1
    if season in [-1, 1]:
        candidate_names = [show.name]
    candidate_names.extend(sickbeard.scene_exceptions.get_scene_exceptions(show.indexerid, season=season))
    return any(full_sanitizeSceneName(candidate) == nameInQuestion for candidate in candidate_names)
def get_show(name, tryIndexers=False):
    """
    Resolve a show name to a show object using, in order, the name cache,
    the indexers (optional) and the scene exception list.

    :param name: show name to look up
    :param tryIndexers: when True, also query the indexers for the name
    :return: the matching show object, or None when not found
    """
    if not sickbeard.showList:
        return
    showObj = None
    fromCache = False
    if not name:
        return showObj
    try:
        # check cache for show
        cache = sickbeard.name_cache.retrieveNameFromCache(name)
        if cache:
            fromCache = True
            showObj = Show.find(sickbeard.showList, int(cache))
        # try indexers
        if not showObj and tryIndexers:
            showObj = Show.find(
                sickbeard.showList, searchIndexerForShowID(full_sanitizeSceneName(name), ui=classes.ShowListUI)[2])
        # try scene exceptions
        if not showObj:
            ShowID = sickbeard.scene_exceptions.get_scene_exception_by_name(name)[0]
            if ShowID:
                showObj = Show.find(sickbeard.showList, int(ShowID))
        # add show to cache (only when it wasn't already there)
        if showObj and not fromCache:
            sickbeard.name_cache.addNameToCache(name, showObj.indexerid)
    except Exception as error:
        logger.log("Error when attempting to find show: {0} in SickRage. Error: {1} ".format(name, error), logger.DEBUG)
    return showObj
def is_hidden_folder(folder):
    """
    Returns True if folder is hidden.
    On Linux based systems hidden folders start with . (dot)

    :param folder: Full path of folder to check
    :return: True when the folder exists and is hidden, False otherwise
    """
    def is_hidden(filepath):
        # hidden when dot-prefixed (POSIX) or carrying the hidden attribute (Windows)
        name = ek(os.path.basename, ek(os.path.abspath, filepath))
        return name.startswith('.') or has_hidden_attribute(filepath)

    def has_hidden_attribute(filepath):
        # ctypes.windll only exists on Windows; the AttributeError raised
        # elsewhere is caught below and treated as "not hidden"
        try:
            attrs = ctypes.windll.kernel32.GetFileAttributesW(ctypes.c_wchar_p(unicode(filepath)))
            assert attrs != -1
            result = bool(attrs & 2)
        except (AttributeError, AssertionError):
            result = False
        return result

    if ek(os.path.isdir, folder):
        if is_hidden(folder):
            return True
    return False
def real_path(path):
    """
    Returns: the canonicalized absolute pathname. The resulting path will have no symbolic link, '/./' or '/../' components.
    """
    return ek(os.path.normpath, ek(os.path.normcase, ek(os.path.realpath, path)))
def validateShow(show, season=None, episode=None):
    """
    Fetch a show (or a specific episode of it) from the show's indexer.

    :param show: Show object to validate
    :param season: season number (optional)
    :param episode: episode number (optional)
    :return: the indexer API object when season and episode are both None,
        the episode entry otherwise, or None (implicitly) when the
        episode/season is not found on the indexer
    """
    indexer_lang = show.lang
    try:
        lINDEXER_API_PARMS = sickbeard.indexerApi(show.indexer).api_params.copy()
        lINDEXER_API_PARMS['language'] = indexer_lang or sickbeard.INDEXER_DEFAULT_LANGUAGE
        if show.dvdorder:
            lINDEXER_API_PARMS['dvdorder'] = True
        t = sickbeard.indexerApi(show.indexer).indexer(**lINDEXER_API_PARMS)
        if season is None and episode is None:
            return t
        return t[show.indexerid][season][episode]
    except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
        # not found on the indexer -> fall through and return None
        pass
def set_up_anidb_connection():
    """
    Connect (and authenticate) to anidb, reusing the cached connection when possible.

    :return: True when an authenticated connection is available, False otherwise
    """
    if not sickbeard.USE_ANIDB:
        logger.log("Usage of anidb disabled. Skiping", logger.DEBUG)
        return False
    if not sickbeard.ANIDB_USERNAME and not sickbeard.ANIDB_PASSWORD:
        logger.log("anidb username and/or password are not set. Aborting anidb lookup.", logger.DEBUG)
        return False
    if not sickbeard.ADBA_CONNECTION:
        def anidb_logger(msg):
            # route the adba library's log output through our logger
            return logger.log("anidb: {0} ".format(msg), logger.DEBUG)

        try:
            sickbeard.ADBA_CONNECTION = adba.Connection(keepAlive=True, log=anidb_logger)
        except Exception as error:
            logger.log("anidb exception msg: {0} ".format(error), logger.WARNING)
            return False
    try:
        # authenticate only when the cached connection isn't already authed
        if not sickbeard.ADBA_CONNECTION.authed():
            sickbeard.ADBA_CONNECTION.auth(sickbeard.ANIDB_USERNAME, sickbeard.ANIDB_PASSWORD)
        else:
            return True
    except Exception as error:
        logger.log("anidb exception msg: {0} ".format(error), logger.WARNING)
        return False
    return sickbeard.ADBA_CONNECTION.authed()
def makeZip(fileList, archive):
    """
    Create a ZIP of files

    :param fileList: A list of file names - full path each name
    :param archive: File name for the archive with a full path
    :return: True on success, False on failure
    """
    try:
        # BUGFIX: use a context manager so the archive handle is closed even
        # when one of the writes raises (previously it leaked on error)
        with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as a:
            for f in fileList:
                a.write(f)
        return True
    except Exception as error:
        logger.log("Zip creation error: {0} ".format(error), logger.ERROR)
        return False
def extractZip(archive, targetDir):
    """
    Unzip an archive's files (flattened, directories skipped) into a directory.

    :param archive: The file name for the archive with a full path
    :param targetDir: Directory to extract the members into
    :return: True on success, False on failure
    """
    try:
        if not ek(os.path.exists, targetDir):
            ek(os.mkdir, targetDir)
        # BUGFIX: context managers close the archive and member/target handles
        # on error, and io.open replaces the Python-2-only file() builtin
        with zipfile.ZipFile(archive, 'r', allowZip64=True) as zip_file:
            for member in zip_file.namelist():
                filename = ek(os.path.basename, member)
                # skip directories
                if not filename:
                    continue
                # copy file (taken from zipfile's extract)
                source = zip_file.open(member)
                try:
                    with io.open(ek(os.path.join, targetDir, filename), "wb") as target:
                        shutil.copyfileobj(source, target)
                finally:
                    source.close()
        return True
    except Exception as error:
        logger.log("Zip extraction error: {0} ".format(error), logger.ERROR)
        return False
def backupConfigZip(fileList, archive, arcname=None):
    """
    Store the config file as a ZIP

    :param fileList: List of files to store
    :param archive: ZIP file name
    :param arcname: Archive path (files are stored relative to it)
    :return: True on success, False on failure
    """
    try:
        # BUGFIX: use a context manager so the archive handle is closed even
        # when one of the writes raises (previously it leaked on error)
        with zipfile.ZipFile(archive, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as a:
            for f in fileList:
                a.write(f, ek(os.path.relpath, f, arcname))
        return True
    except Exception as error:
        logger.log("Zip creation error: {0} ".format(error), logger.ERROR)
        return False
def restoreConfigZip(archive, targetDir):
    """
    Restores a Config ZIP file back in place

    :param archive: ZIP filename
    :param targetDir: Directory to restore to
    :return: True on success, False on failure
    """
    try:
        if not ek(os.path.exists, targetDir):
            ek(os.mkdir, targetDir)
        else:
            # move the existing target dir aside as a timestamped backup
            def path_leaf(path):
                head, tail = ek(os.path.split, path)
                return tail or ek(os.path.basename, head)
            bakFilename = '{0}-{1}'.format(path_leaf(targetDir), datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
            shutil.move(targetDir, ek(os.path.join, ek(os.path.dirname, targetDir), bakFilename))
        zip_file = zipfile.ZipFile(archive, 'r', allowZip64=True)
        for member in zip_file.namelist():
            zip_file.extract(member, targetDir)
        zip_file.close()
        return True
    except Exception as error:
        logger.log("Zip extraction error: {0}".format(error), logger.ERROR)
        # a partial restore is worse than none: remove the half-written dir
        shutil.rmtree(targetDir)
        return False
def mapIndexersToShow(showObj):
    """
    Map a show's id on its own indexer to its ids on all known indexers.

    Results are cached in the indexer_mapping DB table; a cache hit
    short-circuits the indexer lookups via the for/else below.

    :param showObj: Show object to map
    :return: dict {indexer: indexer_id}; 0 when no mapping was found
    """
    mapped = {}
    # init mapped indexers object
    for indexer in sickbeard.indexerApi().indexers:
        mapped[indexer] = showObj.indexerid if int(indexer) == int(showObj.indexer) else 0
    main_db_con = db.DBConnection()
    sql_results = main_db_con.select(
        "SELECT * FROM indexer_mapping WHERE indexer_id = ? AND indexer = ?",
        [showObj.indexerid, showObj.indexer])
    # for each mapped entry
    for curResult in sql_results:
        nlist = [i for i in curResult if i is not None]
        # Check if its mapped with both tvdb and tvrage.
        if len(nlist) >= 4:
            logger.log("Found indexer mapping in cache for show: " + showObj.name, logger.DEBUG)
            mapped[int(curResult[b'mindexer'])] = int(curResult[b'mindexer_id'])
            break
    else:
        # no cached mapping: query every other indexer by show name
        sql_l = []
        for indexer in sickbeard.indexerApi().indexers:
            if indexer == showObj.indexer:
                mapped[indexer] = showObj.indexerid
                continue
            lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy()
            lINDEXER_API_PARMS['custom_ui'] = classes.ShowListUI
            t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS)
            try:
                mapped_show = t[showObj.name]
            except Exception:
                logger.log("Unable to map " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
                    indexer).name + " for show: " + showObj.name + ", skipping it", logger.DEBUG)
                continue
            # only trust an unambiguous single match
            if mapped_show and len(mapped_show) == 1:
                logger.log("Mapping " + sickbeard.indexerApi(showObj.indexer).name + "->" + sickbeard.indexerApi(
                    indexer).name + " for show: " + showObj.name, logger.DEBUG)
                mapped[indexer] = int(mapped_show[0][b'id'])
                logger.log("Adding indexer mapping to DB for show: " + showObj.name, logger.DEBUG)
                sql_l.append([
                    "INSERT OR IGNORE INTO indexer_mapping (indexer_id, indexer, mindexer_id, mindexer) VALUES (?,?,?,?)",
                    [showObj.indexerid, showObj.indexer, int(mapped_show[0][b'id']), indexer]])
        if sql_l:
            main_db_con = db.DBConnection()
            main_db_con.mass_action(sql_l)
    return mapped
def touchFile(fname, atime=None):
    """
    Touch a file (change modification date)

    :param fname: Filename to touch
    :param atime: Specific access time (defaults to None)
    :return: True on success, False on failure
    """
    # require a timestamp, a name, and an existing regular file
    if not (atime and fname and ek(os.path.isfile, fname)):
        return False
    ek(os.utime, fname, (atime, atime))
    return True
def make_session():
    """Build a requests session with our user agent, cloudflare scraping (cfscrape) and HTTP caching (CacheControl) layered on."""
    session = requests.Session()
    session.headers.update({'User-Agent': USER_AGENT, 'Accept-Encoding': 'gzip,deflate'})
    session = cfscrape.create_scraper(sess=session)
    return CacheControl(sess=session, cache_etags=True)
def request_defaults(kwargs):
    """
    Pop our request-control keys out of ``kwargs`` and translate them into
    requests arguments (mutates ``kwargs`` in place).

    :param kwargs: keyword dict from the caller; 'hooks', 'cookies',
        'allow_proxy' and 'verify' are consumed
    :return: tuple (hooks, cookies, verify, proxies)
    """
    hooks = kwargs.pop('hooks', None)
    cookies = kwargs.pop('cookies', None)
    allow_proxy = kwargs.pop('allow_proxy', True)
    # TLS verification only when both the global setting and the caller allow it
    verify = certifi.old_where() if all([sickbeard.SSL_VERIFY, kwargs.pop('verify', True)]) else False
    # request session proxies
    if allow_proxy and sickbeard.PROXY_SETTING:
        logger.log("Using global proxy: " + sickbeard.PROXY_SETTING, logger.DEBUG)
        parsed_url = urlparse(sickbeard.PROXY_SETTING)
        # default to http:// when the setting has no scheme
        address = sickbeard.PROXY_SETTING if parsed_url.scheme else 'http://' + sickbeard.PROXY_SETTING
        proxies = {
            "http": address,
            "https": address,
        }
    else:
        proxies = None
    return hooks, cookies, verify, proxies
def getURL(url, post_data=None, params=None, headers=None,  # pylint:disable=too-many-arguments, too-many-return-statements, too-many-branches, too-many-locals
           timeout=30, session=None, **kwargs):
    """
    Returns data retrieved from the url provider.

    :param url: URL to request
    :param post_data: when set, a POST is issued instead of a GET
    :param params: query-string parameters
    :param headers: extra request headers
    :param timeout: request timeout in seconds
    :param session: requests session to use (required)
    :param kwargs: 'returns' selects the result shape ('text'/'json'/'content'/'response'),
        'stream' enables streaming; the rest is consumed by request_defaults()
    :return: the response in the requested shape, or None on any failure
    """
    try:
        response_type = kwargs.pop('returns', 'text')
        stream = kwargs.pop('stream', False)
        hooks, cookies, verify, proxies = request_defaults(kwargs)
        # requests wants byte strings for params/post data on Python 2
        if params and isinstance(params, (list, dict)):
            for param in params:
                if isinstance(params[param], unicode):
                    params[param] = params[param].encode('utf-8')
        if post_data and isinstance(post_data, (list, dict)):
            for param in post_data:
                if isinstance(post_data[param], unicode):
                    post_data[param] = post_data[param].encode('utf-8')
        resp = session.request(
            'POST' if post_data else 'GET', url, data=post_data or {}, params=params or {},
            timeout=timeout, allow_redirects=True, hooks=hooks, stream=stream,
            headers=headers, cookies=cookies, proxies=proxies, verify=verify
        )
        resp.raise_for_status()
    except Exception as error:
        handle_requests_exception(error)
        return None
    try:
        # map the 'returns' selector onto the response object
        return resp if response_type == 'response' or response_type is None else resp.json() if response_type == 'json' else getattr(resp, response_type, resp)
    except ValueError:
        logger.log('Requested a json response but response was not json, check the url: {0}'.format(url), logger.DEBUG)
        return None
def download_file(url, filename, session=None, headers=None, **kwargs):  # pylint:disable=too-many-return-statements
    """
    Downloads a file specified

    :param url: Source URL
    :param filename: Target file on filesystem
    :param session: request session to use
    :param headers: override existing headers in request session
    :return: True on success, False on failure
    """
    try:
        hooks, cookies, verify, proxies = request_defaults(kwargs)
        with closing(session.get(url, allow_redirects=True, stream=True,
                                 verify=verify, headers=headers, cookies=cookies,
                                 hooks=hooks, proxies=proxies)) as resp:
            resp.raise_for_status()
            try:
                with io.open(filename, 'wb') as fp:
                    for chunk in resp.iter_content(chunk_size=1024):
                        if chunk:
                            fp.write(chunk)
                            fp.flush()
                chmodAsParent(filename)
            except Exception as error:
                logger.log("Problem downloading file, setting permissions or writing file to \"{0}\" - ERROR: {1}".format(filename, error), logger.WARNING)
                # BUGFIX: previously fell through to `return True` even though
                # the file was never (fully) written
                return False
    except Exception as error:
        handle_requests_exception(error)
        return False
    return True
def handle_requests_exception(requests_exception):  # pylint: disable=too-many-branches, too-many-statements
    """
    Log a requests exception according to its concrete type.

    The exception is consumed after logging; nothing is re-raised.

    :param requests_exception: exception instance raised by a requests call
    """
    default = "Request failed: {0}"
    try:
        # re-raise so each concrete type lands in its own except clause
        raise requests_exception
    except requests.exceptions.SSLError as error:
        if ssl.OPENSSL_VERSION_INFO < (1, 0, 1, 5):
            logger.log("SSL Error requesting url: '{0}' You have {1}, try upgrading OpenSSL to 1.0.1e+".format(error.request.url, ssl.OPENSSL_VERSION))
        if sickbeard.SSL_VERIFY:
            # BUGFIX: the '{0}' placeholder used to be logged literally because
            # .format() was never called on this message
            logger.log("SSL Error requesting url: '{0}' Try disabling Cert Verification on the advanced tab of /config/general".format(error.request.url))
        logger.log(default.format(error), logger.DEBUG)
        logger.log(traceback.format_exc(), logger.DEBUG)
    except requests.exceptions.HTTPError as error:
        # swallow kat's nosniff-404 responses, log everything else
        if not (hasattr(error, 'response') and error.response and \
            hasattr(error.response, 'status_code') and error.response.status_code == 404 and \
            hasattr(error.response, 'headers') and error.response.headers.get('X-Content-Type-Options') == 'nosniff'):
            logger.log(default.format(error))
    except requests.exceptions.TooManyRedirects as error:
        logger.log(default.format(error))
    except requests.exceptions.ConnectTimeout as error:
        logger.log(default.format(error))
    except requests.exceptions.ReadTimeout as error:
        logger.log(default.format(error))
    except requests.exceptions.ProxyError as error:
        logger.log(default.format(error))
    except requests.exceptions.ConnectionError as error:
        logger.log(default.format(error))
    except requests.exceptions.ContentDecodingError as error:
        logger.log(default.format(error))
        logger.log(traceback.format_exc(), logger.DEBUG)
    except requests.exceptions.ChunkedEncodingError as error:
        logger.log(default.format(error))
    except requests.exceptions.InvalidURL as error:
        logger.log(default.format(error))
    except requests.exceptions.InvalidSchema as error:
        logger.log(default.format(error))
    except requests.exceptions.MissingSchema as error:
        logger.log(default.format(error))
    except requests.exceptions.RetryError as error:
        logger.log(default.format(error))
    except requests.exceptions.StreamConsumedError as error:
        logger.log(default.format(error))
    except requests.exceptions.URLRequired as error:
        logger.log(default.format(error))
    except Exception as error:
        # unknown exception type: log loudly with a traceback
        logger.log(default.format(error), logger.ERROR)
        logger.log(traceback.format_exc(), logger.DEBUG)
def get_size(start_path='.'):
    """
    Find the total dir and filesize of a path

    :param start_path: Path to recursively count size
    :return: total filesize in bytes, or -1 when start_path is not a directory
    """
    if not ek(os.path.isdir, start_path):
        return -1
    total_size = 0
    for dirpath, dirnames_, filenames in ek(os.walk, start_path):
        for f in filenames:
            fp = ek(os.path.join, dirpath, f)
            try:
                total_size += ek(os.path.getsize, fp)
            except OSError as error:
                # file may vanish mid-walk; log and keep counting
                logger.log("Unable to get size for file {0} Error: {1}".format(fp, error), logger.ERROR)
                logger.log(traceback.format_exc(), logger.DEBUG)
    return total_size
def generateApiKey():
    """ Return a new randomized API_KEY"""
    # NOTE(review): hashlib accepts str on Python 2 only; a Python 3 port
    # would need to encode these values to bytes first -- confirm target
    logger.log("Generating New API key")
    secure_hash = hashlib.sha512(str(time.time()))
    secure_hash.update(str(random.SystemRandom().getrandbits(4096)))
    # 32 hex chars of the digest are plenty for an API key
    return secure_hash.hexdigest()[:32]
def remove_article(text=''):
    """Remove a leading English article ('A', 'An', 'The') from a text string."""
    # 'A to Z'-style titles are protected by the negative lookahead
    article_pattern = re.compile(r'^(?:(?:A(?!\s+to)n?)|The)\s(\w)', re.IGNORECASE)
    return article_pattern.sub(r'\1', text)
def generateCookieSecret():
    """Generate a new cookie secret"""
    # 32 random bytes (two UUID4s), base64-encoded
    random_bytes = uuid.uuid4().bytes + uuid.uuid4().bytes
    return base64.b64encode(random_bytes)
def disk_usage(path):
    """
    Return the free space in bytes on the filesystem containing ``path``.

    :param path: filesystem path to inspect
    :return: free bytes available to the current user
    :raises Exception: when no platform-specific method is available
    """
    if platform.system() == 'Windows':
        free = ctypes.c_ulonglong(0)
        if ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(unicode(path)), None, None, ctypes.pointer(free)) == 0:
            raise ctypes.WinError()
        return free.value
    elif hasattr(os, 'statvfs'):  # POSIX
        if platform.system() == 'Darwin':
            try:
                import subprocess
                call = subprocess.Popen(["df", "-k", path], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                output = call.communicate()[0]
                # column 3 of `df -k` is the available 1K-block count
                return int(output.split("\n")[1].split()[3]) * 1024
            except Exception:
                # fall back to statvfs below
                pass
        st = ek(os.statvfs, path)
        return st.f_bavail * st.f_frsize  # pylint: disable=no-member
    else:
        raise Exception("Unable to determine free space on your OS")
def verify_freespace(src, dest, oldfile=None, method="copy"):
    """
    Checks if the target system has enough free space to copy or move a file.

    :param src: Source filename
    :param dest: Destination path
    :param oldfile: File (or list of files) to be replaced (defaults to None)
    :param method: 'copy', 'move' or a '*link' processing method
    :return: True if there is enough space for the file, False if there isn't. Also returns True if the OS doesn't support this option
    """
    if not isinstance(oldfile, list):
        oldfile = [oldfile] if oldfile else []
    logger.log("Trying to determine free space on destination drive", logger.DEBUG)
    if not ek(os.path.isfile, src):
        logger.log("A path to a file is required for the source. {0} is not a file.".format(src), logger.WARNING)
        return True
    if not (ek(os.path.exists, dest) or ek(os.path.exists, ek(os.path.dirname, dest))):
        # BUGFIX: oldfile can be an empty list here; indexing it blindly
        # raised IndexError instead of logging the warning
        target_name = oldfile[0].name if oldfile else src
        logger.log("A path is required for the destination. Check the root dir and show locations are correct for {0} (I got '{1}')".format(
            target_name, dest), logger.WARNING)
        return False
    # shortcut: if we are moving the file and the destination == src dir,
    # then by definition there is enough space
    if method == "move" and ek(os.stat, src).st_dev == ek(os.stat, dest if ek(os.path.exists, dest) else ek(os.path.dirname, dest)).st_dev:  # pylint: disable=no-member
        logger.log("Process method is 'move' and src and destination are on the same device, skipping free space check", logger.INFO)
        return True
    try:
        diskfree = disk_usage(dest if ek(os.path.exists, dest) else ek(os.path.dirname, dest))
    except Exception as error:
        logger.log("Unable to determine free space, so I will assume there is enough.", logger.WARNING)
        logger.log("Error: {error}".format(error=error), logger.DEBUG)
        logger.log(traceback.format_exc(), logger.DEBUG)
        return True
    # Lets also do this for symlink and hardlink
    if 'link' in method and diskfree > 1024**2:
        return True
    neededspace = ek(os.path.getsize, src)
    # space freed by replacing the old file(s) counts as available
    if oldfile:
        for f in oldfile:
            if ek(os.path.isfile, f.location):
                diskfree += ek(os.path.getsize, f.location)
    if diskfree > neededspace:
        return True
    else:
        logger.log("Not enough free space: Needed: {0} bytes ( {1} ), found: {2} bytes ( {3} )".format
                   (neededspace, pretty_file_size(neededspace), diskfree, pretty_file_size(diskfree)), logger.WARNING)
        return False
def getDiskSpaceUsage(diskPath=None):
    """
    returns the free space in human readable bytes for a given path or False if no path given

    :param diskPath: the filesystem path being checked
    :return: pretty-printed free space string, or False on error / missing path
    """
    if diskPath and ek(os.path.exists, diskPath):
        try:
            free = disk_usage(diskPath)
        except Exception as error:
            logger.log("Unable to determine free space", logger.WARNING)
            logger.log("Error: {error}".format(error=error), logger.DEBUG)
            logger.log(traceback.format_exc(), logger.DEBUG)
        else:
            return pretty_file_size(free)
    return False
# https://gist.github.com/thatalextaylor/7408395
def pretty_time_delta(seconds):
    """
    Render a duration in seconds as a compact human readable string.

    :param seconds: duration in seconds (may be negative or fractional)
    :return: a string such as ``'1d2h3m4s'``; negative durations carry a
             leading ``'-'``.  A zero-length delta renders as ``'0s'``.
    """
    sign_string = '-' if seconds < 0 else ''
    seconds = abs(int(seconds))
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    time_delta = sign_string
    if days > 0:
        time_delta += '{0}d'.format(days)
    if hours > 0:
        time_delta += '{0}h'.format(hours)
    if minutes > 0:
        time_delta += '{0}m'.format(minutes)
    if seconds > 0:
        time_delta += '{0}s'.format(seconds)
    # Bug fix: a zero-length delta used to come back as the empty string
    # (every component was 0 so no unit was ever appended).
    if not time_delta.lstrip('-'):
        time_delta = sign_string + '0s'
    return time_delta
def isFileLocked(checkfile, writeLockCheck=False):
    """
    Check whether a file is locked.  Performs three checks:

        1. Checks if the file even exists.
        2. Attempts to open the file for reading.  This will determine if
           the file has a write lock.  Write locks occur when the file is
           being edited or copied to, e.g. a file copy destination.
        3. If the writeLockCheck parameter is True, attempts to rename the
           file.  If this fails the file is open by some other process for
           reading.  The file can be read, but not written to or deleted.

    :param checkfile: the file being checked
    :param writeLockCheck: when True will check if the file is locked for
                           writing (prevents move operations)
    :return: True when the file appears locked (or does not exist),
             False otherwise
    """
    checkfile = ek(os.path.abspath, checkfile)

    # A missing file is reported the same way as a locked one.
    if not ek(os.path.exists, checkfile):
        return True
    try:
        # If we can open it for reading, nobody holds a write lock on it.
        f = ek(io.open, checkfile, 'rb')
        f.close()  # pylint: disable=no-member
    except IOError:
        return True

    if writeLockCheck:
        # Probe for read locks by renaming the file away and back again;
        # the rename fails while another process keeps the file open.
        lockFile = checkfile + ".lckchk"
        if ek(os.path.exists, lockFile):
            ek(os.remove, lockFile)
        try:
            ek(os.rename, checkfile, lockFile)
            # Brief pause before restoring the original name.
            time.sleep(1)
            ek(os.rename, lockFile, checkfile)
        except (OSError, IOError):
            return True

    return False
def getTVDBFromID(indexer_id, indexer):  # pylint:disable=too-many-return-statements
    """
    Resolve a show id from another indexer into a TVDB series id.

    :param indexer_id: the id of the show on the source indexer
    :param indexer: one of 'IMDB', 'ZAP2IT' or 'TVMAZE'
    :return: the TVDB id reported by the remote service, or '' when the
             indexer is unknown or the lookup fails
    """
    session = make_session()
    tvdb_id = ''

    if indexer in ('IMDB', 'ZAP2IT'):
        # Both lookups hit the same TVDB endpoint; only the query key differs.
        # (The two branches used to be verbatim copies of each other.)
        query_key = 'imdbid' if indexer == 'IMDB' else 'zap2it'
        url = "http://www.thetvdb.com/api/GetSeriesByRemoteID.php?{0}={1}".format(query_key, indexer_id)
        data = getURL(url, session=session, returns='content')
        if data is None:
            return tvdb_id
        try:
            tree = ET.fromstring(data)
            for show in tree.getiterator("Series"):
                tvdb_id = show.findtext("seriesid")
        except SyntaxError:
            # ET.ParseError subclasses SyntaxError; malformed XML yields ''.
            pass
        return tvdb_id
    elif indexer == 'TVMAZE':
        url = "http://api.tvmaze.com/shows/{0}".format(indexer_id)
        data = getURL(url, session=session, returns='json')
        if data is None:
            return tvdb_id
        tvdb_id = data[b'externals'][b'thetvdb']
        return tvdb_id
    else:
        # Unknown indexer: nothing we can do.
        return tvdb_id
def get_showname_from_indexer(indexer, indexer_id, lang='en'):
    """Look up a show's name on the given indexer.

    :param indexer: the indexer to query
    :param indexer_id: the show's id on that indexer
    :param lang: preferred language code; falls back to the default
    :return: the series name, or None when the indexer returned no data
    """
    api_params = sickbeard.indexerApi(indexer).api_params.copy()
    api_params['language'] = lang or sickbeard.INDEXER_DEFAULT_LANGUAGE

    logger.log('{0}: {1!r}'.format(sickbeard.indexerApi(indexer).name, api_params))

    indexer_api = sickbeard.indexerApi(indexer).indexer(**api_params)
    show = indexer_api[int(indexer_id)]

    # Some indexer backends hand back objects without a data payload.
    if not hasattr(show, 'data'):
        return None
    return show.data.get('seriesname')
def is_ip_private(ip):
    """
    Return a truthy value when ``ip`` is an IPv4 address in a loopback or
    private (RFC 1918) range, otherwise a falsy value.

    :param ip: dotted-quad IPv4 address as a string
    """
    priv_lo = re.compile(r"^127\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
    priv_24 = re.compile(r"^10\.\d{1,3}\.\d{1,3}\.\d{1,3}$")
    # Bug fix: the separators in the next two patterns were unescaped '.'
    # wildcards, so strings such as '192.168.1x1' were treated as private.
    priv_20 = re.compile(r"^192\.168\.\d{1,3}\.\d{1,3}$")
    priv_16 = re.compile(r"^172\.(1[6-9]|2[0-9]|3[0-1])\.[0-9]{1,3}\.[0-9]{1,3}$")
    return priv_lo.match(ip) or priv_24.match(ip) or priv_20.match(ip) or priv_16.match(ip)
| gpl-3.0 |
brendandahl/servo | tests/wpt/css-tests/tools/html5lib/html5lib/filters/inject_meta_charset.py | 1730 | 2746 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Tree-walker filter that injects or rewrites a ``<meta charset>``
    element so the serialized document declares ``encoding``.

    When ``encoding`` is None the token stream is passed through untouched.
    """

    def __init__(self, source, encoding):
        _base.Filter.__init__(self, source)
        # Target character encoding to advertise; None disables rewriting.
        self.encoding = encoding

    def __iter__(self):
        # Simple state machine: "pre_head" -> "in_head" -> "post_head".
        state = "pre_head"
        # When no encoding was given there is nothing to inject.
        meta_found = (self.encoding is None)
        # Tokens buffered while inside <head>, flushed at </head> so a
        # missing meta can be inserted right after the opening tag.
        pending = []

        for token in _base.Filter.__iter__(self):
            type = token["type"]
            if type == "StartTag":
                if token["name"].lower() == "head":
                    state = "in_head"

            elif type == "EmptyTag":
                if token["name"].lower() == "meta":
                    # replace charset with actual encoding
                    has_http_equiv_content_type = False
                    for (namespace, name), value in token["data"].items():
                        if namespace is not None:
                            continue
                        elif name.lower() == 'charset':
                            token["data"][(namespace, name)] = self.encoding
                            meta_found = True
                            break
                        elif name == 'http-equiv' and value.lower() == 'content-type':
                            has_http_equiv_content_type = True
                    else:
                        # No charset attribute found: rewrite an existing
                        # http-equiv="content-type" declaration instead.
                        if has_http_equiv_content_type and (None, "content") in token["data"]:
                            token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
                            meta_found = True

                elif token["name"].lower() == "head" and not meta_found:
                    # insert meta into empty head (the <head/> empty tag is
                    # expanded into an explicit start/meta/end sequence)
                    yield {"type": "StartTag", "name": "head",
                           "data": token["data"]}
                    yield {"type": "EmptyTag", "name": "meta",
                           "data": {(None, "charset"): self.encoding}}
                    yield {"type": "EndTag", "name": "head"}
                    meta_found = True
                    continue

            elif type == "EndTag":
                if token["name"].lower() == "head" and pending:
                    # insert meta into head (if necessary) and flush pending queue
                    yield pending.pop(0)
                    if not meta_found:
                        yield {"type": "EmptyTag", "name": "meta",
                               "data": {(None, "charset"): self.encoding}}
                    while pending:
                        yield pending.pop(0)
                    meta_found = True
                    state = "post_head"

            if state == "in_head":
                # Buffer head contents (including the <head> start tag
                # itself) until the matching end tag arrives.
                pending.append(token)
            else:
                yield token
| mpl-2.0 |
mollstam/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Werkzeug-0.10.4/werkzeug/urls.py | 148 | 36596 | # -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
``werkzeug.urls`` used to provide several wrapper functions for Python 2
urlparse, whose main purpose were to work around the behavior of the Py2
stdlib and its lack of unicode support. While this was already a somewhat
inconvenient situation, it got even more complicated because Python 3's
``urllib.parse`` actually does handle unicode properly. In other words,
this module would wrap two libraries with completely different behavior. So
now this module contains a 2-and-3-compatible backport of Python 3's
``urllib.parse``, which is mostly API-compatible.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import re
from werkzeug._compat import text_type, PY2, to_unicode, \
to_native, implements_to_string, try_coerce_native, \
normalize_string_tuple, make_literal_wrapper, \
fix_tuple_repr
from werkzeug._internal import _encode_idna, _decode_idna
from werkzeug.datastructures import MultiDict, iter_multi_items
from collections import namedtuple
# A regular expression for what a valid schema looks like
_scheme_re = re.compile(r'^[a-zA-Z0-9+-.]+$')

# Characters that are safe in any part of an URL.
_always_safe = (b'abcdefghijklmnopqrstuvwxyz'
                b'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_.-+')

_hexdigits = '0123456789ABCDEFabcdef'
# Maps every two-character hex pair (as bytes) to its integer value; used
# by _unquote_to_bytes() to decode percent-escapes without int() calls.
_hextobyte = dict(
    ((a + b).encode(), int(a + b, 16))
    for a in _hexdigits for b in _hexdigits
)

# Parsed-URL 5-tuple shared by the URL (text) and BytesURL (bytes) classes.
_URLTuple = fix_tuple_repr(namedtuple('_URLTuple',
    ['scheme', 'netloc', 'path', 'query', 'fragment']))
class BaseURL(_URLTuple):
    '''Superclass of :py:class:`URL` and :py:class:`BytesURL`.

    Subclasses only differ in the literal type they operate on; the
    ``_at``/``_colon``/``_lbracket``/``_rbracket`` class attributes hold
    the delimiters in the matching (text or bytes) flavor.
    '''
    __slots__ = ()

    def replace(self, **kwargs):
        """Return an URL with the same values, except for those parameters
        given new values by whichever keyword arguments are specified."""
        return self._replace(**kwargs)

    @property
    def host(self):
        """The host part of the URL if available, otherwise `None`.  The
        host is either the hostname or the IP address mentioned in the
        URL.  It will not contain the port.
        """
        return self._split_host()[0]

    @property
    def ascii_host(self):
        """Works exactly like :attr:`host` but will return a result that
        is restricted to ASCII.  If it finds a netloc that is not ASCII
        it will attempt to idna decode it.  This is useful for socket
        operations when the URL might include internationalized characters.
        """
        rv = self.host
        if rv is not None and isinstance(rv, text_type):
            rv = _encode_idna(rv)
        return to_native(rv, 'ascii', 'ignore')

    @property
    def port(self):
        """The port in the URL as an integer if it was present, `None`
        otherwise.  This does not fill in default ports.
        """
        try:
            rv = int(to_native(self._split_host()[1]))
            if 0 <= rv <= 65535:
                return rv
        except (ValueError, TypeError):
            # Missing or non-numeric port; fall through and return None.
            pass

    @property
    def auth(self):
        """The authentication part in the URL if available, `None`
        otherwise.
        """
        return self._split_netloc()[0]

    @property
    def username(self):
        """The username if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[0]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_username(self):
        """The username if it was part of the URL, `None` otherwise.
        Unlike :attr:`username` this one is not being decoded.
        """
        return self._split_auth()[0]

    @property
    def password(self):
        """The password if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[1]
        if rv is not None:
            return _url_unquote_legacy(rv)

    @property
    def raw_password(self):
        """The password if it was part of the URL, `None` otherwise.
        Unlike :attr:`password` this one is not being decoded.
        """
        return self._split_auth()[1]

    def decode_query(self, *args, **kwargs):
        """Decodes the query part of the URL.  Ths is a shortcut for
        calling :func:`url_decode` on the query argument.  The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)

    def join(self, *args, **kwargs):
        """Joins this URL with another one.  This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))

    def to_url(self):
        """Returns a URL string or bytes depending on the type of the
        information stored.  This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)

    def decode_netloc(self):
        """Decodes the netloc part into a string."""
        rv = _decode_idna(self.host or '')

        # Re-wrap IPv6 literals in brackets after idna decoding.
        if ':' in rv:
            rv = '[%s]' % rv
        port = self.port
        if port is not None:
            rv = '%s:%d' % (rv, port)
        auth = ':'.join(filter(None, [
            _url_unquote_legacy(self.raw_username or '', '/:%@'),
            _url_unquote_legacy(self.raw_password or '', '/:%@'),
        ]))
        if auth:
            rv = '%s@%s' % (auth, rv)
        return rv

    def to_uri_tuple(self):
        """Returns a :class:`BytesURL` tuple that holds a URI.  This will
        encode all the information in the URL properly to ASCII using the
        rules a web browser would follow.

        It's usually more interesting to directly call :meth:`iri_to_uri` which
        will return a string.
        """
        return url_parse(iri_to_uri(self).encode('ascii'))

    def to_iri_tuple(self):
        """Returns a :class:`URL` tuple that holds a IRI.  This will try
        to decode as much information as possible in the URL without
        losing information similar to how a web browser does it for the
        URL bar.

        It's usually more interesting to directly call :meth:`uri_to_iri` which
        will return a string.
        """
        return url_parse(uri_to_iri(self))

    def get_file_location(self, pathformat=None):
        """Returns a tuple with the location of the file in the form
        ``(server, location)``.  If the netloc is empty in the URL or
        points to localhost, it's represented as ``None``.

        The `pathformat` by default is autodetection but needs to be set
        when working with URLs of a specific system.  The supported values
        are ``'windows'`` when working with Windows or DOS paths and
        ``'posix'`` when working with posix paths.

        If the URL does not point to to a local file, the server and location
        are both represented as ``None``.

        :param pathformat: The expected format of the path component.
                           Currently ``'windows'`` and ``'posix'`` are
                           supported.  Defaults to ``None`` which is
                           autodetect.
        """
        if self.scheme != 'file':
            return None, None

        path = url_unquote(self.path)
        host = self.netloc or None

        if pathformat is None:
            if os.name == 'nt':
                pathformat = 'windows'
            else:
                pathformat = 'posix'

        if pathformat == 'windows':
            # Turn URL drive notation (/C|/ or /C:/) back into C:\ form.
            if path[:1] == '/' and path[1:2].isalpha() and path[2:3] in '|:':
                path = path[1:2] + ':' + path[3:]
            windows_share = path[:3] in ('\\' * 3, '/' * 3)
            import ntpath
            path = ntpath.normpath(path)
            # Windows shared drives are represented as ``\\host\\directory``.
            # That results in a URL like ``file://///host/directory``, and a
            # path like ``///host/directory``.  We need to special-case this
            # because the path contains the hostname.
            if windows_share and host is None:
                parts = path.lstrip('\\').split('\\', 1)
                if len(parts) == 2:
                    host, path = parts
                else:
                    host = parts[0]
                    path = ''
        elif pathformat == 'posix':
            import posixpath
            path = posixpath.normpath(path)
        else:
            raise TypeError('Invalid path format %s' % repr(pathformat))

        # Loopback hosts count as "no server".
        if host in ('127.0.0.1', '::1', 'localhost'):
            host = None

        return host, path

    def _split_netloc(self):
        # Returns (auth, host_and_port); auth is None when no '@' present.
        if self._at in self.netloc:
            return self.netloc.split(self._at, 1)
        return None, self.netloc

    def _split_auth(self):
        # Returns (username, password) from the auth part, either may be None.
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None
        return auth.split(self._colon, 1)

    def _split_host(self):
        # Returns (host, port-string); handles bracketed IPv6 literals.
        rv = self._split_netloc()[1]
        if not rv:
            return None, None

        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                return rv.split(self._colon, 1)
            return rv, None

        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None

        host = rv[1:idx]
        rest = rv[idx + 1:]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None
@implements_to_string
class URL(BaseURL):
    """Represents a parsed URL as text.  Behaves like a regular tuple but
    also exposes extra attributes that give further insight into the URL.
    """
    __slots__ = ()
    _at = '@'
    _colon = ':'
    _lbracket = '['
    _rbracket = ']'

    def __str__(self):
        return self.to_url()

    def encode_netloc(self):
        """Encodes the netloc part to an ASCII safe URL as bytes."""
        host = self.ascii_host or ''
        # IPv6 literals need to be wrapped in brackets again.
        netloc = '[%s]' % host if ':' in host else host
        port = self.port
        if port is not None:
            netloc = '%s:%d' % (netloc, port)
        credentials = [
            url_quote(self.raw_username or '', 'utf-8', 'strict', '/:%'),
            url_quote(self.raw_password or '', 'utf-8', 'strict', '/:%'),
        ]
        auth = ':'.join(part for part in credentials if part)
        if auth:
            netloc = '%s@%s' % (auth, netloc)
        return to_native(netloc)

    def encode(self, charset='utf-8', errors='replace'):
        """Encodes the URL to a tuple made out of bytes.  The charset is
        only being used for the path, query and fragment.
        """
        scheme, _netloc, path, query, fragment = self
        return BytesURL(
            scheme.encode('ascii'),
            self.encode_netloc(),
            path.encode(charset, errors),
            query.encode(charset, errors),
            fragment.encode(charset, errors)
        )
class BytesURL(BaseURL):
    """Represents a parsed URL whose components are bytes."""
    __slots__ = ()
    _at = b'@'
    _colon = b':'
    _lbracket = b'['
    _rbracket = b']'

    def __str__(self):
        return self.to_url().decode('utf-8', 'replace')

    def encode_netloc(self):
        """Returns the netloc unchanged as bytes."""
        return self.netloc

    def decode(self, charset='utf-8', errors='replace'):
        """Decodes the URL to a tuple made out of strings.  The charset is
        only being used for the path, query and fragment.
        """
        scheme, _netloc, path, query, fragment = self
        return URL(
            scheme.decode('ascii'),
            self.decode_netloc(),
            path.decode(charset, errors),
            query.decode(charset, errors),
            fragment.decode(charset, errors)
        )
def _unquote_to_bytes(string, unsafe=''):
    """Percent-decode *string* into raw bytes, leaving untouched any escape
    whose decoded byte appears in *unsafe*.
    """
    if isinstance(string, text_type):
        string = string.encode('utf-8')
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode('utf-8')
    unsafe = frozenset(bytearray(unsafe))

    groups = iter(string.split(b'%'))
    result = bytearray(next(groups, b''))
    for group in groups:
        code = group[:2]
        # Only decode valid two-digit hex escapes whose byte is allowed;
        # everything else is emitted verbatim including the '%'.
        if code in _hextobyte and _hextobyte[code] not in unsafe:
            result.append(_hextobyte[code])
            result.extend(group[2:])
        else:
            result.extend(b'%')
            result.extend(group)
    return bytes(result)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    """Yield ``key=value`` strings for :func:`url_encode`; pairs whose
    value is None are skipped entirely.
    """
    items = iter_multi_items(obj)
    if sort:
        items = sorted(items, key=key)
    for k, v in items:
        if v is None:
            continue
        if not isinstance(k, bytes):
            k = text_type(k).encode(charset)
        if not isinstance(v, bytes):
            v = text_type(v).encode(charset)
        yield url_quote_plus(k) + '=' + url_quote_plus(v)
def _url_unquote_legacy(value, unsafe=''):
    """Unquote *value* as strict UTF-8, falling back to latin1 when the
    bytes do not form valid UTF-8.
    """
    try:
        return url_unquote(value, charset='utf-8', errors='strict',
                           unsafe=unsafe)
    except UnicodeError:
        pass
    return url_unquote(value, charset='latin1', unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
    """Parses a URL from a string into a :class:`URL` tuple.  If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored.  Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.

    The inverse of this function is :func:`url_unparse`.

    :param url: the URL to parse.
    :param scheme: the default schema to use if the URL is schemaless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    # s() wraps literals so the same code path works for both str and bytes.
    s = make_literal_wrapper(url)
    is_text_based = isinstance(url, text_type)

    if scheme is None:
        scheme = s('')
    netloc = query = fragment = s('')
    i = url.find(s(':'))
    if i > 0 and _scheme_re.match(to_native(url[:i], errors='replace')):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1:]
        if not rest or any(c not in s('0123456789') for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest

    if url[:2] == s('//'):
        # The netloc runs until the first '/', '?' or '#' — whichever
        # appears earliest after the leading '//'.
        delim = len(url)
        for c in s('/?#'):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        # Brackets (IPv6 literals) may only appear as a balanced pair.
        if (s('[') in netloc and s(']') not in netloc) or \
           (s(']') in netloc and s('[') not in netloc):
            raise ValueError('Invalid IPv6 URL')

    if allow_fragments and s('#') in url:
        url, fragment = url.split(s('#'), 1)
    if s('?') in url:
        url, query = url.split(s('?'), 1)

    # Return the tuple flavor matching the input type.
    result_type = is_text_based and URL or BytesURL
    return result_type(scheme, netloc, url, query, fragment)
def url_quote(string, charset='utf-8', errors='strict', safe='/:', unsafe=''):
    """URL encode a single string with a given encoding.

    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.

    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    # Bytes that may pass through without percent-escaping.
    allowed = frozenset(bytearray(safe) + _always_safe) - frozenset(bytearray(unsafe))
    quoted = bytearray()
    for byte in bytearray(string):
        if byte not in allowed:
            quoted.extend(('%%%02X' % byte).encode('ascii'))
        else:
            quoted.append(byte)
    return to_native(bytes(quoted))
def url_quote_plus(string, charset='utf-8', errors='strict', safe=''):
    """URL encode a single string with the given encoding and convert
    whitespace to "+".

    :param s: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.
    """
    # Treat the space as safe (and '+' as unsafe) during quoting, then
    # swap the literal spaces for plus signs afterwards.
    quoted = url_quote(string, charset, errors, safe + ' ', '+')
    return quoted.replace(' ', '+')
def url_unparse(components):
    """The reverse operation to :meth:`url_parse`.  This accepts arbitrary
    as well as :class:`URL` tuples and returns a URL as a string.

    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    scheme, netloc, path, query, fragment = \
        normalize_string_tuple(components)
    # s() produces literals in the same type (str/bytes) as the components.
    s = make_literal_wrapper(scheme)
    url = s('')

    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do.  This also allows us to ignore a schema
    # register for netloc utilization or having to differenciate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s('/'))):
        if path and path[:1] != s('/'):
            path = s('/') + path
        url = s('//') + (netloc or s('')) + path
    elif path:
        # No netloc: the path is emitted as-is.
        url += path
    if scheme:
        url = scheme + s(':') + url
    if query:
        url = url + s('?') + query
    if fragment:
        url = url + s('#') + fragment
    return url
def url_unquote(string, charset='utf-8', errors='replace', unsafe=''):
    """URL decode a single string with a given encoding.  If the charset
    is set to `None` no unicode decoding is performed and raw bytes
    are returned.

    :param s: the string to unquote.
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param errors: the error handling for the charset decoding.
    """
    raw = _unquote_to_bytes(string, unsafe)
    if charset is None:
        return raw
    return raw.decode(charset, errors)
def url_unquote_plus(s, charset='utf-8', errors='replace'):
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.

    Per default encoding errors are ignored.  If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
    :exc:`HTTPUnicodeError` is raised.

    :param s: The string to unquote.
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    # Pick literal flavors that match the input type before substituting.
    plus, space = (u'+', u' ') if isinstance(s, text_type) else (b'+', b' ')
    return url_unquote(s.replace(plus, space), charset, errors)
def url_fix(s, charset='utf-8'):
    r"""Sometimes you get an URL by a user that just isn't a real URL because
    it contains unsafe characters like ' ' and so on.  This function can fix
    some of the problems in a similar way browsers handle data entered by the
    user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
                    unicode string.
    """
    # Switch to unicode and convert backslashes (invalid in URLs anyway)
    # to forward slashes, which matches Chrome's behavior.
    s = to_unicode(s, charset, 'replace').replace('\\', '/')

    # Repair malformed windows file URLs like file://C:/... manually.
    if s.startswith('file://') and s[7:8].isalpha() and s[8:10] in (':/', '|/'):
        s = 'file:///' + s[7:]

    parts = url_parse(s)
    fixed_path = url_quote(parts.path, charset, safe='/%+$!*\'(),')
    fixed_query = url_quote_plus(parts.query, charset, safe=':&%=+$!*\'(),')
    fixed_anchor = url_quote_plus(parts.fragment, charset, safe=':&%=+$!*\'(),')
    return to_native(url_unparse((parts.scheme, parts.encode_netloc(),
                                  fixed_path, fixed_query, fixed_anchor)))
def uri_to_iri(uri, charset='utf-8', errors='replace'):
    r"""
    Converts a URI in a given charset to a IRI.

    Examples for URI versus IRI:

    >>> uri_to_iri(b'http://xn--n3h.net/')
    u'http://\u2603.net/'
    >>> uri_to_iri(b'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th')
    u'http://\xfcser:p\xe4ssword@\u2603.net/p\xe5th'

    Query strings are left unchanged:

    >>> uri_to_iri('/?foo=24&x=%26%2f')
    u'/?foo=24&x=%26%2f'

    .. versionadded:: 0.6

    :param uri: The URI to convert.
    :param charset: The charset of the URI.
    :param errors: The error handling on decode.
    """
    if isinstance(uri, tuple):
        uri = url_unparse(uri)
    parts = url_parse(to_unicode(uri, charset))
    # Unquote each component with its own set of reserved characters that
    # must stay percent-encoded.
    return url_unparse((
        parts.scheme,
        parts.decode_netloc(),
        url_unquote(parts.path, charset, errors, '%/;?'),
        url_unquote(parts.query, charset, errors, '%;/?:@&=+,$'),
        url_unquote(parts.fragment, charset, errors, '%;/?:@&=+,$'),
    ))
def iri_to_uri(iri, charset='utf-8', errors='strict', safe_conversion=False):
    r"""
    Converts any unicode based IRI to an acceptable ASCII URI.  Werkzeug always
    uses utf-8 URLs internally because this is what browsers and HTTP do as
    well.  In some places where it accepts an URL it also accepts a unicode IRI
    and converts it into a URI.

    Examples for IRI versus URI:

    >>> iri_to_uri(u'http://☃.net/')
    'http://xn--n3h.net/'
    >>> iri_to_uri(u'http://üser:pässword@☃.net/påth')
    'http://%C3%BCser:p%C3%A4ssword@xn--n3h.net/p%C3%A5th'

    There is a general problem with IRI and URI conversion with some
    protocols that appear in the wild that are in violation of the URI
    specification.  In places where Werkzeug goes through a forced IRI to
    URI conversion it will set the `safe_conversion` flag which will
    not perform a conversion if the end result is already ASCII.  This
    can mean that the return value is not an entirely correct URI but
    it will not destroy such invalid URLs in the process.

    As an example consider the following two IRIs::

      magnet:?xt=uri:whatever
      itms-services://?action=download-manifest

    The internal representation after parsing of those URLs is the same
    and there is no way to reconstruct the original one.  If safe
    conversion is enabled however this function becomes a noop for both of
    those strings as they both can be considered URIs.

    .. versionadded:: 0.6

    .. versionchanged:: 0.9.6
       The `safe_conversion` parameter was added.

    :param iri: The IRI to convert.
    :param charset: The charset for the URI.
    :param safe_conversion: indicates if a safe conversion should take place.
                            For more information see the explanation above.
    """
    if isinstance(iri, tuple):
        iri = url_unparse(iri)

    if safe_conversion:
        # Already-ASCII input with no whitespace is returned untouched so
        # schema-violating but working URLs survive the round trip.
        try:
            native_iri = to_native(iri)
            ascii_iri = to_native(iri).encode('ascii')
            if ascii_iri.split() == [ascii_iri]:
                return native_iri
        except UnicodeError:
            pass

    parts = url_parse(to_unicode(iri, charset, errors))
    # Quote each component with its own set of characters to preserve.
    return to_native(url_unparse((
        parts.scheme,
        parts.encode_netloc(),
        url_quote(parts.path, charset, errors, '/:~+%'),
        url_quote(parts.query, charset, errors, '%&[]:;$*()+,!?*/='),
        url_quote(parts.fragment, charset, errors, '=%&[]:;$()+,!?*/'),
    )))
def url_decode(s, charset='utf-8', decode_keys=False, include_empty=True,
               errors='replace', separator='&', cls=None):
    """
    Parse a querystring and return it as :class:`MultiDict`.  There is a
    difference in key decoding on different Python versions.  On Python 3
    keys will always be fully decoded whereas on Python 2, keys will
    remain bytestrings if they fit into ASCII.  On 2.x keys can be forced
    to be unicode by setting `decode_keys` to `True`.

    If the charset is set to `None` no unicode decoding will happen and
    raw bytes will be returned.

    Per default a missing value for a key will default to an empty key.  If
    you don't want that behavior you can set `include_empty` to `False`.

    Per default encoding errors are ignored.  If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``.  In strict mode a
    `HTTPUnicodeError` is raised.

    .. versionchanged:: 0.5
       In previous versions ";" and "&" could be used for url decoding.
       This changed in 0.5 where only "&" is supported.  If you want to
       use ";" instead a different `separator` can be provided.

       The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param decode_keys: Used on Python 2.x to control whether keys should
                        be forced to be unicode objects.  If set to `True`
                        then keys will be unicode in all cases. Otherwise,
                        they remain `str` if they fit into ASCII.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`MultiDict` is used.
    """
    if cls is None:
        cls = MultiDict
    # Coerce the separator into the same type (str/bytes) as the input.
    if isinstance(s, text_type) and not isinstance(separator, text_type):
        separator = separator.decode(charset or 'ascii')
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or 'ascii')
    pairs = s.split(separator)
    return cls(_url_decode_impl(pairs, charset, decode_keys,
                                include_empty, errors))
def url_decode_stream(stream, charset='utf-8', decode_keys=False,
                      include_empty=True, errors='replace', separator='&',
                      cls=None, limit=None, return_iterator=False):
    """Works like :func:`url_decode` but decodes a stream.  The behavior
    of stream and limit follows functions like
    :func:`~werkzeug.wsgi.make_line_iter`.  The generator of pairs is
    directly fed to the `cls` so you can consume the data while it's
    parsed.

    .. versionadded:: 0.8

    :param stream: a stream with the encoded querystring
    :param charset: the charset of the query string.  If set to `None`
                    no unicode decoding will take place.
    :param decode_keys: Used on Python 2.x to control whether keys should
                        be forced to be unicode objects.  If set to `True`,
                        keys will be unicode in all cases. Otherwise, they
                        remain `str` if they fit into ASCII.
    :param include_empty: Set to `False` if you don't want empty values to
                          appear in the dict.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``
    :param cls: an optional dict class to use.  If this is not specified
                or `None` the default :class:`MultiDict` is used.
    :param limit: the content length of the URL data.  Not necessary if
                  a limited stream is provided.
    :param return_iterator: if set to `True` the `cls` argument is ignored
                            and an iterator over all decoded pairs is
                            returned
    """
    from werkzeug.wsgi import make_chunk_iter

    if return_iterator:
        # Identity "container": hand the pair generator back unchanged.
        def cls(x):
            return x
    elif cls is None:
        cls = MultiDict

    pair_iter = make_chunk_iter(stream, separator, limit)
    decoded = _url_decode_impl(pair_iter, charset, decode_keys,
                               include_empty, errors)
    return cls(decoded)
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
    """Yield decoded ``(key, value)`` tuples from raw querystring pairs."""
    for pair in pair_iter:
        if not pair:
            continue
        s = make_literal_wrapper(pair)
        key, delim, value = pair.partition(s('='))
        if not delim:
            # No '=' present: the whole pair is the key with an empty value.
            if not include_empty:
                continue
            value = s('')
        key = url_unquote_plus(key, charset, errors)
        if charset is not None and PY2 and not decode_keys:
            key = try_coerce_native(key)
        yield key, url_unquote_plus(value, charset, errors)
def url_encode(obj, charset='utf-8', encode_keys=False, sort=False, key=None,
               separator=b'&'):
    """URL encode a dict/`MultiDict`.  If a value is `None` it will not appear
    in the result string.  Per default only values are encoded into the target
    charset strings.  If `encode_keys` is set to ``True`` unicode keys are
    supported too.

    If `sort` is set to `True` the items are sorted by `key` or the default
    sorting algorithm.

    .. versionadded:: 0.5
        `sort`, `key`, and `separator` were added.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
                        Python 3.x)
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting.  For more details
                check out the :func:`sorted` documentation.
    """
    separator = to_native(separator, 'ascii')
    pieces = _url_encode_impl(obj, charset, encode_keys, sort, key)
    return separator.join(pieces)
def url_encode_stream(obj, stream=None, charset='utf-8', encode_keys=False,
                      sort=False, key=None, separator=b'&'):
    """Like :meth:`url_encode` but writes the results to a stream
    object.  If the stream is `None` a generator over all encoded
    pairs is returned.

    .. versionadded:: 0.8

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into or `None` if
                   an iterator over the encoded pairs should be returned.  In
                   that case the separator argument is ignored.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored on
                        Python 3.x)
    :param sort: set to `True` if you want parameters to be sorted by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional function to be used for sorting.  For more details
                check out the :func:`sorted` documentation.
    """
    separator = to_native(separator, 'ascii')
    gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
    if stream is None:
        return gen
    # Interleave the separator between chunks, never before the first one.
    first = True
    for chunk in gen:
        if not first:
            stream.write(separator)
        stream.write(chunk)
        first = False
def url_join(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter.

    Implements reference resolution in the style of RFC 3986: the
    relative URL inherits scheme/netloc from the base as needed and
    ``.``/``..`` path segments are collapsed.

    :param base: the base URL for the join operation.
    :param url: the URL to join.
    :param allow_fragments: indicates whether fragments should be allowed.
    """
    # Accept pre-parsed 5-tuples as well as plain strings.
    if isinstance(base, tuple):
        base = url_unparse(base)
    if isinstance(url, tuple):
        url = url_unparse(url)
    # Coerce both operands to the same string type (bytes vs text);
    # ``s`` then wraps literals so comparisons match that type.
    base, url = normalize_string_tuple((base, url))
    s = make_literal_wrapper(base)
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bquery, bfragment = \
        url_parse(base, allow_fragments=allow_fragments)
    scheme, netloc, path, query, fragment = \
        url_parse(url, bscheme, allow_fragments)
    # A different scheme means ``url`` is already absolute on its own.
    if scheme != bscheme:
        return url
    if netloc:
        return url_unparse((scheme, netloc, path, query, fragment))
    netloc = bnetloc
    if path[:1] == s('/'):
        # Absolute path: the base path is discarded entirely.
        segments = path.split(s('/'))
    elif not path:
        # Empty path: keep the base path (and the base query if the
        # relative URL supplied none).
        segments = bpath.split(s('/'))
        if not query:
            query = bquery
    else:
        # Relative path: replace the last segment of the base path.
        segments = bpath.split(s('/'))[:-1] + path.split(s('/'))
    # If the rightmost part is "./" we want to keep the slash but
    # remove the dot.
    if segments[-1] == s('.'):
        segments[-1] = s('')
    # Resolve ".." and "."
    segments = [segment for segment in segments if segment != s('.')]
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            # A ".." cancels the preceding real segment; restart the
            # scan afterwards because the indices have shifted.
            if segments[i] == s('..') and \
               segments[i - 1] not in (s(''), s('..')):
                del segments[i - 1:i + 1]
                break
            i += 1
        else:
            break
    # Remove trailing ".." if the URL is absolute
    unwanted_marker = [s(''), s('..')]
    while segments[:2] == unwanted_marker:
        del segments[1]
    path = s('/').join(segments)
    return url_unparse((scheme, netloc, path, query, fragment))
class Href(object):
    """A callable URL builder anchored at a given base.  Calling the
    object appends path segments and query parameters to the base;
    works with URLs and posix paths alike.

    Positional arguments become individual path segments:

    >>> href = Href('/foo')
    >>> href('bar', 23)
    '/foo/bar/23'
    >>> href('foo', bar=23)
    '/foo/foo?bar=23'

    Any argument (positional or keyword) that evaluates to `None` is
    skipped.  When no keyword arguments are given, the last positional
    argument may be a :class:`dict` or :class:`MultiDict` (or any other
    dict subclass); otherwise the keyword arguments become the query
    parameters, with a single trailing underscore stripped from each
    parameter name:

    >>> href(is_=42)
    '/foo?is=42'
    >>> href({'foo': 'bar'})
    '/foo?foo=bar'

    Mixing both forms is not allowed:

    >>> href({'foo': 'bar'}, bar=42)
    Traceback (most recent call last):
      ...
    TypeError: keyword arguments and query-dicts can't be combined

    Attribute access produces a new href object with the attribute name
    appended as a path prefix:

    >>> bar_href = href.bar
    >>> bar_href("blub")
    '/foo/bar/blub'

    If `sort` is set to `True` the items are sorted by `key` or the
    default sorting algorithm:

    >>> href = Href("/", sort=True)
    >>> href(a=1, b=2, c=3)
    '/?a=1&b=2&c=3'

    .. versionadded:: 0.5
        `sort` and `key` were added.
    """

    def __init__(self, base='./', charset='utf-8', sort=False, key=None):
        self.base = base or './'
        self.charset = charset
        self.sort = sort
        self.key = key

    def __getattr__(self, name):
        # Dunder lookups must never be treated as URL segments.
        if name.startswith('__'):
            raise AttributeError(name)
        prefix = self.base
        if not prefix.endswith('/'):
            prefix += '/'
        return Href(url_join(prefix, name), self.charset, self.sort, self.key)

    def __call__(self, *path, **query):
        # A trailing dict positional argument supplies the query, but
        # only when no keyword arguments were passed as well.
        if path and isinstance(path[-1], dict):
            if query:
                raise TypeError('keyword arguments and query-dicts '
                                'can\'t be combined')
            query, path = path[-1], path[:-1]
        elif query:
            # Strip one trailing underscore so reserved words such as
            # ``is_`` can be used as parameter names.
            query = dict((k[:-1] if k.endswith('_') and k[:-1] else k, v)
                         for k, v in query.items())
        quoted = [to_unicode(url_quote(segment, self.charset), 'ascii')
                  for segment in path if segment is not None]
        joined = '/'.join(quoted).lstrip('/')
        rv = self.base
        if joined:
            if not rv.endswith('/'):
                rv += '/'
            rv = url_join(rv, './' + joined)
        if query:
            encoded = url_encode(query, self.charset, sort=self.sort,
                                 key=self.key)
            rv += '?' + to_unicode(encoded, 'ascii')
        return to_native(rv)
| mit |
dmroeder/pylogix | pylogix/lgx_device.py | 1 | 54769 | """
Copyright 2021 Dustin Roeder (dmroeder@gmail.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
from struct import pack, unpack_from
class Device(object):
    """Identity information for an EtherNet/IP device.

    Instances are normally produced by :meth:`parse`, which decodes a
    List Identity response packet into the attributes declared in
    ``__init__`` (vendor, device type, product code, revision, serial
    number, product name and state).
    """

    def __init__(self):
        # structure of a logix device; every field is filled in by parse()
        self.Length = None
        self.EncapsulationVersion = None
        self.IPAddress = None
        self.VendorID = None
        self.Vendor = None
        self.DeviceID = None
        self.DeviceType = None
        self.ProductCode = None
        self.Revision = None
        self.Status = None
        self.SerialNumber = None
        self.ProductNameLength = None
        self.ProductName = None
        self.State = None

    # attribute names, in the order used by __repr__ and __str__
    _repr_fields = ('Length', 'EncapsulationVersion', 'IPAddress',
                    'VendorID', 'Vendor', 'DeviceID', 'DeviceType',
                    'ProductCode', 'Revision', 'Status', 'SerialNumber',
                    'ProductNameLength', 'ProductName', 'State')

    def __repr__(self):
        props = ', '.join('{}={}'.format(name, getattr(self, name))
                          for name in self._repr_fields)
        return 'LGXDevice({})'.format(props)

    def __str__(self):
        return ' '.join(str(getattr(self, name))
                        for name in self._repr_fields)

    @staticmethod
    def get_device(deviceID):
        """Return the human-readable device type for *deviceID*, or
        ``"Unknown"`` if the code is not in the ``devices`` table."""
        return devices.get(deviceID, "Unknown")

    @staticmethod
    def get_vendor(vendorID):
        """Return the vendor name for *vendorID*, or ``"Unknown"`` if
        the code is not in the ``vendors`` table."""
        return vendors.get(vendorID, "Unknown")

    @staticmethod
    def parse(data):
        """Decode a List Identity response packet into a :class:`Device`.

        :param data: raw bytes of the response.  The fixed offsets below
            follow the EtherNet/IP encapsulation + CIP identity layout.
        :return: a populated :class:`Device` instance.
        """
        # we're going to take the packet and parse all
        # the data that is in it.
        resp = Device()
        resp.Length = unpack_from('<H', data, 28)[0]
        resp.EncapsulationVersion = unpack_from('<H', data, 30)[0]
        longIP = unpack_from('<I', data, 36)[0]
        resp.IPAddress = socket.inet_ntoa(pack('<L', longIP))
        resp.VendorID = unpack_from('<H', data, 48)[0]
        resp.Vendor = Device.get_vendor(resp.VendorID)
        resp.DeviceID = unpack_from('<H', data, 50)[0]
        # Bug fix: the decoded name used to be stored only in an ad-hoc
        # ``Device`` attribute, leaving ``DeviceType`` (declared in
        # __init__ and reported by __repr__/__str__) as None.  Populate
        # both; ``Device`` is kept for backward compatibility.
        resp.DeviceType = Device.get_device(resp.DeviceID)
        resp.Device = resp.DeviceType
        resp.ProductCode = unpack_from('<H', data, 52)[0]
        major = unpack_from('<B', data, 54)[0]
        minor = unpack_from('<B', data, 55)[0]
        resp.Revision = str(major) + '.' + str(minor)
        resp.Status = unpack_from('<H', data, 56)[0]
        resp.SerialNumber = hex(unpack_from('<I', data, 58)[0])
        resp.ProductNameLength = unpack_from('<B', data, 62)[0]
        resp.ProductName = str(data[63:63 + resp.ProductNameLength].decode('utf-8'))
        # the device state is carried in the final byte of the packet
        state = data[-1:]
        resp.State = unpack_from('<B', state, 0)[0]
        return resp
# List originally came from Wireshark /epan/dissectors/packet-cip.c
# Maps the CIP "Device Type" code from a List Identity reply to a
# human-readable name; consumed by Device.get_device().
devices = {0x00: 'Generic Device (deprecated)',
           0x02: 'AC Drive',
           0x03: 'Motor Overload',
           0x04: 'Limit Switch',
           0x05: 'Inductive Proximity Switch',
           0x06: 'Photoelectric Sensor',
           0x07: 'General Purpose Discrete I/O',
           0x09: 'Resolver',
           0x0C: 'Communications Adapter',
           0x0E: 'Programmable Logic Controller',
           0x10: 'Position Controller',
           0x13: 'DC Drive',
           0x15: 'Contactor',
           0x16: 'Motor Starter',
           0x17: 'Soft Start',
           0x18: 'Human-Machine Interface',
           0x1A: 'Mass Flow Controller',
           0x1B: 'Pneumatic Valve',
           0x1C: 'Vacuum Pressure Gauge',
           0x1D: 'Process Control Value',
           0x1E: 'Residual Gas Analyzer',
           0x1F: 'DC Power Generator',
           0x20: 'RF Power Generator',
           0x21: 'Turbomolecular Vacuum Pump',
           0x22: 'Encoder',
           0x23: 'Safety Discrete I/O Device',
           0x24: 'Fluid Flow Controller',
           0x25: 'CIP Motion Drive',
           0x26: 'CompoNet Repeater',
           0x27: 'Mass Flow Controller, Enhanced',
           0x28: 'CIP Modbus Device',
           0x29: 'CIP Modbus Translator',
           0x2A: 'Safety Analog I/O Device',
           0x2B: 'Generic Device (keyable)',
           0x2C: 'Managed Switch',
           0x32: 'ControlNet Physical Layer Component'}
# List originally came from Wireshark /epan/dissectors/packet-cip.c
vendors = {0: 'Reserved',
1: 'Rockwell Automation/Allen-Bradley',
2: 'Namco Controls Corp.',
3: 'Honeywell Inc.',
4: 'Parker Hannifin Corp. (Veriflo Division)',
5: 'Rockwell Automation/Reliance Elec.',
6: 'Reserved',
7: 'SMC Corporation',
8: 'Molex Incorporated',
9: 'Western Reserve Controls Corp.',
10: 'Advanced Micro Controls Inc. (AMCI)',
11: 'ASCO Pneumatic Controls',
12: 'Banner Engineering Corp.',
13: 'Belden Wire & Cable Company',
14: 'Cooper Interconnect',
15: 'Reserved',
16: 'Daniel Woodhead Co. (Woodhead Connectivity)',
17: 'Dearborn Group Inc.',
18: 'Reserved',
19: 'Helm Instrument Company',
20: 'Huron Net Works',
21: 'Lumberg, Inc.',
22: 'Online Development Inc.(Automation Value)',
23: 'Vorne Industries, Inc.',
24: 'ODVA Special Reserve',
25: 'Reserved',
26: 'Festo Corporation',
27: 'Reserved',
28: 'Reserved',
29: 'Reserved',
30: 'Unico, Inc.',
31: 'Ross Controls',
32: 'Reserved',
33: 'Reserved',
34: 'Hohner Corp.',
35: 'Micro Mo Electronics, Inc.',
36: 'MKS Instruments, Inc.',
37: 'Yaskawa Electric America formerly Magnetek Drives',
38: 'Reserved',
39: 'AVG Automation (Uticor)',
40: 'Wago Corporation',
41: 'Kinetics (Unit Instruments)',
42: 'IMI Norgren Limited',
43: 'BALLUFF, Inc.',
44: 'Yaskawa Electric America, Inc.',
45: 'Eurotherm Controls Inc',
46: 'ABB Industrial Systems',
47: 'Omron Corporation',
48: 'TURCk, Inc.',
49: 'Grayhill Inc.',
50: 'Real Time Automation (C&ID)',
51: 'Reserved',
52: 'Numatics, Inc.',
53: 'Lutze, Inc.',
54: 'Reserved',
55: 'Reserved',
56: 'Softing GmbH',
57: 'Pepperl + Fuchs',
58: 'Spectrum Controls, Inc.',
59: 'D.I.P. Inc. MKS Inst.',
60: 'Applied Motion Products, Inc.',
61: 'Sencon Inc.',
62: 'High Country Tek',
63: 'SWAC Automation Consult GmbH',
64: 'Clippard Instrument Laboratory',
65: 'Reserved',
66: 'Reserved',
67: 'Reserved',
68: 'Eaton Electrical',
69: 'Reserved',
70: 'Reserved',
71: 'Toshiba International Corp.',
72: 'Control Technology Incorporated',
73: 'TCS (NZ) Ltd.',
74: 'Hitachi, Ltd.',
75: 'ABB Robotics Products AB',
76: 'NKE Corporation',
77: 'Rockwell Software, Inc.',
78: 'Escort Memory Systems (A Datalogic Group Co.)',
79: 'Reserved',
80: 'Industrial Devices Corporation',
81: 'IXXAT Automation GmbH',
82: 'Mitsubishi Electric Automation, Inc.',
83: 'OPTO-22',
84: 'Reserved',
85: 'Reserved',
86: 'Horner Electric',
87: 'Burkert Werke GmbH & Co. KG',
88: 'Reserved',
89: 'Industrial Indexing Systems, Inc.',
90: 'HMS Industrial Networks AB',
91: 'Robicon',
92: 'Helix Technology (Granville-Phillips)',
93: 'Arlington Laboratory',
94: 'Advantech Co. Ltd.',
95: 'Square D Company',
96: 'Digital Electronics Corp.',
97: 'Danfoss',
98: 'Reserved',
99: 'Reserved',
100: 'Bosch Rexroth Corporation, Pneumatics',
101: 'Applied Materials, Inc.',
102: 'Showa Electric Wire & Cable Co.',
103: 'Pacific Scientific (API Controls Inc.)',
104: 'Sharp Manufacturing Systems Corp.',
105: 'Olflex Wire & Cable, Inc.',
106: 'Reserved',
107: 'Unitrode',
108: 'Beckhoff Automation GmbH',
109: 'National Instruments',
110: 'Mykrolis Corporations (Millipore)',
111: 'International Motion Controls Corp.',
112: 'Reserved',
113: 'SEG Kempen GmbH',
114: 'Reserved',
115: 'Reserved',
116: 'MTS Systems Corp.',
117: 'Krones, Inc',
118: 'Reserved',
119: 'EXOR Electronic R & D',
120: 'SIEI S.p.A.',
121: 'KUKA Roboter GmbH',
122: 'Reserved',
123: 'SEC (Samsung Electronics Co., Ltd)',
124: 'Binary Electronics Ltd',
125: 'Flexible Machine Controls',
126: 'Reserved',
127: 'ABB Inc. (Entrelec)',
128: 'MAC Valves, Inc.',
129: 'Auma Actuators Inc',
130: 'Toyoda Machine Works, Ltd',
131: 'Reserved',
132: 'Reserved',
133: 'Balogh T.A.G., Corporation',
134: 'TR Systemtechnik GmbH',
135: 'UNIPULSE Corporation',
136: 'Reserved',
137: 'Reserved',
138: 'Conxall Corporation Inc.',
139: 'Reserved',
140: 'Reserved',
141: 'Kuramo Electric Co., Ltd.',
142: 'Creative Micro Designs',
143: 'GE Industrial Systems',
144: 'Leybold Vacuum GmbH',
145: 'Siemens Energy & Automation/Drives',
146: 'Kodensha Ltd',
147: 'Motion Engineering, Inc.',
148: 'Honda Engineering Co., Ltd',
149: 'EIM Valve Controls',
150: 'Melec Inc.',
151: 'Sony Manufacturing Systems Corporation',
152: 'North American Mfg.',
153: 'WATLOW',
154: 'Japan Radio Co., Ltd',
155: 'NADEX Co., Ltd',
156: 'Ametek Automation & Process Technologies',
157: 'Reserved',
158: 'KVASER AB',
159: 'IDEC IZUMI Corporation',
160: 'Mitsubishi Heavy Industries Ltd',
161: 'Mitsubishi Electric Corporation',
162: 'Horiba-STEC Inc.',
163: 'esd electronic system design gmbh',
164: 'DAIHEN Corporation',
165: 'Tyco Valves & Controls/Keystone',
166: 'EBARA Corporation',
167: 'Reserved',
168: 'Reserved',
169: 'Hokuyo Electric Co. Ltd',
170: 'Pyramid Solutions, Inc.',
171: 'Denso Wave Incorporated',
172: 'HLS Hard-Line Solutions Inc',
173: 'Caterpillar, Inc.',
174: 'PDL Electronics Ltd.',
175: 'Reserved',
176: 'Red Lion Controls',
177: 'ANELVA Corporation',
178: 'Toyo Denki Seizo KK',
179: 'Sanyo Denki Co., Ltd',
180: 'Advanced Energy Japan K.K. (Aera Japan)',
181: 'Pilz GmbH & Co',
182: 'Marsh Bellofram-Bellofram PCD Division',
183: 'Reserved',
184: 'M-SYSTEM Co. Ltd',
185: 'Nissin Electric Co., Ltd',
186: 'Hitachi Metals Ltd.',
187: 'Oriental Motor Company',
188: 'A&D Co., Ltd',
189: 'Phasetronics, Inc.',
190: 'Cummins Engine Company',
191: 'Deltron Inc.',
192: 'Geneer Corporation',
193: 'Anatol Automation, Inc.',
194: 'Reserved',
195: 'Reserved',
196: 'Medar, Inc.',
197: 'Comdel Inc.',
198: 'Advanced Energy Industries, Inc',
199: 'Reserved',
200: 'DAIDEN Co., Ltd',
201: 'CKD Corporation',
202: 'Toyo Electric Corporation',
203: 'Reserved',
204: 'AuCom Electronics Ltd',
205: 'Shinko Electric Co., Ltd',
206: 'Vector Informatik GmbH',
207: 'Reserved',
208: 'Moog Inc.',
209: 'Contemporary Controls',
210: 'Tokyo Sokki Kenkyujo Co., Ltd',
211: 'Schenck-AccuRate, Inc.',
212: 'The Oilgear Company',
213: 'Reserved',
214: 'ASM Japan K.K.',
215: 'HIRATA Corp.',
216: 'SUNX Limited',
217: 'Meidensha Corp.',
218: 'NIDEC SANKYO CORPORATION (Sankyo Seiki Mfg. Co., Ltd)',
219: 'KAMRO Corp.',
220: 'Nippon System Development Co., Ltd',
221: 'EBARA Technologies Inc.',
222: 'Reserved',
223: 'Reserved',
224: 'SG Co., Ltd',
225: 'Vaasa Institute of Technology',
226: 'MKS Instruments (ENI Technology)',
227: 'Tateyama System Laboratory Co., Ltd.',
228: 'QLOG Corporation',
229: 'Matric Limited Inc.',
230: 'NSD Corporation',
231: 'Reserved',
232: 'Sumitomo Wiring Systems, Ltd',
233: 'Group 3 Technology Ltd',
234: 'CTI Cryogenics',
235: 'POLSYS CORP',
236: 'Ampere Inc.',
237: 'Reserved',
238: 'Simplatroll Ltd',
239: 'Reserved',
240: 'Reserved',
241: 'Leading Edge Design',
242: 'Humphrey Products',
243: 'Schneider Automation, Inc.',
244: 'Westlock Controls Corp.',
245: 'Nihon Weidmuller Co., Ltd',
246: 'Brooks Instrument (Div. of Emerson)',
247: 'Reserved',
248: ' Moeller GmbH',
249: 'Varian Vacuum Products',
250: 'Yokogawa Electric Corporation',
251: 'Electrical Design Daiyu Co., Ltd',
252: 'Omron Software Co., Ltd',
253: 'BOC Edwards',
254: 'Control Technology Corporation',
255: 'Bosch Rexroth',
256: 'Turck',
257: 'Control Techniques PLC',
258: 'Hardy Instruments, Inc.',
259: 'LS Industrial Systems',
260: 'E.O.A. Systems Inc.',
261: 'Reserved',
262: 'New Cosmos Electric Co., Ltd.',
263: 'Sense Eletronica LTDA',
264: 'Xycom, Inc.',
265: 'Baldor Electric',
266: 'Reserved',
267: 'Patlite Corporation',
268: 'Reserved',
269: 'Mogami Wire & Cable Corporation',
270: 'Welding Technology Corporation (WTC)',
271: 'Reserved',
272: 'Deutschmann Automation GmbH',
273: 'ICP Panel-Tec Inc.',
274: 'Bray Controls USA',
275: 'Reserved',
276: 'Status Technologies',
277: 'Trio Motion Technology Ltd',
278: 'Sherrex Systems Ltd',
279: 'Adept Technology, Inc.',
280: 'Spang Power Electronics',
281: 'Reserved',
282: 'Acrosser Technology Co., Ltd',
283: 'Hilscher GmbH',
284: 'IMAX Corporation',
285: 'Electronic Innovation, Inc. (Falter Engineering)',
286: 'Netlogic Inc.',
287: 'Bosch Rexroth Corporation, Indramat',
288: 'Reserved',
289: 'Reserved',
290: 'Murata Machinery Ltd.',
291: 'MTT Company Ltd.',
292: 'Kanematsu Semiconductor Corp.',
293: 'Takebishi Electric Sales Co.',
294: 'Tokyo Electron Device Ltd',
295: 'PFU Limited',
296: 'Hakko Automation Co., Ltd.',
297: 'Advanet Inc.',
298: 'Tokyo Electron Software Technologies Ltd.',
299: 'Reserved',
300: 'Shinagawa Electric Wire Co., Ltd.',
301: 'Yokogawa M&C Corporation',
302: 'KONAN Electric Co., Ltd.',
303: 'Binar Elektronik AB',
304: 'Furukawa Electric Co.',
305: 'Cooper Energy Services',
306: 'Schleicher GmbH & Co.',
307: 'Hirose Electric Co., Ltd',
308: 'Western Servo Design Inc.',
309: 'Prosoft Technology',
310: 'Reserved',
311: 'Towa Shoko Co., Ltd',
312: 'Kyopal Co., Ltd',
313: 'Extron Co.',
314: 'Wieland Electric GmbH',
315: 'SEW Eurodrive GmbH',
316: 'Aera Corporation',
317: 'STA Reutlingen',
318: 'Reserved',
319: 'Fuji Electric Co., Ltd.',
320: 'Reserved',
321: 'Reserved',
322: 'ifm efector, inc.',
323: 'Reserved',
324: 'IDEACOD-Hohner Automation S.A.',
325: 'CommScope Inc.',
326: 'GE Fanuc Automation North America, Inc.',
327: 'Matsushita Electric Industrial Co., Ltd',
328: 'Okaya Electronics Corporation',
329: 'KASHIYAMA Industries, Ltd',
330: 'JVC',
331: 'Interface Corporation',
332: 'Grape Systems Inc.',
333: 'Reserved',
334: 'Reserved',
335: 'Toshiba IT & Control Systems Corporation',
336: 'Sanyo Machine Works, Ltd.',
337: 'Vansco Electronics Ltd.',
338: 'Dart Container Corp.',
339: 'Livingston & Co., Inc.',
340: 'Alfa Laval LKM as',
341: 'BF ENTRON Ltd. (British Federal)',
342: 'Bekaert Engineering NV',
343: 'Ferran Scientific Inc.',
344: 'KEBA AG',
345: 'Endress + Hauser',
346: 'Reserved',
347: 'ABB ALSTOM Power UK Ltd. (EGT)',
348: 'Berger Lahr GmbH',
349: 'Reserved',
350: 'Federal Signal Corp.',
351: 'Kawasaki Robotics (USA), Inc.',
352: 'Bently Nevada Corporation',
353: 'Reserved',
354: 'FRABA Posital GmbH',
355: 'Elsag Bailey, Inc.',
356: 'Fanuc Robotics America',
357: 'Reserved',
358: 'Surface Combustion, Inc.',
359: 'Reserved',
360: 'AILES Electronics Ind. Co., Ltd.',
361: 'Wonderware Corporation',
362: 'Particle Measuring Systems, Inc.',
363: 'Reserved',
364: 'Reserved',
365: 'BITS Co., Ltd',
366: 'Japan Aviation Electronics Industry Ltd',
367: 'Keyence Corporation',
368: 'Kuroda Precision Industries Ltd.',
369: 'Mitsubishi Electric Semiconductor Application',
370: 'Nippon Seisen Cable, Ltd.',
371: 'Omron ASO Co., Ltd',
372: 'Seiko Seiki Co., Ltd.',
373: 'Sumitomo Heavy Industries, Ltd.',
374: 'Tango Computer Service Corporation',
375: 'Technology Service, Inc.',
376: 'Toshiba Information Systems (Japan) Corporation',
377: 'TOSHIBA Schneider Inverter Corporation',
378: 'Toyooki Kogyo Co., Ltd.',
379: 'XEBEC',
380: 'Madison Cable Corporation',
381: 'Hitati Engineering & Services Co., Ltd',
382: 'TEM-TECH Lab Co., Ltd',
383: 'International Laboratory Corporation',
384: 'Dyadic Systems Co., Ltd.',
385: 'SETO Electronics Industry Co., Ltd',
386: 'Tokyo Electron Kyushu Limited',
387: 'KEI System Co., Ltd',
388: 'Reserved',
389: 'Asahi Engineering Co., Ltd',
390: 'Contrex Inc.',
391: 'Paradigm Controls Ltd.',
392: 'Reserved',
393: 'Ohm Electric Co., Ltd.',
394: 'RKC Instrument Inc.',
395: 'Suzuki Motor Corporation',
396: 'Custom Servo Motors Inc.',
397: 'PACE Control Systems',
398: 'Reserved',
399: 'Reserved',
400: 'LINTEC Co., Ltd.',
401: 'Hitachi Cable Ltd.',
402: 'BUSWARE Direct',
403: 'Eaton Electric B.V. (former Holec Holland N.V.)',
404: 'VAT Vakuumventile AG',
405: 'Scientific Technologies Incorporated',
406: 'Alfa Instrumentos Eletronicos Ltda',
407: 'TWK Elektronik GmbH',
408: 'ABB Welding Systems AB',
409: 'BYSTRONIC Maschinen AG',
410: 'Kimura Electric Co., Ltd',
411: 'Nissei Plastic Industrial Co., Ltd',
412: 'Reserved',
413: 'Kistler-Morse Corporation',
414: 'Proteous Industries Inc.',
415: 'IDC Corporation',
416: 'Nordson Corporation',
417: 'Rapistan Systems',
418: 'LP-Elektronik GmbH',
419: 'GERBI & FASE S.p.A.(Fase Saldatura)',
420: 'Phoenix Digital Corporation',
421: 'Z-World Engineering',
422: 'Honda R&D Co., Ltd.',
423: 'Bionics Instrument Co., Ltd.',
424: 'Teknic, Inc.',
425: 'R.Stahl, Inc.',
426: 'Reserved',
427: 'Ryco Graphic Manufacturing Inc.',
428: 'Giddings & Lewis, Inc.',
429: 'Koganei Corporation',
430: 'Reserved',
431: 'Nichigoh Communication Electric Wire Co., Ltd.',
432: 'Reserved',
433: 'Fujikura Ltd.',
434: 'AD Link Technology Inc.',
435: 'StoneL Corporation',
436: 'Computer Optical Products, Inc.',
437: 'CONOS Inc.',
438: 'Erhardt + Leimer GmbH',
439: 'UNIQUE Co. Ltd',
440: 'Roboticsware, Inc.',
441: 'Nachi Fujikoshi Corporation',
442: 'Hengstler GmbH',
443: 'Reserved',
444: 'SUNNY GIKEN Inc.',
445: 'Lenze Drive Systems GmbH',
446: 'CD Systems B.V.',
447: 'FMT/Aircraft Gate Support Systems AB',
448: 'Axiomatic Technologies Corp',
449: 'Embedded System Products, Inc.',
450: 'Reserved',
451: 'Mencom Corporation',
452: 'Reserved',453: 'Matsushita Welding Systems Co., Ltd.',
454: 'Dengensha Mfg. Co. Ltd.',
455: 'Quinn Systems Ltd.',
456: 'Tellima Technology Ltd',
457: 'MDT, Software',
458: 'Taiwan Keiso Co., Ltd',
459: 'Pinnacle Systems',
460: 'Ascom Hasler Mailing Sys',
461: 'INSTRUMAR Limited',
462: 'Reserved',
463: 'Navistar International Transportation Corp',
464: 'Huettinger Elektronik GmbH + Co. KG',
465: 'OCM Technology Inc.',
466: 'Professional Supply Inc.',
468: 'Baumer IVO GmbH & Co. KG',
469: 'Worcester Controls Corporation',
470: 'Pyramid Technical Consultants, Inc.',
471: 'Reserved',
472: 'Apollo Fire Detectors Limited',
473: 'Avtron Manufacturing, Inc.',
474: 'Reserved',
475: 'Tokyo Keiso Co., Ltd.',
476: 'Daishowa Swiki Co., Ltd.',
477: 'Kojima Instruments Inc.',
478: 'Shimadzu Corporation',
479: 'Tatsuta Electric Wire & Cable Co., Ltd.',
480: 'MECS Corporation',
481: 'Tahara Electric',
482: 'Koyo Electronics',
483: 'Clever Devices',
484: 'GCD Hardware & Software GmbH',
485: 'Reserved',
486: 'Miller Electric Mfg Co.',
487: 'GEA Tuchenhagen GmbH',
488: 'Riken Keiki Co., LTD',
489: 'Keisokugiken Corporation',
490: 'Fuji Machine Mfg. Co., Ltd',
491: 'Reserved',
492: 'Nidec-Shimpo Corp.',
493: 'UTEC Corporation',
494: 'Sanyo Electric Co. Ltd.',
495: 'Reserved',
496: 'Reserved',
497: 'Okano Electric Wire Co. Ltd',
498: 'Shimaden Co. Ltd.',
499: 'Teddington Controls Ltd',
500: 'Reserved',
501: 'VIPA GmbH',
502: 'Warwick Manufacturing Group',
503: 'Danaher Controls',
504: 'Reserved',
505: 'Reserved',
506: 'American Science & Engineering',
507: 'Accutron Controls International Inc.',
508: 'Norcott Technologies Ltd',
509: 'TB Woods, Inc',
510: 'Proportion-Air, Inc.',
511: 'SICK Stegmann GmbH',
512: 'Reserved',
513: 'Edwards Signaling',
514: 'Sumitomo Metal Industries, Ltd',
515: 'Cosmo Instruments Co., Ltd.',
516: 'Denshosha Co., Ltd.',
517: 'Kaijo Corp.',
518: 'Michiproducts Co., Ltd.',
519: 'Miura Corporation',
520: 'TG Information Network Co., Ltd.',
521: 'Fujikin , Inc.',
522: 'Estic Corp.',
523: 'GS Hydraulic Sales',
524: 'Reserved',
525: 'MTE Limited',
526: 'Hyde Park Electronics, Inc.',
527: 'Pfeiffer Vacuum GmbH',
528: 'Cyberlogic Technologies',
529: 'OKUMA Corporation FA Systems Division',
530: 'Reserved',
531: 'Hitachi Kokusai Electric Co., Ltd.',
532: 'SHINKO TECHNOS Co., Ltd.',
533: 'Itoh Electric Co., Ltd.',
534: 'Colorado Flow Tech Inc.',
535: 'Love Controls Division/Dwyer Inst.',
536: 'Alstom Drives and Controls',
537: 'The Foxboro Company',
538: 'Tescom Corporation',
539: 'Reserved',
540: 'Atlas Copco Controls UK',
541: 'Reserved',
542: 'Autojet Technologies',
543: 'Prima Electronics S.p.A.',
544: 'PMA GmbH',
545: 'Shimafuji Electric Co., Ltd',
546: 'Oki Electric Industry Co., Ltd',
547: 'Kyushu Matsushita Electric Co., Ltd',
548: 'Nihon Electric Wire & Cable Co., Ltd',
549: 'Tsuken Electric Ind Co., Ltd',
550: 'Tamadic Co.',
551: 'MAATEL SA',
552: 'OKUMA America',
554: 'TPC Wire & Cable',
555: 'ATI Industrial Automation',
557: 'Serra Soldadura, S.A.',
558: 'Southwest Research Institute',
559: 'Cabinplant International',
560: 'Sartorius Mechatronics T&H GmbH',
561: 'Comau S.p.A. Robotics & Final Assembly Division',
562: 'Phoenix Contact',
563: 'Yokogawa MAT Corporation',
564: 'asahi sangyo co., ltd.',
565: 'Reserved',
566: 'Akita Myotoku Ltd.',
567: 'OBARA Corp.',
568: 'Suetron Electronic GmbH',
569: 'Reserved',
570: 'Serck Controls Limited',
571: 'Fairchild Industrial Products Company',
572: 'ARO S.A.',
573: 'M2C GmbH',
574: 'Shin Caterpillar Mitsubishi Ltd.',
575: 'Santest Co., Ltd.',
576: 'Cosmotechs Co., Ltd.',
577: 'Hitachi Electric Systems',
578: 'Smartscan Ltd',
579: 'Woodhead Software & Electronics France',
580: 'Athena Controls, Inc.',
581: 'Syron Engineering & Manufacturing, Inc.',
582: 'Asahi Optical Co., Ltd.',
583: 'Sansha Electric Mfg. Co., Ltd.',
584: 'Nikki Denso Co., Ltd.',
585: 'Star Micronics, Co., Ltd.',
586: 'Ecotecnia Socirtat Corp.',
587: 'AC Technology Corp.',
588: 'West Instruments Limited',
589: 'NTI Limited',
590: 'Delta Computer Systems, Inc.',
591: 'FANUC Ltd.',
592: 'Hearn-Gu Lee',
593: 'ABB Automation Products',
594: 'Orion Machinery Co., Ltd.',
595: 'Reserved',
596: 'Wire-Pro, Inc.',
597: 'Beijing Huakong Technology Co. Ltd.',
598: 'Yokoyama Shokai Co., Ltd.',
599: 'Toyogiken Co., Ltd.',
600: 'Coester Equipamentos Eletronicos Ltda.',
601: 'Reserved',
602: 'Electroplating Engineers of Japan Ltd.',
603: 'ROBOX S.p.A.',
604: 'Spraying Systems Company',
605: 'Benshaw Inc.',
606: 'ZPA-DP A.S.',
607: 'Wired Rite Systems',
608: 'Tandis Research, Inc.',
609: 'SSD Drives GmbH',
610: 'ULVAC Japan Ltd.',
611: 'DYNAX Corporation',
612: 'Nor-Cal Products, Inc.',
613: 'Aros Electronics AB',
614: 'Jun-Tech Co., Ltd.',
615: 'HAN-MI Co. Ltd.',
616: 'uniNtech (formerly SungGi Internet)',
617: 'Hae Pyung Electronics Reserch Institute',
618: 'Milwaukee Electronics',
619: 'OBERG Industries',
620: 'Parker Hannifin/Compumotor Division',
621: 'TECHNO DIGITAL CORPORATION',
622: 'Network Supply Co., Ltd.',
623: 'Union Electronics Co., Ltd.',
624: 'Tritronics Services PM Ltd.',
625: 'Rockwell Automation-Sprecher+Schuh',
626: 'Matsushita Electric Industrial Co., Ltd/Motor Co.',
627: 'Rolls-Royce Energy Systems, Inc.',
628: 'JEONGIL INTERCOM CO., LTD',
629: 'Interroll Corp.',
630: 'Hubbell Wiring Device-Kellems (Delaware)',
631: 'Intelligent Motion Systems',
632: 'Reserved',
633: 'INFICON AG',
634: 'Hirschmann, Inc.',
635: 'The Siemon Company',
636: 'YAMAHA Motor Co. Ltd.',
637: 'aska corporation',
638: 'Woodhead Connectivity',
639: 'Trimble AB',
640: 'Murrelektronik GmbH',
641: 'Creatrix Labs, Inc.',
642: 'TopWorx',
643: 'Kumho Industrial Co., Ltd.',
644: 'Wind River Systems, Inc.',
645: 'Bihl & Wiedemann GmbH',
646: 'Harmonic Drive Systems Inc.',
647: 'Rikei Corporation',
648: 'BL Autotec, Ltd.',
649: 'Hana Information & Technology Co., Ltd.',
650: 'Seoil Electric Co., Ltd.',
651: 'Fife Corporation',
652: 'Shanghai Electrical Apparatus Research Institute',
653: 'Reserved',
654: 'Parasense Development Centre',
655: 'Reserved',
656: 'Reserved',
657: 'Six Tau S.p.A.',
658: 'Aucos GmbH',
659: 'Rotork Controls',
660: 'Automationdirect.com',
661: 'Thermo BLH',
662: 'System Controls, Ltd.',
663: 'Univer S.p.A.',
664: 'MKS-Tenta Technology',
665: 'Lika Electronic SNC',
666: 'Mettler-Toledo, Inc.',
667: 'DXL USA Inc.',
668: 'Rockwell Automation/Entek IRD Intl.',
669: 'Nippon Otis Elevator Company',
670: 'Sinano Electric, Co., Ltd.',
671: 'Sony Manufacturing Systems',
672: 'Reserved',
673: 'Contec Co., Ltd.',
674: 'Automated Solutions',
675: 'Controlweigh',
676: 'Reserved',
677: 'Fincor Electronics',
678: 'Cognex Corporation',
679: 'Qualiflow',
680: 'Weidmuller, Inc.',
681: 'Morinaga Milk Industry Co., Ltd.',
682: 'Takagi Industrial Co., Ltd.',
683: 'Wittenstein AG',
684: 'Sena Technologies, Inc.',
685: 'Reserved',
686: 'APV Products Unna',
687: 'Creator Teknisk Utvedkling AB',
688: 'Reserved',
689: 'Mibu Denki Industrial Co., Ltd.',
690: 'Takamastsu Machineer Section',
691: 'Startco Engineering Ltd.',
692: 'Reserved',
693: 'Holjeron',
694: 'ALCATEL High Vacuum Technology',
695: 'Taesan LCD Co., Ltd.',
696: 'POSCON',
697: 'VMIC',
698: 'Matsushita Electric Works, Ltd.',
699: 'IAI Corporation',
700: 'Horst GmbH',
701: 'MicroControl GmbH & Co.',
702: 'Leine & Linde AB',
703: 'Reserved',
704: 'EC Elettronica Srl',
705: 'VIT Software HB',
706: 'Bronkhorst High-Tech B.V.',
707: 'Optex Co., Ltd.',
708: 'Yosio Electronic Co.',
709: 'Terasaki Electric Co., Ltd.',
710: 'Sodick Co., Ltd.',
711: 'MTS Systems Corporation-Automation Division',
712: 'Mesa Systemtechnik',
713: 'SHIN HO SYSTEM Co., Ltd.',
714: 'Goyo Electronics Co, Ltd.',
715: 'Loreme',
716: 'SAB Brockskes GmbH & Co. KG',
717: 'Trumpf Laser GmbH + Co. KG',
718: 'Niigata Electronic Instruments Co., Ltd.',
719: 'Yokogawa Digital Computer Corporation',
720: 'O.N. Electronic Co., Ltd.',
721: 'Industrial Control Communication, Inc.',
722: 'ABB, Inc.',
723: 'ElectroWave USA, Inc.',
724: 'Industrial Network Controls, LLC',
725: 'KDT Systems Co., Ltd.',
726: 'SEFA Technology Inc.',
727: 'Nippon POP Rivets and Fasteners Ltd.',
728: 'Yamato Scale Co., Ltd.',
729: 'Zener Electric',
730: 'GSE Scale Systems',
731: 'ISAS (Integrated Switchgear & Sys. Pty Ltd)',
732: 'Beta LaserMike Limited',
733: 'TOEI Electric Co., Ltd.',
734: 'Hakko Electronics Co., Ltd',
735: 'Reserved',
736: 'RFID, Inc.',
737: 'Adwin Corporation',
738: 'Osaka Vacuum, Ltd.',
739: 'A-Kyung Motion, Inc.',
740: 'Camozzi S.P. A.',
741: 'Crevis Co., LTD',
742: 'Rice Lake Weighing Systems',
743: 'Linux Network Services',
744: 'KEB Antriebstechnik GmbH',
745: 'Hagiwara Electric Co., Ltd.',
746: 'Glass Inc. International',
747: 'Reserved',
748: 'DVT Corporation',
749: 'Woodward Governor',
750: 'Mosaic Systems, Inc.',
751: 'Laserline GmbH',
752: 'COM-TEC, Inc.',
753: 'Weed Instrument',
754: 'Prof-face European Technology Center',
755: 'Fuji Automation Co., Ltd.',
756: 'Matsutame Co., Ltd.',
757: 'Hitachi Via Mechanics, Ltd.',
758: 'Dainippon Screen Mfg. Co. Ltd.',
759: 'FLS Automation A/S',
760: 'ABB Stotz Kontakt GmbH',
761: 'Technical Marine Service',
762: 'Advanced Automation Associates, Inc.',
763: 'Baumer Ident GmbH',
764: 'Tsubakimoto Chain Co.',
765: 'Reserved',
766: 'Furukawa Co., Ltd.',
767: 'Active Power',
768: 'CSIRO Mining Automation',
769: 'Matrix Integrated Systems',
770: 'Digitronic Automationsanlagen GmbH',
771: 'SICK STEGMANN Inc.',
772: 'TAE-Antriebstechnik GmbH',
773: 'Electronic Solutions',
774: 'Rocon L.L.C.',
775: 'Dijitized Communications Inc.',
776: 'Asahi Organic Chemicals Industry Co., Ltd.',
777: 'Hodensha',
778: 'Harting, Inc. NA',
779: 'Kubler GmbH',
780: 'Yamatake Corporation',
781: 'JEOL',
782: 'Yamatake Industrial Systems Co., Ltd.',
783: 'HAEHNE Elektronische Messgerate GmbH',
784: 'Ci Technologies Pty Ltd (for Pelamos Industries)',
785: 'N. SCHLUMBERGER & CIE',
786: 'Teijin Seiki Co., Ltd.',
787: 'DAIKIN Industries, Ltd',
788: 'RyuSyo Industrial Co., Ltd.',
789: 'SAGINOMIYA SEISAKUSHO, INC.',
790: 'Seishin Engineering Co., Ltd.',
791: 'Japan Support System Ltd.',
792: 'Decsys',
793: 'Metronix Messgerate u. Elektronik GmbH',
794: 'Reserved',
795: 'Vaccon Company, Inc.',
796: 'Siemens Energy & Automation, Inc.',
797: 'Ten X Technology, Inc.',
798: 'Tyco Electronics',
799: 'Delta Power Electronics Center',
800: 'Denker',
801: 'Autonics Corporation',
802: 'JFE Electronic Engineering Pty. Ltd.',
803: 'Reserved',
804: 'Electro-Sensors, Inc.',
805: 'Digi International, Inc.',
806: 'Texas Instruments',
807: 'ADTEC Plasma Technology Co., Ltd',
808: 'SICK AG',
809: 'Ethernet Peripherals, Inc.',
810: 'Animatics Corporation',
811: 'Reserved',
812: 'Process Control Corporation',
813: 'SystemV. Inc.',
814: 'Danaher Motion SRL',
815: 'SHINKAWA Sensor Technology, Inc.',
816: 'Tesch GmbH & Co. KG',
817: 'Reserved',
818: 'Trend Controls Systems Ltd.',
819: 'Guangzhou ZHIYUAN Electronic Co., Ltd.',
820: 'Mykrolis Corporation',
821: 'Bethlehem Steel Corporation',
822: 'KK ICP',
823: 'Takemoto Denki Corporation',
824: 'The Montalvo Corporation',
825: 'Reserved',
826: 'LEONI Special Cables GmbH',
827: 'Reserved',
828: 'ONO SOKKI CO.,LTD.',
829: 'Rockwell Samsung Automation',
830: 'SHINDENGEN ELECTRIC MFG. CO. LTD',
831: 'Origin Electric Co. Ltd.',
832: 'Quest Technical Solutions, Inc.',
833: 'LS Cable, Ltd.',
834: 'Enercon-Nord Electronic GmbH',
835: 'Northwire Inc.',
836: 'Engel Elektroantriebe GmbH',
837: 'The Stanley Works',
838: 'Celesco Transducer Products, Inc.',
839: 'Chugoku Electric Wire and Cable Co.',
840: 'Kongsberg Simrad AS',
841: 'Panduit Corporation',
842: 'Spellman High Voltage Electronics Corp.',
843: 'Kokusai Electric Alpha Co., Ltd.',
844: 'Brooks Automation, Inc.',
845: 'ANYWIRE CORPORATION',
846: 'Honda Electronics Co. Ltd',
847: 'REO Elektronik AG',
848: 'Fusion UV Systems, Inc.',
849: 'ASI Advanced Semiconductor Instruments GmbH',
850: 'Datalogic, Inc.',
851: 'SoftPLC Corporation',
852: 'Dynisco Instruments LLC',
853: 'WEG Industrias SA',
854: 'Frontline Test Equipment, Inc.',
855: 'Tamagawa Seiki Co., Ltd.',
856: 'Multi Computing Co., Ltd.',
857: 'RVSI',
858: 'Commercial Timesharing Inc.',
859: 'Tennessee Rand Automation LLC',
860: 'Wacogiken Co., Ltd',
861: 'Reflex Integration Inc.',
862: 'Siemens AG, A&D PI Flow Instruments',
863: 'G. Bachmann Electronic GmbH',
864: 'NT International',
865: 'Schweitzer Engineering Laboratories',
866: 'ATR Industrie-Elektronik GmbH Co.',
867: 'PLASMATECH Co., Ltd',
868: 'Reserved',
869: 'GEMU GmbH & Co. KG',
870: 'Alcorn McBride Inc.',
871: 'MORI SEIKI CO., LTD',
872: 'NodeTech Systems Ltd',
873: 'Emhart Teknologies',
874: 'Cervis, Inc.',
875: 'FieldServer Technologies (Div Sierra Monitor Corp)',
876: 'NEDAP Power Supplies',
877: 'Nippon Sanso Corporation',
878: 'Mitomi Giken Co., Ltd.',
879: 'PULS GmbH',
880: 'Reserved',
881: 'Japan Control Engineering Ltd',
882: 'Embedded Systems Korea (Former Zues Emtek Co Ltd.)',
883: 'Automa SRL',
884: 'Harms+Wende GmbH & Co KG',
885: 'SAE-STAHL GmbH',
886: 'Microwave Data Systems',
887: 'Bernecker + Rainer Industrie-Elektronik GmbH',
888: 'Hiprom Technologies',
889: 'Reserved',
890: 'Nitta Corporation',
891: 'Kontron Modular Computers GmbH',
892: 'Marlin Controls',
893: 'ELCIS s.r.l.',
894: 'Acromag, Inc.',
895: 'Avery Weigh-Tronix',
896: 'Reserved',
897: 'Reserved',
898: 'Reserved',
899: 'Practicon Ltd',
900: 'Schunk GmbH & Co. KG',
901: 'MYNAH Technologies',
902: 'Defontaine Groupe',
903: 'Emerson Process Management Power & Water Solutions',
904: 'F.A. Elec',
905: 'Hottinger Baldwin Messtechnik GmbH',
906: 'Coreco Imaging, Inc.',
907: 'London Electronics Ltd.',
908: 'HSD SpA',
909: 'Comtrol Corporation',
910: 'TEAM, S.A. (Tecnica Electronica de Automatismo Y Medida)',
911: 'MAN B&W Diesel Ltd. Regulateurs Europa',
912: 'Reserved',
913: 'Reserved',
914: 'Micro Motion, Inc.',
915: 'Eckelmann AG',
916: 'Hanyoung Nux',
917: 'Ransburg Industrial Finishing KK',
918: 'Kun Hung Electric Co. Ltd.',
919: 'Brimos wegbebakening b.v.',
920: 'Nitto Seiki Co., Ltd',
921: 'PPT Vision, Inc.',
922: 'Yamazaki Machinery Works',
923: 'SCHMIDT Technology GmbH',
924: 'Parker Hannifin SpA (SBC Division)',
925: 'HIMA Paul Hildebrandt GmbH',
926: 'RivaTek, Inc.',
927: 'Misumi Corporation',
928: 'GE Multilin',
929: 'Measurement Computing Corporation',
930: 'Jetter AG',
931: 'Tokyo Electronics Systems Corporation',
932: 'Togami Electric Mfg. Co., Ltd.',
933: 'HK Systems',
934: 'CDA Systems Ltd.',
935: 'Aerotech Inc.',
936: 'JVL Industrie Elektronik A/S',
937: 'NovaTech Process Solutions LLC',
938: 'Reserved',
939: 'Cisco Systems',
940: 'Grid Connect',
941: 'ITW Automotive Finishing',
942: 'HanYang System',
943: 'ABB K.K. Technical Center',
944: 'Taiyo Electric Wire & Cable Co., Ltd.',
945: 'Reserved',
946: 'SEREN IPS INC',
947: 'Belden CDT Electronics Division',
948: 'ControlNet International',
949: 'Gefran S.P.A.',
950: 'Jokab Safety AB',
951: 'SUMITA OPTICAL GLASS, INC.',
952: 'Biffi Italia srl',
953: 'Beck IPC GmbH',
954: 'Copley Controls Corporation',
955: 'Fagor Automation S. Coop.',
956: 'DARCOM',
957: 'Frick Controls (div. of York International)',
958: 'SymCom, Inc.',
959: 'Infranor',
960: 'Kyosan Cable, Ltd.',
961: 'Varian Vacuum Technologies',
962: 'Messung Systems',
963: 'Xantrex Technology, Inc.',
964: 'StarThis Inc.',
965: 'Chiyoda Co., Ltd.',
966: 'Flowserve Corporation',
967: 'Spyder Controls Corp.',
968: 'IBA AG',
969: 'SHIMOHIRA ELECTRIC MFG.CO.,LTD',
970: 'Reserved',
971: 'Siemens L&A',
972: 'Micro Innovations AG',
973: 'Switchgear & Instrumentation',
974: 'PRE-TECH CO., LTD.',
975: 'National Semiconductor',
976: 'Invensys Process Systems',
977: 'Ametek HDR Power Systems',
978: 'Reserved',
979: 'TETRA-K Corporation',
980: 'C & M Corporation',
981: 'Siempelkamp Maschinen',
982: 'Reserved',
983: 'Daifuku America Corporation',
984: 'Electro-Matic Products Inc.',
985: 'BUSSAN MICROELECTRONICS CORP.',
986: 'ELAU AG',
987: 'Hetronic USA',
988: 'NIIGATA POWER SYSTEMS Co., Ltd.',
989: 'Software Horizons Inc.',
990: 'B3 Systems, Inc.',
991: 'Moxa Networking Co., Ltd.',
992: 'Reserved',
993: 'S4 Integration',
994: 'Elettro Stemi S.R.L.',
995: 'AquaSensors',
996: 'Ifak System GmbH',
997: 'SANKEI MANUFACTURING Co.,LTD.',
998: 'Emerson Network Power Co., Ltd.',
999: 'Fairmount Automation, Inc.',
1000: 'Bird Electronic Corporation',
1001: 'Nabtesco Corporation',
1002: 'AGM Electronics, Inc.',
1003: 'ARCX Inc.',
1004: 'DELTA I/O Co.',
1005: 'Chun IL Electric Ind. Co.',
1006: 'N-Tron',
1007: 'Nippon Pneumatics/Fludics System CO.,LTD.',
1008: 'DDK Ltd.',
1009: 'Seiko Epson Corporation',
1010: 'Halstrup-Walcher GmbH',
1011: 'ITT',
1012: 'Ground Fault Systems bv',
1013: 'Scolari Engineering S.p.A.',
1014: 'Vialis Traffic bv',
1015: 'Weidmueller Interface GmbH & Co. KG',
1016: 'Shanghai Sibotech Automation Co. Ltd',
1017: 'AEG Power Supply Systems GmbH',
1018: 'Komatsu Electronics Inc.',
1019: 'Souriau',
1020: 'Baumuller Chicago Corp.',
1021: 'J. Schmalz GmbH',
1022: 'SEN Corporation',
1023: 'Korenix Technology Co. Ltd',
1024: 'Cooper Power Tools',
1025: 'INNOBIS',
1026: 'Shinho System',
1027: 'Xm Services Ltd.',
1028: 'KVC Co., Ltd.',
1029: 'Sanyu Seiki Co., Ltd.',
1030: 'TuxPLC',
1031: 'Northern Network Solutions',
1032: 'Converteam GmbH',
1033: 'Symbol Technologies',
1034: 'S-TEAM Lab',
1035: 'Maguire Products, Inc.',
1036: 'AC&T',
1037: 'MITSUBISHI HEAVY INDUSTRIES, LTD. KOBE SHIPYARD & MACHINERY WORKS',
1038: 'Hurletron Inc.',
1039: 'Chunichi Denshi Co., Ltd',
1040: 'Cardinal Scale Mfg. Co.',
1041: 'BTR NETCOM via RIA Connect, Inc.',
1042: 'Base2',
1043: 'ASRC Aerospace',
1044: 'Beijing Stone Automation',
1045: 'Changshu Switchgear Manufacture Ltd.',
1046: 'METRONIX Corp.',
1047: 'WIT',
1048: 'ORMEC Systems Corp.',
1049: 'ASATech (China) Inc.',
1050: 'Controlled Systems Limited',
1051: 'Mitsubishi Heavy Ind. Digital System Co., Ltd. (M.H.I.)',
1052: 'Electrogrip',
1053: 'TDS Automation',
1054: 'T&C Power Conversion, Inc.',
1055: 'Robostar Co., Ltd',
1056: 'Scancon A/S',
1057: 'Haas Automation, Inc.',
1058: 'Eshed Technology',
1059: 'Delta Electronic Inc.',
1060: 'Innovasic Semiconductor',
1061: 'SoftDEL Systems Limited',
1062: 'FiberFin, Inc.',
1063: 'Nicollet Technologies Corp.',
1064: 'B.F. Systems',
1065: 'Empire Wire and Supply LLC',
1066: 'Reserved',
1067: 'Elmo Motion Control LTD',
1068: 'Reserved',
1069: 'Asahi Keiki Co., Ltd.',
1070: 'Joy Mining Machinery',
1071: 'MPM Engineering Ltd',
1072: 'Wolke Inks & Printers GmbH',
1073: 'Mitsubishi Electric Engineering Co., Ltd.',
1074: 'COMET AG',
1075: 'Real Time Objects & Systems, LLC',
1076: 'MISCO Refractometer',
1077: 'JT Engineering Inc.',
1078: 'Automated Packing Systems',
1079: 'Niobrara R&D Corp.',
1080: 'Garmin Ltd.',
1081: 'Japan Mobile Platform Co., Ltd',
1082: 'Advosol Inc.',
1083: 'ABB Global Services Limited',
1084: 'Sciemetric Instruments Inc.',
1085: 'Tata Elxsi Ltd.',
1086: 'TPC Mechatronics, Co., Ltd.',
1087: 'Cooper Bussmann',
1088: 'Trinite Automatisering B.V.',
1089: 'Peek Traffic B.V.',
1090: 'Acrison, Inc',
1091: 'Applied Robotics, Inc.',
1092: 'FireBus Systems, Inc.',
1093: 'Beijing Sevenstar Huachuang Electronics',
1094: 'Magnetek',
1095: 'Microscan',
1096: 'Air Water Inc.',
1097: 'Sensopart Industriesensorik GmbH',
1098: 'Tiefenbach Control Systems GmbH',
1099: 'INOXPA S.A',
1100: 'Zurich University of Applied Sciences',
1101: 'Ethernet Direct',
1102: 'GSI-Micro-E Systems',
1103: 'S-Net Automation Co., Ltd.',
1104: 'Power Electronics S.L.',
1105: 'Renesas Technology Corp.',
1106: 'NSWCCD-SSES',
1107: 'Porter Engineering Ltd.',
1108: 'Meggitt Airdynamics, Inc.',
1109: 'Inductive Automation',
1110: 'Neural ID',
1111: 'EEPod LLC',
1112: 'Hitachi Industrial Equipment Systems Co., Ltd.',
1113: 'Salem Automation',
1114: 'port GmbH',
1115: 'B & PLUS',
1116: 'Graco Inc.',
1117: 'Altera Corporation',
1118: 'Technology Brewing Corporation',
1121: 'CSE Servelec',
1124: 'Fluke Networks',
1125: 'Tetra Pak Packaging Solutions SPA',
1126: 'Racine Federated, Inc.',
1127: 'Pureron Japan Co., Ltd.',
1130: 'Brother Industries, Ltd.',
1132: 'Leroy Automation',
1137: 'TR-Electronic GmbH',
1138: 'ASCON S.p.A.',
1139: 'Toledo do Brasil Industria de Balancas Ltda.',
1140: 'Bucyrus DBT Europe GmbH',
1141: 'Emerson Process Management Valve Automation',
1142: 'Alstom Transport',
1144: 'Matrox Electronic Systems',
1145: 'Littelfuse',
1146: 'PLASMART, Inc.',
1147: 'Miyachi Corporation',
1150: 'Promess Incorporated',
1151: 'COPA-DATA GmbH',
1152: 'Precision Engine Controls Corporation',
1153: 'Alga Automacao e controle LTDA',
1154: 'U.I. Lapp GmbH',
1155: 'ICES',
1156: 'Philips Lighting bv',
1157: 'Aseptomag AG',
1158: 'ARC Informatique',
1159: 'Hesmor GmbH',
1160: 'Kobe Steel, Ltd.',
1161: 'FLIR Systems',
1162: 'Simcon A/S',
1163: 'COPALP',
1164: 'Zypcom, Inc.',
1165: 'Swagelok',
1166: 'Elspec',
1167: 'ITT Water & Wastewater AB',
1168: 'Kunbus GmbH Industrial Communication',
1170: 'Performance Controls, Inc.',
1171: 'ACS Motion Control, Ltd.',
1173: 'IStar Technology Limited',
1174: 'Alicat Scientific, Inc.',
1176: 'ADFweb.com SRL',
1177: 'Tata Consultancy Services Limited',
1178: 'CXR Ltd.',
1179: 'Vishay Nobel AB',
1181: 'SolaHD',
1182: 'Endress+Hauser',
1183: 'Bartec GmbH',
1185: 'AccuSentry, Inc.',
1186: 'Exlar Corporation',
1187: 'ILS Technology',
1188: 'Control Concepts Inc.',
1190: 'Procon Engineering Limited',
1191: 'Hermary Opto Electronics Inc.',
1192: 'Q-Lambda',
1194: 'VAMP Ltd',
1195: 'FlexLink',
1196: 'Office FA.com Co., Ltd.',
1197: 'SPMC (Changzhou) Co. Ltd.',
1198: 'Anton Paar GmbH',
1199: 'Zhuzhou CSR Times Electric Co., Ltd.',
1200: 'DeStaCo',
1201: 'Synrad, Inc',
1202: 'Bonfiglioli Vectron GmbH',
1203: 'Pivotal Systems',
1204: 'TKSCT',
1205: 'Randy Nuernberger',
1206: 'CENTRALP',
1207: 'Tengen Group',
1208: 'OES, Inc.',
1209: 'Actel Corporation',
1210: 'Monaghan Engineering, Inc.',
1211: 'wenglor sensoric gmbh',
1212: 'HSA Systems',
1213: 'MK Precision Co., Ltd.',
1214: 'Tappan Wire and Cable',
1215: 'Heinzmann GmbH & Co. KG',
1216: 'Process Automation International Ltd.',
1217: 'Secure Crossing',
1218: 'SMA Railway Technology GmbH',
1219: 'FMS Force Measuring Systems AG',
1220: 'ABT Endustri Enerji Sistemleri Sanayi Tic. Ltd. Sti.',
1221: 'MagneMotion Inc.',
1222: 'STS Co., Ltd.',
1223: 'MERAK SIC, SA',
1224: 'ABOUNDI, Inc.',
1225: 'Rosemount Inc.',
1226: 'GEA FES, Inc.',
1227: 'TMG Technologie und Engineering GmbH',
1228: 'embeX GmbH',
1229: 'GH Electrotermia, S.A.',
1230: 'Tolomatic',
1231: 'Dukane',
1232: 'Elco (Tian Jin) Electronics Co., Ltd.',
1233: 'Jacobs Automation',
1234: 'Noda Radio Frequency Technologies Co., Ltd.',
1235: 'MSC Tuttlingen GmbH',
1236: 'Hitachi Cable Manchester',
1237: 'ACOREL SAS',
1238: 'Global Engineering Solutions Co., Ltd.',
1239: 'ALTE Transportation, S.L.',
1240: 'Penko Engineering B.V.'}
| apache-2.0 |
hachreak/invenio-previewer | invenio_previewer/extensions/csv_dthreejs.py | 2 | 2628 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013, 2014, 2015, 2016 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Render a CSV file using d3.js."""
from __future__ import absolute_import, print_function
import csv
from flask import current_app, render_template
from ..proxies import current_previewer
from ..utils import detect_encoding
previewable_extensions = ['csv', 'dsv']
def validate_csv(file):
    """Return dialect information about given csv file."""
    try:
        # Sniff the delimiter from a bounded, decoded sample of the file.
        with file.open() as stream:
            encoding = detect_encoding(stream, default='utf-8')
            sample_size = current_app.config.get(
                'PREVIEWER_CSV_VALIDATION_BYTES', 1024)
            sample = stream.read(sample_size)
            delimiter = csv.Sniffer().sniff(sample.decode(encoding)).delimiter
    except Exception as e:
        # Any failure (unreadable file, undecodable sample, sniffing
        # error) marks the file as not previewable as CSV.
        current_app.logger.debug(
            'File {0} is not valid CSV: {1}'.format(file.uri, e))
        return {'delimiter': '', 'encoding': '', 'is_valid': False}
    return {'delimiter': delimiter, 'encoding': encoding, 'is_valid': True}
def can_preview(file):
    """Determine if the given file can be previewed."""
    # Only local files with a CSV-like extension are candidates; the
    # final word belongs to the dialect sniffer.
    if not (file.is_local() and file.has_extensions('.csv', '.dsv')):
        return False
    return validate_csv(file)['is_valid']
def preview(file):
    """Render appropriate template with embed flag."""
    dialect = validate_csv(file)
    # The CSV previewer bundle is appended to the previewer's defaults.
    bundles_js = current_previewer.js_bundles + ['previewer_csv_js']
    return render_template(
        'invenio_previewer/csv_bar.html',
        file=file,
        delimiter=dialect['delimiter'],
        encoding=dialect['encoding'],
        js_bundles=bundles_js,
        css_bundles=current_previewer.css_bundles,
    )
| gpl-2.0 |
ltilve/ChromiumGStreamerBackend | third_party/boringssl/roll_boringssl.py | 17 | 3938 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Rolls third_party/boringssl/src in DEPS and updates generated build files."""
import os
import os.path
import shutil
import subprocess
import sys
# Absolute locations inside the Chromium checkout that this roll touches.
SCRIPT_PATH = os.path.abspath(__file__)
SRC_PATH = os.path.dirname(os.path.dirname(os.path.dirname(SCRIPT_PATH)))
DEPS_PATH = os.path.join(SRC_PATH, 'DEPS')
BORINGSSL_PATH = os.path.join(SRC_PATH, 'third_party', 'boringssl')
BORINGSSL_SRC_PATH = os.path.join(BORINGSSL_PATH, 'src')
# Fail fast when the script is run outside a full Chromium checkout.
if not os.path.isfile(DEPS_PATH) or not os.path.isdir(BORINGSSL_SRC_PATH):
  raise Exception('Could not find Chromium checkout')
# Pull OS_ARCH_COMBOS out of the BoringSSL script.
sys.path.append(os.path.join(BORINGSSL_SRC_PATH, 'util'))
import generate_build_files
# Generated files (besides the per-OS/arch directories) that are deleted
# and re-created by generate_build_files.py on every roll.
GENERATED_FILES = [
    'boringssl.gypi',
    'boringssl_tests.gypi',
    'err_data.c',
]
def IsPristine(repo):
  """Returns True if a git checkout is pristine."""
  diff_cmd = ['git', 'diff', '--ignore-submodules']
  # Check the working tree first; only inspect the index when the
  # working tree is clean (mirrors the original short-circuit).
  if subprocess.check_output(diff_cmd, cwd=repo).strip():
    return False
  return not subprocess.check_output(diff_cmd + ['--cached'],
                                     cwd=repo).strip()
def RevParse(repo, rev):
  """Resolves a string to a git commit."""
  out = subprocess.check_output(['git', 'rev-parse', rev], cwd=repo)
  return out.strip()
def UpdateDEPS(deps, from_hash, to_hash):
  """Updates all references of |from_hash| to |to_hash| in |deps|."""
  with open(deps, 'rb') as f:
    contents = f.read()
  # Refuse to write anything when the expected hash is absent, so a
  # stale or hand-edited DEPS is never silently clobbered.
  if from_hash not in contents:
    raise Exception('%s not in DEPS' % from_hash)
  with open(deps, 'wb') as f:
    f.write(contents.replace(from_hash, to_hash))
def main():
  # Orchestrates the roll: resolve the target commit, rewrite DEPS,
  # regenerate the per-platform build files and commit the result.
  if len(sys.argv) > 2:
    sys.stderr.write('Usage: %s [COMMIT]' % sys.argv[0])
    return 1
  # Refuse to touch dirty checkouts: the roll stages and commits files.
  if not IsPristine(SRC_PATH):
    print >>sys.stderr, 'Chromium checkout not pristine.'
    return 0
  if not IsPristine(BORINGSSL_SRC_PATH):
    print >>sys.stderr, 'BoringSSL checkout not pristine.'
    return 0
  if len(sys.argv) > 1:
    # Roll to the explicitly requested commit.
    commit = RevParse(BORINGSSL_SRC_PATH, sys.argv[1])
  else:
    # No commit given: default to the tip of upstream master.
    subprocess.check_call(['git', 'fetch', 'origin'], cwd=BORINGSSL_SRC_PATH)
    commit = RevParse(BORINGSSL_SRC_PATH, 'origin/master')
  head = RevParse(BORINGSSL_SRC_PATH, 'HEAD')
  if head == commit:
    print 'BoringSSL already up-to-date.'
    return 0
  print 'Rolling BoringSSL from %s to %s...' % (head, commit)
  UpdateDEPS(DEPS_PATH, head, commit)
  # Checkout third_party/boringssl/src to generate new files.
  subprocess.check_call(['git', 'checkout', commit], cwd=BORINGSSL_SRC_PATH)
  # Clear the old generated files.
  for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS:
    path = os.path.join(BORINGSSL_PATH, osname + '-' + arch)
    shutil.rmtree(path)
  for file in GENERATED_FILES:
    path = os.path.join(BORINGSSL_PATH, file)
    os.unlink(path)
  # Generate new ones.
  subprocess.check_call(['python',
                         os.path.join(BORINGSSL_SRC_PATH, 'util',
                                      'generate_build_files.py'),
                         'gyp'],
                        cwd=BORINGSSL_PATH)
  # Commit everything.
  subprocess.check_call(['git', 'add', DEPS_PATH], cwd=SRC_PATH)
  for (osname, arch, _, _, _) in generate_build_files.OS_ARCH_COMBOS:
    path = os.path.join(BORINGSSL_PATH, osname + '-' + arch)
    subprocess.check_call(['git', 'add', path], cwd=SRC_PATH)
  for file in GENERATED_FILES:
    path = os.path.join(BORINGSSL_PATH, file)
    subprocess.check_call(['git', 'add', path], cwd=SRC_PATH)
  message = """Roll src/third_party/boringssl/src %s..%s
https://boringssl.googlesource.com/boringssl/+log/%s..%s
BUG=none
""" % (head[:9], commit[:9], head, commit)
  subprocess.check_call(['git', 'commit', '-m', message], cwd=SRC_PATH)
  return 0
# Standard script entry point; propagate main()'s status to the shell.
if __name__ == '__main__':
  sys.exit(main())
| bsd-3-clause |
francois-contat/scapy | scapy/layers/dot11.py | 1 | 37033 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Scapy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# any later version.
#
# Scapy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scapy. If not, see <http://www.gnu.org/licenses/>.
# Copyright (C) Philippe Biondi <phil@secdev.org>
"""
Wireless LAN according to IEEE 802.11.
"""
from __future__ import print_function
import math
import re
import struct
from zlib import crc32
from scapy.config import conf, crypto_validator
from scapy.data import ETHER_ANY, DLT_IEEE802_11, DLT_PRISM_HEADER, \
DLT_IEEE802_11_RADIO
from scapy.compat import raw, plain_str, orb, chb
from scapy.packet import Packet, bind_layers, NoPayload
from scapy.fields import ByteField, LEShortField, BitField, LEShortEnumField, \
ByteEnumField, X3BytesField, FlagsField, LELongField, StrField, \
StrLenField, IntField, XByteField, LEIntField, StrFixedLenField, \
LESignedIntField, ReversePadField, ConditionalField, PacketListField, \
ShortField, BitEnumField, XLEIntField, FieldLenField, LEFieldLenField, \
FieldListField, XStrFixedLenField, PacketField
from scapy.ansmachine import AnsweringMachine
from scapy.plist import PacketList
from scapy.layers.l2 import Ether, LLC, MACField
from scapy.layers.inet import IP, TCP
from scapy.error import warning, log_loading
from scapy.sendrecv import sniff, sendp
from scapy.utils import issubtype
# WEP encryption/decryption needs python-cryptography; degrade gracefully
# (layer still loads, WEP crypto disabled) when it is unavailable.
if conf.crypto_valid:
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import Cipher, algorithms
else:
    # Null out the *same* names bound by the imports above.  The original
    # code assigned 'Ciphers' (a typo), leaving 'Cipher' undefined when
    # cryptography is missing and any WEP code path would then raise
    # NameError instead of failing cleanly.
    default_backend = Cipher = algorithms = None
    log_loading.info("Can't import python-cryptography v1.7+. Disabled WEP decryption/encryption. (Dot11)")  # noqa: E501
# Layers
class PrismHeader(Packet):
    """ iwpriv wlan0 monitor 3 """
    # Pseudo-header prepended by Prism-based drivers in monitor mode
    # (DLT_PRISM_HEADER).  After the fixed msgcode/len/dev fields, every
    # capture metric is encoded as a (did, status, len, value) quadruplet
    # of little-endian integers.
    name = "Prism header"
    fields_desc = [LEIntField("msgcode", 68),
                   LEIntField("len", 144),
                   StrFixedLenField("dev", "", 16),
                   LEIntField("hosttime_did", 0),
                   LEShortField("hosttime_status", 0),
                   LEShortField("hosttime_len", 0),
                   LEIntField("hosttime", 0),
                   LEIntField("mactime_did", 0),
                   LEShortField("mactime_status", 0),
                   LEShortField("mactime_len", 0),
                   LEIntField("mactime", 0),
                   LEIntField("channel_did", 0),
                   LEShortField("channel_status", 0),
                   LEShortField("channel_len", 0),
                   LEIntField("channel", 0),
                   LEIntField("rssi_did", 0),
                   LEShortField("rssi_status", 0),
                   LEShortField("rssi_len", 0),
                   LEIntField("rssi", 0),
                   LEIntField("sq_did", 0),
                   LEShortField("sq_status", 0),
                   LEShortField("sq_len", 0),
                   LEIntField("sq", 0),
                   LEIntField("signal_did", 0),
                   LEShortField("signal_status", 0),
                   LEShortField("signal_len", 0),
                   # signal is the only signed metric in this header.
                   LESignedIntField("signal", 0),
                   LEIntField("noise_did", 0),
                   LEShortField("noise_status", 0),
                   LEShortField("noise_len", 0),
                   LEIntField("noise", 0),
                   LEIntField("rate_did", 0),
                   LEShortField("rate_status", 0),
                   LEShortField("rate_len", 0),
                   LEIntField("rate", 0),
                   LEIntField("istx_did", 0),
                   LEShortField("istx_status", 0),
                   LEShortField("istx_len", 0),
                   LEIntField("istx", 0),
                   LEIntField("frmlen_did", 0),
                   LEShortField("frmlen_status", 0),
                   LEShortField("frmlen_len", 0),
                   LEIntField("frmlen", 0),
                   ]
    def answers(self, other):
        # Delegate answer matching to the encapsulated 802.11 frame;
        # two Prism headers never "answer" each other directly.
        if isinstance(other, PrismHeader):
            return self.payload.answers(other.payload)
        else:
            return self.payload.answers(other)
class _RadiotapReversePadField(ReversePadField):
    """Pad field wrapper enforcing Radiotap natural-alignment rules.

    Per radiotap.org, every header field must start on a boundary equal
    to its own size (8-, 16-, 32- or 64-bit), so the alignment is derived
    from the wrapped field itself.
    """
    def __init__(self, fld):
        self._fld = fld
        self._padwith = b"\x00"
        # BitFields express their length in bits, so round the byte
        # length up; fixed-format fields expose it via their struct
        # format string.
        if isinstance(fld, BitField):
            self._align = int(math.ceil(self.i2len(None, None)))
        else:
            self._align = struct.calcsize(fld.fmt)
class _dbmField(ByteField):
    """Signed dBm value stored on the wire as an offset-256 byte."""
    def i2m(self, pkt, x):
        # Shift the (negative) dBm value into unsigned byte range before
        # encoding; super(ByteField, ...) deliberately skips ByteField.
        return super(ByteField, self).i2m(pkt, x + 256)
    def m2i(self, pkt, x):
        # Undo the offset applied by i2m.
        return super(ByteField, self).m2i(pkt, x) - 256
    def i2repr(self, pkt, x):
        return "{0}dBm".format(x)
# Mapping of the radiotap VHT "bandwidth" index (0-25) to a human-readable
# channel width; several indices map to the same width because the index
# also encodes the sideband position.
_vht_bandwidth = {0: "20MHz", 1: "40MHz", 2: "40MHz", 3: "40MHz", 4: "80MHz", 5: "80MHz",  # noqa: E501
                  6: "80MHz", 7: "80MHz", 8: "80MHz", 9: "80MHz", 10: "80MHz", 11: "160MHz",  # noqa: E501
                  12: "160MHz", 13: "160MHz", 14: "160MHz", 15: "160MHz", 16: "160MHz", 17: "160MHz",  # noqa: E501
                  18: "160MHz", 19: "160MHz", 20: "160MHz", 21: "160MHz", 22: "160MHz", 23: "160MHz",  # noqa: E501
                  24: "160MHz", 25: "160MHz"}
def _next_radiotap_extpm(pkt, lst, cur, s):
"""Generates the next RadioTapExtendedPresenceMask"""
if cur is None or (cur.present and cur.present.Ext):
st = len(lst) + (cur is not None)
return lambda *args: RadioTapExtendedPresenceMask(*args, index=st)
return None
class RadioTapExtendedPresenceMask(Packet):
    """RadioTapExtendedPresenceMask should be instantiated by passing an
    `index=` kwarg, stating which place the item has in the list.
    Passing index will update the b[x] fields accordingly to the index.
    e.g.
    >>> a = RadioTapExtendedPresenceMask(present="b0+b12+b29+Ext")
    >>> b = RadioTapExtendedPresenceMask(index=1, present="b33+b45+b59+b62")
    >>> pkt = RadioTap(present="Ext", Ext=[a, b])
    """
    name = "RadioTap Extended presence mask"
    fields_desc = [FlagsField('present', None, -32,
                              ["b%s" % i for i in range(0, 31)] + ["Ext"])]
    def __init__(self, _pkt=None, index=0, **kwargs):
        # Rename the flag bits to match this mask's position *before*
        # Packet.__init__ parses or assigns any field values.
        self._restart_indentation(index)
        Packet.__init__(self, _pkt, **kwargs)
    def _restart_indentation(self, index):
        # Mask number `index` covers bits b(32*index)..b(32*index+30),
        # plus the Ext bit announcing a further mask.
        # NOTE(review): fields_desc[0] is a class attribute, so renaming
        # mutates state shared by all instances -- presumably acceptable
        # because masks are built/parsed in order; confirm.
        st = index * 32
        self.fields_desc[0].names = ["b%s" % (i + st) for i in range(0, 31)] + ["Ext"]  # noqa: E501
    def guess_payload_class(self, pay):
        # Bytes after the mask belong to the following radiotap fields,
        # not to this sub-packet.
        return conf.padding_layer
class RadioTap(Packet):
    # Radiotap pseudo-header (DLT_IEEE802_11_RADIO): a 32-bit "present"
    # bitmap (optionally extended) followed by the announced per-capture
    # metadata fields, each padded to its natural alignment via
    # _RadiotapReversePadField.  Field order below follows the radiotap
    # bit numbering and must not be changed.
    name = "RadioTap dummy"
    fields_desc = [ByteField('version', 0),
                   ByteField('pad', 0),
                   LEShortField('len', None),
                   FlagsField('present', None, -32, ['TSFT', 'Flags', 'Rate', 'Channel', 'FHSS', 'dBm_AntSignal',  # noqa: E501
                                                     'dBm_AntNoise', 'Lock_Quality', 'TX_Attenuation', 'dB_TX_Attenuation',  # noqa: E501
                                                     'dBm_TX_Power', 'Antenna', 'dB_AntSignal', 'dB_AntNoise',  # noqa: E501
                                                     'RXFlags', 'TXFlags', 'b17', 'b18', 'ChannelPlus', 'MCS', 'A_MPDU',  # noqa: E501
                                                     'VHT', 'timestamp', 'b24', 'b25', 'b26', 'b27', 'b28', 'b29',  # noqa: E501
                                                     'RadiotapNS', 'VendorNS', 'Ext']),  # noqa: E501
                   # Extended presence mask
                   ConditionalField(PacketListField("Ext", [], next_cls_cb=_next_radiotap_extpm), lambda pkt: pkt.present and pkt.present.Ext),  # noqa: E501
                   # Default fields
                   ConditionalField(_RadiotapReversePadField(BitField("mac_timestamp", 0, -64)), lambda pkt: pkt.present and pkt.present.TSFT),  # noqa: E501
                   ConditionalField(
                       _RadiotapReversePadField(
                           FlagsField("Flags", None, -8, ['CFP', 'ShortPreamble', 'wep', 'fragment',  # noqa: E501
                                                          'FCS', 'pad', 'badFCS', 'ShortGI'])  # noqa: E501
                       ),
                       lambda pkt: pkt.present and pkt.present.Flags),
                   ConditionalField(_RadiotapReversePadField(ByteField("Rate", 0)), lambda pkt: pkt.present and pkt.present.Rate),  # noqa: E501
                   ConditionalField(_RadiotapReversePadField(LEShortField("Channel", 0)), lambda pkt: pkt.present and pkt.present.Channel),  # noqa: E501
                   ConditionalField(
                       _RadiotapReversePadField(
                           FlagsField("ChannelFlags", None, -16, ['res1', 'res2', 'res3', 'res4', 'Turbo', 'CCK',  # noqa: E501
                                                                  'OFDM', '2GHz', '5GHz', 'Passive', 'Dynamic_CCK_OFDM',  # noqa: E501
                                                                  'GFSK', 'GSM', 'StaticTurbo', '10MHz', '5MHz'])  # noqa: E501
                       ),
                       lambda pkt: pkt.present and pkt.present.Channel),
                   ConditionalField(_RadiotapReversePadField(_dbmField("dBm_AntSignal", -256)), lambda pkt: pkt.present and pkt.present.dBm_AntSignal),  # noqa: E501
                   ConditionalField(_RadiotapReversePadField(_dbmField("dBm_AntNoise", -256)), lambda pkt: pkt.present and pkt.present.dBm_AntNoise),  # noqa: E501
                   ConditionalField(_RadiotapReversePadField(ByteField("Antenna", 0)), lambda pkt: pkt.present and pkt.present.Antenna),  # noqa: E501
                   # RX Flags
                   ConditionalField(_RadiotapReversePadField(FlagsField("RXFlags", None, -16, ["res1", "BAD_PLCP", "res2"])),  # noqa: E501
                                    lambda pkt: pkt.present and pkt.present.RXFlags),  # noqa: E501
                   # TX Flags
                   ConditionalField(_RadiotapReversePadField(FlagsField("TXFlags", None, -16, ["TX_FAIL", "CTS", "RTS", "NOACK", "NOSEQ"])),  # noqa: E501
                                    lambda pkt: pkt.present and pkt.present.TXFlags),  # noqa: E501
                   # ChannelPlus
                   ConditionalField(
                       _RadiotapReversePadField(
                           FlagsField("ChannelFlags2", None, -32, ['res1', 'res2', 'res3', 'res4', 'Turbo', 'CCK',  # noqa: E501
                                                                   'OFDM', '2GHz', '5GHz', 'Passive', 'Dynamic_CCK_OFDM',  # noqa: E501
                                                                   'GFSK', 'GSM', 'StaticTurbo', '10MHz', '5MHz',  # noqa: E501
                                                                   '20MHz', '40MHz_ext_channel_above', '40MHz_ext_channel_below',  # noqa: E501
                                                                   'res5', 'res6', 'res7', 'res8', 'res9'])  # noqa: E501
                       ),
                       lambda pkt: pkt.present and pkt.present.ChannelPlus),
                   ConditionalField(_RadiotapReversePadField(LEShortField("ChannelFrequency", 0)), lambda pkt: pkt.present and pkt.present.ChannelPlus),  # noqa: E501
                   ConditionalField(_RadiotapReversePadField(ByteField("ChannelNumber", 0)), lambda pkt: pkt.present and pkt.present.ChannelPlus),  # noqa: E501
                   # MCS
                   ConditionalField(
                       _RadiotapReversePadField(FlagsField("knownMCS", None, -8, ['bandwidth', 'MCS_index', 'guard_interval', 'HT_format',  # noqa: E501
                                                                                  'FEC_type', 'STBC_streams', 'Ness', 'Ness_MSB'])),  # noqa: E501
                       lambda pkt: pkt.present and pkt.present.MCS),
                   ConditionalField(BitEnumField("bandwidth", 0, 2, {0: "20MHz", 1: "40MHz", 2: "ht40Mhz-", 3: "ht40MHz+"}),  # noqa: E501
                                    lambda pkt: pkt.present and pkt.present.MCS),  # noqa: E501
                   ConditionalField(BitEnumField("guard_interval", 0, 1, {0: "Long_GI", 1: "Short_GI"}), lambda pkt: pkt.present and pkt.present.MCS),  # noqa: E501
                   ConditionalField(BitEnumField("HT_format", 0, 1, {0: "mixed", 1: "greenfield"}), lambda pkt: pkt.present and pkt.present.MCS),  # noqa: E501
                   ConditionalField(BitEnumField("FEC_type", 0, 1, {0: "BCC", 1: "LDPC"}), lambda pkt: pkt.present and pkt.present.MCS),  # noqa: E501
                   ConditionalField(BitField("STBC_streams", 0, 2), lambda pkt: pkt.present and pkt.present.MCS),  # noqa: E501
                   ConditionalField(BitField("Ness_LSB", 0, 1), lambda pkt: pkt.present and pkt.present.MCS),  # noqa: E501
                   ConditionalField(ByteField("MCS_index", 0), lambda pkt: pkt.present and pkt.present.MCS),  # noqa: E501
                   # A_MPDU
                   ConditionalField(_RadiotapReversePadField(LEIntField("A_MPDU_ref", 0)), lambda pkt: pkt.present and pkt.present.A_MPDU),  # noqa: E501
                   ConditionalField(
                       _RadiotapReversePadField(
                           FlagsField("A_MPDU_flags", None, -32, ['Report0Subframe', 'Is0Subframe', 'KnownLastSubframe',  # noqa: E501
                                                                  'LastSubframe', 'CRCerror', 'EOFsubframe', 'KnownEOF',  # noqa: E501
                                                                  'res1', 'res2', 'res3', 'res4', 'res5', 'res6', 'res7', 'res8'])  # noqa: E501
                       ),
                       lambda pkt: pkt.present and pkt.present.A_MPDU),
                   # VHT
                   ConditionalField(
                       _RadiotapReversePadField(
                           FlagsField("KnownVHT", None, -16, ['STBC', 'TXOP_PS_NOT_ALLOWED', 'GuardInterval', 'SGINsysmDis',  # noqa: E501
                                                              'LDPCextraOFDM', 'Beamformed', 'Bandwidth', 'GroupID', 'PartialAID',  # noqa: E501
                                                              'res1', 'res2', 'res3', 'res4', 'res5', 'res6', 'res7'])  # noqa: E501
                       ),
                       lambda pkt: pkt.present and pkt.present.VHT),
                   ConditionalField(
                       _RadiotapReversePadField(
                           FlagsField("PresentVHT", None, -8, ['STBC', 'TXOP_PS_NOT_ALLOWED', 'GuardInterval', 'SGINsysmDis',  # noqa: E501
                                                               'LDPCextraOFDM', 'Beamformed', 'res1', 'res2'])  # noqa: E501
                       ),
                       lambda pkt: pkt.present and pkt.present.VHT),
                   ConditionalField(_RadiotapReversePadField(ByteEnumField("bandwidth", 0, _vht_bandwidth)), lambda pkt: pkt.present and pkt.present.VHT),  # noqa: E501
                   ConditionalField(_RadiotapReversePadField(StrFixedLenField("mcs_nss", 0, length=5)), lambda pkt: pkt.present and pkt.present.VHT),  # noqa: E501
                   ConditionalField(_RadiotapReversePadField(ByteField("GroupID", 0)), lambda pkt: pkt.present and pkt.present.VHT),  # noqa: E501
                   ConditionalField(_RadiotapReversePadField(ShortField("PartialAID", 0)), lambda pkt: pkt.present and pkt.present.VHT),  # noqa: E501
                   StrLenField('notdecoded', "", length_from=lambda pkt: pkt.len - pkt._tmp_dissect_pos)]  # noqa: E501
    def guess_payload_class(self, payload):
        # Hand the frame to Dot11FCS when the radiotap Flags announce a
        # trailing frame check sequence; plain Dot11 otherwise.
        if self.present and self.present.Flags and self.Flags.FCS:
            return Dot11FCS
        return Dot11
    def post_build(self, p, pay):
        # Fill in the total header length (little-endian) when left as
        # None at build time.
        if self.len is None:
            p = p[:2] + struct.pack("!H", len(p))[::-1] + p[4:]
        return p + pay
class Dot11(Packet):
    """IEEE 802.11 MAC header (without a trailing FCS; see Dot11FCS)."""
    name = "802.11"
    fields_desc = [
        BitField("subtype", 0, 4),
        BitEnumField("type", 0, 2, ["Management", "Control", "Data",
                                    "Reserved"]),
        BitField("proto", 0, 2),
        FlagsField("FCfield", 0, 8, ["to-DS", "from-DS", "MF", "retry",
                                     "pw-mgt", "MD", "wep", "order"]),
        ShortField("ID", 0),
        MACField("addr1", ETHER_ANY),
        # addr2 is present except for most control frames (only control
        # subtypes 0x8-0xb and 0xe-0xf carry it).
        ConditionalField(
            MACField("addr2", ETHER_ANY),
            lambda pkt: (pkt.type != 1 or
                         pkt.subtype in [0x8, 0x9, 0xa, 0xb, 0xe, 0xf]),
        ),
        # addr3 only exists in management and data frames.
        ConditionalField(
            MACField("addr3", ETHER_ANY),
            lambda pkt: pkt.type in [0, 2],
        ),
        # The sequence-control field is absent from control frames.
        ConditionalField(LEShortField("SC", 0), lambda pkt: pkt.type != 1),
        # addr4 only appears in data frames with both DS bits set.
        ConditionalField(
            MACField("addr4", ETHER_ANY),
            lambda pkt: (pkt.type == 2 and
                         pkt.FCfield & 3 == 3),  # from-DS+to-DS
        )
    ]
    def mysummary(self):
        # Supports both Dot11 and Dot11FCS
        return self.sprintf("802.11 %%%s.type%% %%%s.subtype%% %%%s.addr2%% > %%%s.addr1%%" % ((self.__class__.__name__,) * 4))  # noqa: E501
    def guess_payload_class(self, payload):
        # Data subtypes 0x8-0xf (except 0xd) carry a QoS control field;
        # the wep bit (0x40) in FCfield flags an encrypted payload.
        if self.type == 0x02 and (0x08 <= self.subtype <= 0xF and self.subtype != 0xD):  # noqa: E501
            return Dot11QoS
        elif self.FCfield & 0x40:
            return Dot11WEP
        else:
            return Packet.guess_payload_class(self, payload)
    def answers(self, other):
        # Return 1 when this frame plausibly answers `other`, matching
        # request/response management subtype pairs and delegating for
        # auth and data frames.
        if isinstance(other, Dot11):
            if self.type == 0:  # management
                if self.addr1.lower() != other.addr2.lower():  # check resp DA w/ req SA  # noqa: E501
                    return 0
                if (other.subtype, self.subtype) in [(0, 1), (2, 3), (4, 5)]:
                    return 1
                if self.subtype == other.subtype == 11:  # auth
                    return self.payload.answers(other.payload)
            elif self.type == 1:  # control
                return 0
            elif self.type == 2:  # data
                return self.payload.answers(other.payload)
            elif self.type == 3:  # reserved
                return 0
        return 0
    def unwep(self, key=None, warn=1):
        # Strip the WEP layer in place: decrypt with `key` (or
        # conf.wepkey), clear the wep bit and splice in the clear text.
        if self.FCfield & 0x40 == 0:
            if warn:
                warning("No WEP to remove")
            return
        if isinstance(self.payload.payload, NoPayload):
            # Not decrypted yet: try with the provided/configured key.
            if key or conf.wepkey:
                self.payload.decrypt(key)
        if isinstance(self.payload.payload, NoPayload):
            if warn:
                warning("Dot11 can't be decrypted. Check conf.wepkey.")
            return
        self.FCfield &= ~0x40
        self.payload = self.payload.payload
class Dot11FCS(Dot11):
    """802.11 header variant whose frames carry a trailing 4-byte FCS."""
    name = "802.11-FCS"
    match_subclass = True
    fields_desc = Dot11.fields_desc + [XLEIntField("fcs", None)]  # Automatically moved to the end of the packet  # noqa: E501
    def compute_fcs(self, s):
        # CRC32 of the frame, emitted little-endian (big-endian pack, reversed).
        return struct.pack("!I", crc32(s) & 0xffffffff)[::-1]
    def post_build(self, p, pay):
        # Switch payload and frame check sequence
        return p[:-4] + pay + (p[-4:] if self.fcs is not None else self.compute_fcs(p[:-4] + pay))  # noqa: E501
    def post_dissect(self, s):
        self.raw_packet_cache = None  # Reset packet to allow post_build
        return s
    def pre_dissect(self, s):
        # Get the frame check sequence
        # Derive the fixed header length from the frame-control bytes so the
        # trailing 4 FCS bytes can be moved right after the header fields.
        sty = orb(s[0])
        ty = orb(s[1]) >> 2
        fc = struct.unpack("!H", s[2:4])[0]
        length = 12 + 6 * ((ty != 1 or sty in [0x8, 0x9, 0xa, 0xb, 0xe, 0xf]) + (ty in [0, 2]) + (ty == 2 and fc & 3 == 3))  # noqa: E501
        return s[:length] + s[-4:] + s[length:-4]
class Dot11QoS(Packet):
    """802.11 QoS Control field, present on QoS data frames."""
    name = "802.11 QoS"
    fields_desc = [BitField("Reserved", None, 1),
                   BitField("Ack_Policy", None, 2),
                   BitField("EOSP", None, 1),
                   BitField("TID", None, 4),
                   ByteField("TXOP", None)]
    def guess_payload_class(self, payload):
        if isinstance(self.underlayer, Dot11):
            if self.underlayer.FCfield & 0x40:
                # Protected bit on the enclosing Dot11: payload is encrypted.
                return Dot11WEP
        return Packet.guess_payload_class(self, payload)
# Capability Information bit names, in the bit order scapy's FlagsField
# uses for this little-endian 16-bit field.
capability_list = ["res8", "res9", "short-slot", "res11",
                   "res12", "DSSS-OFDM", "res14", "res15",
                   "ESS", "IBSS", "CFP", "CFP-req",
                   "privacy", "short-preamble", "PBCC", "agility"]
# 802.11 Reason Code values (disassociation / deauthentication frames).
reason_code = {0: "reserved", 1: "unspec", 2: "auth-expired",
               3: "deauth-ST-leaving",
               4: "inactivity", 5: "AP-full", 6: "class2-from-nonauth",
               7: "class3-from-nonass", 8: "disas-ST-leaving",
               9: "ST-not-auth"}
# 802.11 Status Code values (association / authentication responses).
status_code = {0: "success", 1: "failure", 10: "cannot-support-all-cap",
               11: "inexist-asso", 12: "asso-denied", 13: "algo-unsupported",
               14: "bad-seq-num", 15: "challenge-failure",
               16: "timeout", 17: "AP-full", 18: "rate-unsupported"}
class Dot11Beacon(Packet):
    """802.11 Beacon frame fixed fields.

    The variable-length information elements follow as Dot11Elt layers.
    """
    name = "802.11 Beacon"
    fields_desc = [LELongField("timestamp", 0),
                   LEShortField("beacon_interval", 0x0064),
                   FlagsField("cap", 0, 16, capability_list)]
    def network_stats(self):
        """Return a dictionary containing a summary of the Dot11
        elements fields
        """
        summary = {}
        crypto = set()
        p = self.payload
        while isinstance(p, Dot11Elt):
            if p.ID == 0:
                summary["ssid"] = plain_str(p.info)
            elif p.ID == 3:
                summary["channel"] = ord(p.info)
            elif isinstance(p, Dot11EltRates):
                summary["rates"] = p.rates
            elif isinstance(p, Dot11EltRSN):
                crypto.add("WPA2")
            elif p.ID == 221:
                # p.info is bytes (this module uses orb/plain_str for
                # Python 3 compat), so the Microsoft WPA IE prefix must be
                # compared against a bytes literal; with the former str
                # literal this branch could never match on Python 3.
                if isinstance(p, Dot11EltMicrosoftWPA) or \
                        p.info.startswith(b'\x00P\xf2\x01\x01\x00'):
                    crypto.add("WPA")
            p = p.payload
        if not crypto:
            # No RSN/WPA element seen: fall back to the privacy capability
            # bit to distinguish WEP from an open network.
            if self.cap.privacy:
                crypto.add("WEP")
            else:
                crypto.add("OPN")
        summary["crypto"] = crypto
        return summary
# Element ID -> human-readable name for 802.11 information elements.
# NOTE(review): IDs 42 and 47 both map to "ERPinfo" (47 is a legacy
# duplicate of the ERP element) -- confirm this duplication is intended.
_dot11_info_elts_ids = {
    0: "SSID",
    1: "Rates",
    2: "FHset",
    3: "DSset",
    4: "CFset",
    5: "TIM",
    6: "IBSSset",
    7: "Country",
    10: "Request",
    16: "challenge",
    33: "PowerCapability",
    36: "Channels",
    42: "ERPinfo",
    45: "HTCapabilities",
    46: "QoSCapability",
    47: "ERPinfo",
    48: "RSNinfo",
    50: "ESRates",
    52: "PowerConstraint",
    107: "Interworking",
    127: "ExtendendCapatibilities",
    191: "VHTCapabilities",
    221: "vendor",
    68: "reserved"
}
class Dot11Elt(Packet):
    """Generic 802.11 information element (TLV: ID, length, value).

    Specialised subclasses register themselves via register_variant() so
    dispatch_hook() can pick the right class from the element ID.
    """
    name = "802.11 Information Element"
    fields_desc = [ByteEnumField("ID", 0, _dot11_info_elts_ids),
                   FieldLenField("len", None, "info", "B"),
                   StrLenField("info", "", length_from=lambda x: x.len,
                               max_length=255)]
    def mysummary(self):
        if self.ID == 0:
            # SSID element: drop the Python 3 b'' prefix from the repr.
            ssid = repr(self.info)
            if ssid[:2] in ['b"', "b'"]:
                ssid = ssid[1:]
            return "SSID=%s" % ssid, [Dot11]
        else:
            return ""
    # Element-ID -> subclass registry filled in by register_variant().
    registered_ies = {}
    @classmethod
    def register_variant(cls):
        cls.registered_ies[cls.ID.default] = cls
    @classmethod
    def dispatch_hook(cls, _pkt=None, *args, **kargs):
        # Choose the concrete element class from the first (ID) byte.
        if _pkt:
            _id = orb(_pkt[0])
            if _id == 221:
                # Vendor-specific element: inspect the OUI for the
                # Microsoft (00:50:f2) WPA information element.
                oui_a = orb(_pkt[2])
                oui_b = orb(_pkt[3])
                oui_c = orb(_pkt[4])
                if oui_a == 0x00 and oui_b == 0x50 and oui_c == 0xf2:
                    # MS OUI
                    type_ = orb(_pkt[5])
                    if type_ == 0x01:
                        # MS WPA IE
                        return Dot11EltMicrosoftWPA
                    else:
                        return Dot11EltVendorSpecific
                else:
                    return Dot11EltVendorSpecific
            else:
                return cls.registered_ies.get(_id, cls)
        return cls
    def haslayer(self, cls):
        # Any Dot11Elt subclass instance matches "Dot11Elt", and a specific
        # subclass matches by isinstance (not exact-class equality).
        if cls == "Dot11Elt":
            if isinstance(self, Dot11Elt):
                return True
        elif issubtype(cls, Dot11Elt):
            if isinstance(self, cls):
                return True
        return super(Dot11Elt, self).haslayer(cls)
    def getlayer(self, cls, nb=1, _track=None, _subclass=True, **flt):
        # Always force subclass matching, consistent with haslayer above.
        return super(Dot11Elt, self).getlayer(cls, nb=nb, _track=_track,
                                              _subclass=True, **flt)
    def post_build(self, p, pay):
        if self.len is None:
            # Length byte counts the value only (excludes ID + len octets).
            p = p[:1] + chb(len(p) - 2) + p[2:]
        return p + pay
class RSNCipherSuite(Packet):
    """4-byte cipher suite selector (OUI + cipher id) used in RSN/WPA IEs."""
    name = "Cipher suite"
    fields_desc = [
        X3BytesField("oui", 0x000fac),
        ByteEnumField("cipher", 0x04, {
            0x00: "Use group cipher suite",
            0x01: "WEP-40",
            0x02: "TKIP",
            0x03: "Reserved",
            0x04: "CCMP",
            0x05: "WEP-104"
        })
    ]
    def extract_padding(self, s):
        # Fixed 4-byte structure: never consume the following bytes.
        return "", s
class AKMSuite(Packet):
    """4-byte AKM (authentication and key management) suite selector."""
    name = "AKM suite"
    fields_desc = [
        X3BytesField("oui", 0x000fac),
        ByteEnumField("suite", 0x01, {
            0x00: "Reserved",
            0x01: "IEEE 802.1X / PMKSA caching",
            0x02: "PSK"
        })
    ]
    def extract_padding(self, s):
        # Fixed 4-byte structure: never consume the following bytes.
        return "", s
class PMKIDListPacket(Packet):
    """List of 16-byte PMKIDs carried at the end of an RSN element."""
    name = "PMKIDs"
    fields_desc = [
        # count_of must reference the actual list field name below
        # ("pmkid_list"); it previously named a non-existent "pmk_id_list"
        # field, so the count was never derived from the list when building.
        LEFieldLenField("nb_pmkids", 0, count_of="pmkid_list"),
        FieldListField(
            "pmkid_list",
            None,
            XStrFixedLenField("", "", length=16),
            count_from=lambda pkt: pkt.nb_pmkids
        )
    ]
    def extract_padding(self, s):
        # Consume only our own bytes; leave the remainder to the parent.
        return "", s
class Dot11EltRSN(Dot11Elt):
    """RSN (WPA2) information element, ID 48."""
    name = "RSN information"
    fields_desc = [
        ByteField("ID", 48),
        ByteField("len", None),
        LEShortField("version", 1),
        PacketField("group_cipher_suite", RSNCipherSuite(), RSNCipherSuite),
        LEFieldLenField(
            "nb_pairwise_cipher_suites",
            1,
            count_of="pairwise_cipher_suites"
        ),
        PacketListField(
            "pairwise_cipher_suites",
            [RSNCipherSuite()],
            RSNCipherSuite,
            count_from=lambda p: p.nb_pairwise_cipher_suites
        ),
        LEFieldLenField(
            "nb_akm_suites",
            1,
            count_of="akm_suites"
        ),
        PacketListField(
            "akm_suites",
            [AKMSuite()],
            AKMSuite,
            count_from=lambda p: p.nb_akm_suites
        ),
        # RSN Capabilities bit fields.
        BitField("mfp_capable", 0, 1),
        BitField("mfp_required", 0, 1),
        BitField("gtksa_replay_counter", 0, 2),
        BitField("ptksa_replay_counter", 0, 2),
        BitField("no_pairwise", 0, 1),
        BitField("pre_auth", 0, 1),
        BitField("reserved", 0, 8),
        # Optional PMKID list: only dissected when the element length shows
        # extra bytes beyond the fixed fields and the two suite lists.
        ConditionalField(
            PacketField("pmkids", None, PMKIDListPacket),
            lambda pkt: (
                0 if pkt.len is None else
                pkt.len - (12 + (pkt.nb_pairwise_cipher_suites * 4) +
                           (pkt.nb_akm_suites * 4)) >= 18)
        )
    ]
class Dot11EltMicrosoftWPA(Dot11Elt):
    """Microsoft WPA vendor-specific element (OUI 00:50:f2, type 1)."""
    name = "Microsoft WPA"
    fields_desc = [
        ByteField("ID", 221),
        ByteField("len", None),
        X3BytesField("oui", 0x0050f2),
        XByteField("type", 0x01),
        LEShortField("version", 1),
        PacketField("group_cipher_suite", RSNCipherSuite(), RSNCipherSuite),
        LEFieldLenField(
            "nb_pairwise_cipher_suites",
            1,
            count_of="pairwise_cipher_suites"
        ),
        # Defaults are lists, consistent with Dot11EltRSN: PacketListField
        # holds a list of packets, not a single bare packet.
        PacketListField(
            "pairwise_cipher_suites",
            [RSNCipherSuite()],
            RSNCipherSuite,
            count_from=lambda p: p.nb_pairwise_cipher_suites
        ),
        LEFieldLenField(
            "nb_akm_suites",
            1,
            count_of="akm_suites"
        ),
        PacketListField(
            "akm_suites",
            [AKMSuite()],
            AKMSuite,
            count_from=lambda p: p.nb_akm_suites
        )
    ]
class Dot11EltRates(Dot11Elt):
    """Supported Rates element (ID 1); one byte per rate."""
    name = "Rates"
    fields_desc = [
        ByteField("ID", 1),
        ByteField("len", None),
        FieldListField(
            "rates",
            [],
            XByteField("", 0),
            count_from=lambda p: p.len
        )
    ]
class Dot11EltVendorSpecific(Dot11Elt):
    """Generic vendor-specific element (ID 221): 3-byte OUI + opaque data."""
    name = "Vendor Specific"
    fields_desc = [
        ByteField("ID", 221),
        ByteField("len", None),
        X3BytesField("oui", 0x000000),
        StrLenField("info", "", length_from=lambda x: x.len - 3)
    ]
class Dot11ATIM(Packet):
    """802.11 ATIM management frame (empty body)."""
    name = "802.11 ATIM"
class Dot11Disas(Packet):
    """802.11 Disassociation frame body (reason code only)."""
    name = "802.11 Disassociation"
    fields_desc = [LEShortEnumField("reason", 1, reason_code)]
class Dot11AssoReq(Packet):
    """802.11 Association Request fixed fields."""
    name = "802.11 Association Request"
    fields_desc = [FlagsField("cap", 0, 16, capability_list),
                   LEShortField("listen_interval", 0x00c8)]
class Dot11AssoResp(Packet):
    """802.11 Association Response fixed fields."""
    name = "802.11 Association Response"
    fields_desc = [FlagsField("cap", 0, 16, capability_list),
                   LEShortField("status", 0),
                   LEShortField("AID", 0)]
class Dot11ReassoReq(Packet):
    """802.11 Reassociation Request fixed fields."""
    name = "802.11 Reassociation Request"
    fields_desc = [FlagsField("cap", 0, 16, capability_list),
                   LEShortField("listen_interval", 0x00c8),
                   MACField("current_AP", ETHER_ANY)]
class Dot11ReassoResp(Dot11AssoResp):
    """802.11 Reassociation Response (same fields as Dot11AssoResp)."""
    name = "802.11 Reassociation Response"
class Dot11ProbeReq(Packet):
    """802.11 Probe Request (body is information elements only)."""
    name = "802.11 Probe Request"
class Dot11ProbeResp(Packet):
    """802.11 Probe Response fixed fields (same layout as a Beacon)."""
    name = "802.11 Probe Response"
    fields_desc = [LELongField("timestamp", 0),
                   LEShortField("beacon_interval", 0x0064),
                   FlagsField("cap", 0, 16, capability_list)]
class Dot11Auth(Packet):
    """802.11 Authentication frame body."""
    name = "802.11 Authentication"
    fields_desc = [LEShortEnumField("algo", 0, ["open", "sharedkey"]),
                   LEShortField("seqnum", 0),
                   LEShortEnumField("status", 0, status_code)]
    def answers(self, other):
        # Authentication exchanges use consecutive sequence numbers.
        if self.seqnum == other.seqnum + 1:
            return 1
        return 0
class Dot11Deauth(Packet):
    """802.11 Deauthentication frame body (reason code only)."""
    name = "802.11 Deauthentication"
    fields_desc = [LEShortEnumField("reason", 1, reason_code)]
class Dot11WEP(Packet):
    """WEP-protected payload: IV, key id, ciphertext and trailing ICV."""
    name = "802.11 WEP packet"
    fields_desc = [StrFixedLenField("iv", b"\0\0\0", 3),
                   ByteField("keyid", 0),
                   StrField("wepdata", None, remain=4),
                   IntField("icv", None)]
    @crypto_validator
    def decrypt(self, key=None):
        """RC4-decrypt wepdata with IV+key and attach the result as LLC."""
        if key is None:
            key = conf.wepkey
        if key:
            d = Cipher(
                algorithms.ARC4(self.iv + key.encode("utf8")),
                None,
                default_backend(),
            ).decryptor()
            self.add_payload(LLC(d.update(self.wepdata) + d.finalize()))
    def post_dissect(self, s):
        # Best effort: silently leaves the payload encrypted if no key.
        self.decrypt()
    def build_payload(self):
        # When wepdata is already filled in, the upper layers live inside
        # it (encrypted), so do not serialize them a second time.
        if self.wepdata is None:
            return Packet.build_payload(self)
        return b""
    @crypto_validator
    def encrypt(self, p, pay, key=None):
        """RC4-encrypt *pay*, appending a CRC32 ICV when none was given."""
        if key is None:
            key = conf.wepkey
        if key:
            if self.icv is None:
                # No explicit ICV: compute it and encrypt it with the data.
                pay += struct.pack("<I", crc32(pay) & 0xffffffff)
                icv = b""
            else:
                icv = p[4:8]
            e = Cipher(
                algorithms.ARC4(self.iv + key.encode("utf8")),
                None,
                default_backend(),
            ).encryptor()
            return p[:4] + e.update(pay) + e.finalize() + icv
        else:
            warning("No WEP key set (conf.wepkey).. strange results expected..")  # noqa: E501
            return b""
    def post_build(self, p, pay):
        if self.wepdata is None:
            p = self.encrypt(p, raw(pay))
        return p
class Dot11Ack(Packet):
    """802.11 Ack control frame (no body)."""
    name = "802.11 Ack packet"
# Layer bindings: attach payloads and management frame bodies to Dot11.
bind_layers(PrismHeader, Dot11,)
bind_layers(Dot11, LLC, type=2)
bind_layers(Dot11QoS, LLC,)
bind_layers(Dot11, Dot11AssoReq, subtype=0, type=0)
bind_layers(Dot11, Dot11AssoResp, subtype=1, type=0)
bind_layers(Dot11, Dot11ReassoReq, subtype=2, type=0)
bind_layers(Dot11, Dot11ReassoResp, subtype=3, type=0)
bind_layers(Dot11, Dot11ProbeReq, subtype=4, type=0)
bind_layers(Dot11, Dot11ProbeResp, subtype=5, type=0)
bind_layers(Dot11, Dot11Beacon, subtype=8, type=0)
bind_layers(Dot11, Dot11ATIM, subtype=9, type=0)
bind_layers(Dot11, Dot11Disas, subtype=10, type=0)
bind_layers(Dot11, Dot11Auth, subtype=11, type=0)
bind_layers(Dot11, Dot11Deauth, subtype=12, type=0)
bind_layers(Dot11, Dot11Ack, subtype=13, type=1)
# Management frame bodies are followed by chained information elements.
bind_layers(Dot11Beacon, Dot11Elt,)
bind_layers(Dot11AssoReq, Dot11Elt,)
bind_layers(Dot11AssoResp, Dot11Elt,)
bind_layers(Dot11ReassoReq, Dot11Elt,)
bind_layers(Dot11ReassoResp, Dot11Elt,)
bind_layers(Dot11ProbeReq, Dot11Elt,)
bind_layers(Dot11ProbeResp, Dot11Elt,)
bind_layers(Dot11Auth, Dot11Elt,)
bind_layers(Dot11Elt, Dot11Elt,)
# Register the link-layer (DLT) types used for pcap dissection.
conf.l2types.register(DLT_IEEE802_11, Dot11)
conf.l2types.register_num2layer(801, Dot11)
conf.l2types.register(DLT_PRISM_HEADER, PrismHeader)
conf.l2types.register_num2layer(802, PrismHeader)
conf.l2types.register(DLT_IEEE802_11_RADIO, RadioTap)
conf.l2types.register_num2layer(803, RadioTap)
class WiFi_am(AnsweringMachine):
    """Before using this, initialize "iffrom" and "ifto" interfaces:
    iwconfig iffrom mode monitor
    iwpriv orig_ifto hostapd 1
    ifconfig ifto up
    note: if ifto=wlan0ap then orig_ifto=wlan0
    note: ifto and iffrom must be set on the same channel
    ex:
    ifconfig eth1 up
    iwconfig eth1 mode monitor
    iwconfig eth1 channel 11
    iwpriv wlan0 hostapd 1
    ifconfig wlan0ap up
    iwconfig wlan0 channel 11
    iwconfig wlan0 essid dontexist
    iwconfig wlan0 mode managed
    """
    function_name = "airpwn"
    filter = None
    def parse_options(self, iffrom=conf.iface, ifto=conf.iface, replace="",
                      pattern="", ignorepattern=""):
        self.iffrom = iffrom
        self.ifto = ifto
        self.ptrn = re.compile(pattern.encode())
        self.iptrn = re.compile(ignorepattern.encode())
        self.replace = replace
    def is_request(self, pkt):
        """Match to-DS TCP segments whose payload matches *pattern*."""
        if not isinstance(pkt, Dot11):
            return 0
        if not pkt.FCfield & 1:
            return 0
        if not pkt.haslayer(TCP):
            return 0
        tcp = pkt.getlayer(TCP)
        pay = raw(tcp.payload)
        if not self.ptrn.match(pay):
            return 0
        # Skip payloads matching the ignore pattern.  The previous test,
        # "self.iptrn.match(pay) is True", was always False because
        # re.match returns a Match object or None -- ignorepattern was
        # silently dead.  Only apply it when a non-empty pattern was
        # supplied, since the empty default would match every payload.
        if self.iptrn.pattern and self.iptrn.match(pay):
            return 0
        return True
    def make_reply(self, p):
        """Forge the injected response plus a RST to close the stream."""
        ip = p.getlayer(IP)
        tcp = p.getlayer(TCP)
        pay = raw(tcp.payload)
        del(p.payload.payload.payload)
        p.FCfield = "from-DS"
        p.addr1, p.addr2 = p.addr2, p.addr1
        p /= IP(src=ip.dst, dst=ip.src)
        p /= TCP(sport=tcp.dport, dport=tcp.sport,
                 seq=tcp.ack, ack=tcp.seq + len(pay),
                 flags="PA")
        q = p.copy()
        p /= self.replace
        q.ID += 1
        q.getlayer(TCP).flags = "RA"
        q.getlayer(TCP).seq += len(self.replace)
        return [p, q]
    def print_reply(self, query, *reply):
        p = reply[0][0]
        print(p.sprintf("Sent %IP.src%:%IP.sport% > %IP.dst%:%TCP.dport%"))
    def send_reply(self, reply):
        sendp(reply, iface=self.ifto, **self.optsend)
    def sniff(self):
        sniff(iface=self.iffrom, **self.optsniff)
# Include WEP payloads and beacons in PacketList summary statistics.
conf.stats_dot11_protocols += [Dot11WEP, Dot11Beacon, ]
class Dot11PacketList(PacketList):
    """PacketList specialised for 802.11 captures."""

    def __init__(self, res=None, name="Dot11List", stats=None):
        # Default to the 802.11 protocol set used for summary statistics.
        PacketList.__init__(
            self, res, name,
            conf.stats_dot11_protocols if stats is None else stats
        )

    def toEthernet(self):
        """Return decrypted 802.11 data frames as an Ethernet PacketList."""
        converted = []
        for pkt in self.res:
            if Dot11 not in pkt or pkt.type != 2:
                continue
            frame = pkt[Dot11].copy()
            frame.unwep()
            # Dot11/LLC/SNAP/IP -> keep only what is above SNAP.
            converted.append(Ether() / frame.payload.payload.payload)
        return PacketList(converted, name="Ether from %s" % self.listname)
| gpl-2.0 |
mxOBS/deb-pkg_trusty_chromium-browser | tools/telemetry/telemetry/core/browser_credentials.py | 11 | 5581 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
from telemetry.core import util
from telemetry.core.backends import codepen_credentials_backend
from telemetry.core.backends import facebook_credentials_backend
from telemetry.core.backends import google_credentials_backend
from telemetry.unittest_util import options_for_unittests
class CredentialsError(Exception):
  """Raised when a credentials type is unknown or logging in fails."""
class BrowserCredentials(object):
  """Manages per-site login credentials for a browser session.

  Credentials are merged from up to three sources, in increasing order of
  precedence: the JSON file at |credentials_path|, ~/.telemetry-credentials,
  and values registered through Add().  The actual login/logout mechanics
  are delegated to backend objects keyed by their credentials_type.
  """

  def __init__(self, backends=None):
    self._credentials = {}
    self._credentials_path = None
    self._extra_credentials = {}

    if backends is None:
      backends = [
          codepen_credentials_backend.CodePenCredentialsBackend(),
          facebook_credentials_backend.FacebookCredentialsBackend(),
          facebook_credentials_backend.FacebookCredentialsBackend2(),
          google_credentials_backend.GoogleCredentialsBackend()]

    self._backends = {}
    for backend in backends:
      self._backends[backend.credentials_type] = backend

  def AddBackend(self, backend):
    """Registers an extra backend; its credentials_type must be new."""
    assert backend.credentials_type not in self._backends
    self._backends[backend.credentials_type] = backend

  def IsLoggedIn(self, credentials_type):
    if credentials_type not in self._backends:
      # Bug fix: the message was previously passed as two positional
      # arguments ('...%s', type), so the exception carried an unformatted
      # tuple instead of the interpolated string.
      raise CredentialsError(
          'Unrecognized credentials type: %s' % credentials_type)
    if credentials_type not in self._credentials:
      return False
    return self._backends[credentials_type].IsLoggedIn()

  def CanLogin(self, credentials_type):
    if credentials_type not in self._backends:
      raise CredentialsError(
          'Unrecognized credentials type: %s' % credentials_type)
    return credentials_type in self._credentials

  def LoginNeeded(self, tab, credentials_type):
    if credentials_type not in self._backends:
      raise CredentialsError(
          'Unrecognized credentials type: %s' % credentials_type)
    if credentials_type not in self._credentials:
      return False
    from telemetry.page.actions import action_runner
    runner = action_runner.ActionRunner(tab)
    return self._backends[credentials_type].LoginNeeded(
        tab, runner, self._credentials[credentials_type])

  def LoginNoLongerNeeded(self, tab, credentials_type):
    assert credentials_type in self._backends
    self._backends[credentials_type].LoginNoLongerNeeded(tab)

  @property
  def credentials_path(self):  # pylint: disable=E0202
    return self._credentials_path

  @credentials_path.setter
  def credentials_path(self, credentials_path):  # pylint: disable=E0202
    self._credentials_path = credentials_path
    self._RebuildCredentials()

  def Add(self, credentials_type, data):
    """Merges |data| into the extra credentials for |credentials_type|."""
    if credentials_type not in self._extra_credentials:
      self._extra_credentials[credentials_type] = {}
    for k, v in data.items():
      assert k not in self._extra_credentials[credentials_type]
      self._extra_credentials[credentials_type][k] = v
    self._RebuildCredentials()

  def _ResetLoggedInState(self):
    """Makes the backends think we're not logged in even though we are.

    Should only be used in unit tests to simulate --dont-override-profile.
    """
    for backend in self._backends.values():
      backend._ResetLoggedInState()  # pylint: disable=W0212

  def _RebuildCredentials(self):
    credentials = {}
    if self._credentials_path is None:  # identity check, not '== None'
      pass
    elif os.path.exists(self._credentials_path):
      with open(self._credentials_path, 'r') as f:
        credentials = json.loads(f.read())

    # TODO(nduca): use system keychain, if possible.
    homedir_credentials_path = os.path.expanduser('~/.telemetry-credentials')
    homedir_credentials = {}

    if (not options_for_unittests.GetCopy() and
        os.path.exists(homedir_credentials_path)):
      logging.info("Found ~/.telemetry-credentials. Its contents will be used "
                   "when no other credentials can be found.")
      with open(homedir_credentials_path, 'r') as f:
        homedir_credentials = json.loads(f.read())

    self._credentials = {}
    all_keys = set(credentials.keys()).union(
        homedir_credentials.keys()).union(
            self._extra_credentials.keys())

    for k in all_keys:
      if k in credentials:
        self._credentials[k] = credentials[k]
      if k in homedir_credentials:
        logging.info("Will use ~/.telemetry-credentials for %s logins." % k)
        self._credentials[k] = homedir_credentials[k]
      if k in self._extra_credentials:
        self._credentials[k] = self._extra_credentials[k]

  def WarnIfMissingCredentials(self, page):
    if page.credentials and not self.CanLogin(page.credentials):
      files_to_tweak = []
      if page.credentials_path:
        files_to_tweak.append(page.credentials_path)
      files_to_tweak.append('~/.telemetry-credentials')

      example_credentials_file = os.path.join(
          util.GetTelemetryDir(), 'examples', 'credentials_example.json')

      logging.warning("""
Credentials for %s were not found. page %s will not be tested.

To fix this, either follow the instructions to authenticate to gsutil
here:
http://www.chromium.org/developers/telemetry/upload_to_cloud_storage,

or add your own credentials to:
    %s

An example credentials file you can copy from is here:
    %s\n""" % (page.credentials, page, ' or '.join(files_to_tweak),
               example_credentials_file))
| bsd-3-clause |
vveerava/Openstack | neutron/common/config.py | 4 | 8551 | # Copyright 2011 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Neutron
"""
import os
import sys
from oslo.config import cfg
from oslo.db import options as db_options
from oslo import messaging
from paste import deploy
from neutron.api.v2 import attributes
from neutron.common import utils
from neutron.i18n import _LI
from neutron.openstack.common import log as logging
from neutron import version
LOG = logging.getLogger(__name__)
# Core Neutron server options, registered on the global CONF object below.
core_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_("The host IP to bind to")),
    cfg.IntOpt('bind_port', default=9696,
               help=_("The port to bind to")),
    cfg.StrOpt('api_paste_config', default="api-paste.ini",
               help=_("The API paste config file to use")),
    cfg.StrOpt('api_extensions_path', default="",
               help=_("The path for API extensions")),
    cfg.StrOpt('auth_strategy', default='keystone',
               help=_("The type of authentication to use")),
    cfg.StrOpt('core_plugin',
               help=_("The core plugin Neutron will use")),
    cfg.ListOpt('service_plugins', default=[],
                help=_("The service plugins Neutron will use")),
    cfg.StrOpt('base_mac', default="fa:16:3e:00:00:00",
               help=_("The base MAC address Neutron will use for VIFs")),
    cfg.IntOpt('mac_generation_retries', default=16,
               help=_("How many times Neutron will retry MAC generation")),
    cfg.BoolOpt('allow_bulk', default=True,
                help=_("Allow the usage of the bulk API")),
    cfg.BoolOpt('allow_pagination', default=False,
                help=_("Allow the usage of the pagination")),
    cfg.BoolOpt('allow_sorting', default=False,
                help=_("Allow the usage of the sorting")),
    cfg.StrOpt('pagination_max_limit', default="-1",
               help=_("The maximum number of items returned in a single "
                      "response, value was 'infinite' or negative integer "
                      "means no limit")),
    cfg.IntOpt('max_dns_nameservers', default=5,
               help=_("Maximum number of DNS nameservers")),
    cfg.IntOpt('max_subnet_host_routes', default=20,
               help=_("Maximum number of host routes per subnet")),
    cfg.IntOpt('max_fixed_ips_per_port', default=5,
               help=_("Maximum number of fixed ips per port")),
    cfg.IntOpt('dhcp_lease_duration', default=86400,
               deprecated_name='dhcp_lease_time',
               help=_("DHCP lease duration (in seconds). Use -1 to tell "
                      "dnsmasq to use infinite lease times.")),
    cfg.BoolOpt('dhcp_agent_notification', default=True,
                help=_("Allow sending resource operation"
                       " notification to DHCP agent")),
    cfg.BoolOpt('allow_overlapping_ips', default=False,
                help=_("Allow overlapping IP support in Neutron")),
    cfg.StrOpt('host', default=utils.get_hostname(),
               help=_("The hostname Neutron is running on")),
    cfg.BoolOpt('force_gateway_on_subnet', default=True,
                help=_("Ensure that configured gateway is on subnet. "
                       "For IPv6, validate only if gateway is not a link "
                       "local address. Deprecated, to be removed during the "
                       "K release, at which point the check will be "
                       "mandatory.")),
    cfg.BoolOpt('notify_nova_on_port_status_changes', default=True,
                help=_("Send notification to nova when port status changes")),
    cfg.BoolOpt('notify_nova_on_port_data_changes', default=True,
                help=_("Send notification to nova when port data (fixed_ips/"
                       "floatingip) changes so nova can update its cache.")),
    cfg.StrOpt('nova_url',
               default='http://127.0.0.1:8774/v2',
               help=_('URL for connection to nova')),
    cfg.StrOpt('nova_admin_username',
               help=_('Username for connecting to nova in admin context')),
    cfg.StrOpt('nova_admin_password',
               help=_('Password for connection to nova in admin context'),
               secret=True),
    cfg.StrOpt('nova_admin_tenant_id',
               help=_('The uuid of the admin nova tenant')),
    cfg.StrOpt('nova_admin_tenant_name',
               help=_('The name of the admin nova tenant')),
    cfg.StrOpt('nova_admin_auth_url',
               default='http://localhost:5000/v2.0',
               help=_('Authorization URL for connecting to nova in admin '
                      'context')),
    cfg.StrOpt('nova_ca_certificates_file',
               help=_('CA file for novaclient to verify server certificates')),
    cfg.BoolOpt('nova_api_insecure', default=False,
                help=_("If True, ignore any SSL validation issues")),
    cfg.StrOpt('nova_region_name',
               help=_('Name of nova region to use. Useful if keystone manages'
                      ' more than one region.')),
    cfg.IntOpt('send_events_interval', default=2,
               help=_('Number of seconds between sending events to nova if '
                      'there are any events to send.')),
]
# CLI-only core options.
core_cli_opts = [
    cfg.StrOpt('state_path',
               default='/var/lib/neutron',
               help=_("Where to store Neutron state files. "
                      "This directory must be writable by the agent.")),
]
# Register the configuration options
cfg.CONF.register_opts(core_opts)
cfg.CONF.register_cli_opts(core_cli_opts)
# Ensure that the control exchange is set correctly
messaging.set_transport_defaults(control_exchange='neutron')
# In-memory SQLite by default; deployments override [database]/connection.
_SQL_CONNECTION_DEFAULT = 'sqlite://'
# Update the default QueuePool parameters. These can be tweaked by the
# configuration variables - max_pool_size, max_overflow and pool_timeout
db_options.set_defaults(cfg.CONF,
                        connection=_SQL_CONNECTION_DEFAULT,
                        sqlite_db='', max_pool_size=10,
                        max_overflow=20, pool_timeout=10)
def init(args, **kwargs):
    """Parse config files / CLI args and initialize RPC.

    Raises an exception when the configured base_mac does not look like a
    valid MAC address prefix.
    """
    cfg.CONF(args=args, project='neutron',
             version='%%(prog)s %s' % version.version_info.release_string(),
             **kwargs)
    # FIXME(ihrachys): if import is put in global, circular import
    # failure occurs
    from neutron.common import rpc as n_rpc
    n_rpc.init(cfg.CONF)
    # Validate that the base_mac is of the correct format
    msg = attributes._validate_regex(cfg.CONF.base_mac,
                                     attributes.MAC_PATTERN)
    if msg:
        msg = _("Base MAC: %s") % msg
        raise Exception(msg)
def setup_logging():
    """Sets up the logging options for a log with supplied name."""
    product_name = "neutron"
    logging.setup(product_name)
    LOG.info(_LI("Logging enabled!"))
    LOG.info(_LI("%(prog)s version %(version)s"),
             {'prog': sys.argv[0],
              'version': version.version_info.release_string()})
    # Pass the argument lazily so the join only runs when DEBUG is enabled,
    # consistent with the lazy-formatting style of the INFO calls above.
    LOG.debug("command line: %s", " ".join(sys.argv))
def load_paste_app(app_name):
    """Builds and returns a WSGI app from a paste config file.

    :param app_name: Name of the application to load
    :raises ConfigFilesNotFoundError when config file cannot be located
    :raises RuntimeError when application cannot be loaded from config file
    """
    config_path = cfg.CONF.find_file(cfg.CONF.api_paste_config)
    if not config_path:
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    config_path = os.path.abspath(config_path)
    LOG.info(_LI("Config paste file: %s"), config_path)
    try:
        app = deploy.loadapp("config:%s" % config_path, name=app_name)
    except (LookupError, ImportError):
        # Re-raise as RuntimeError so callers need not know paste.deploy
        # internals; the original traceback is captured in the log.
        msg = (_("Unable to load %(app_name)s from "
                 "configuration file %(config_path)s.") %
               {'app_name': app_name,
                'config_path': config_path})
        LOG.exception(msg)
        raise RuntimeError(msg)
    return app
a-parhom/edx-platform | common/djangoapps/third_party_auth/settings.py | 1 | 4672 | """Settings for the third-party auth module.
The flow for settings registration is:
The base settings file contains a boolean, ENABLE_THIRD_PARTY_AUTH, indicating
whether this module is enabled. startup.py probes the ENABLE_THIRD_PARTY_AUTH.
If true, it:
a) loads this module.
b) calls apply_settings(), passing in the Django settings
"""
from openedx.features.enterprise_support.api import insert_enterprise_pipeline_elements
def apply_settings(django_settings):
    """Set provider-independent settings.

    Mutates *django_settings* in place: installs the social-auth pipeline,
    middleware, redirect URLs and context processors used by all
    third-party auth providers.
    """
    # Whitelisted URL query parameters retained in the pipeline session.
    # Params not in this whitelist will be silently dropped.
    django_settings.FIELDS_STORED_IN_SESSION = ['auth_entry', 'next']
    # Inject exception middleware to make redirects fire.
    django_settings.MIDDLEWARE_CLASSES.extend(
        ['third_party_auth.middleware.ExceptionMiddleware']
    )
    # Where to send the user if there's an error during social authentication
    # and we cannot send them to a more specific URL
    # (see middleware.ExceptionMiddleware).
    django_settings.SOCIAL_AUTH_LOGIN_ERROR_URL = '/'
    # Where to send the user once social authentication is successful.
    django_settings.SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/dashboard'
    # Disable sanitizing of redirect urls in social-auth since the platform
    # already does its own sanitization via the LOGIN_REDIRECT_WHITELIST setting.
    django_settings.SOCIAL_AUTH_SANITIZE_REDIRECTS = False
    # Adding extra key value pair in the url query string for microsoft as per request
    django_settings.SOCIAL_AUTH_AZUREAD_OAUTH2_AUTH_EXTRA_ARGUMENTS = {'msafed': 0}
    # Inject our customized auth pipeline. All auth backends must work with
    # this pipeline.
    django_settings.SOCIAL_AUTH_PIPELINE = [
        'third_party_auth.pipeline.parse_query_params',
        'social_core.pipeline.social_auth.social_details',
        'social_core.pipeline.social_auth.social_uid',
        'social_core.pipeline.social_auth.auth_allowed',
        'social_core.pipeline.social_auth.social_user',
        'third_party_auth.pipeline.associate_by_email_if_login_api',
        'social_core.pipeline.user.get_username',
        'third_party_auth.pipeline.set_pipeline_timeout',
        'third_party_auth.pipeline.ensure_user_information',
        'social_core.pipeline.user.create_user',
        'social_core.pipeline.social_auth.associate_user',
        'social_core.pipeline.social_auth.load_extra_data',
        'social_core.pipeline.user.user_details',
        'third_party_auth.pipeline.user_details_force_sync',
        'third_party_auth.pipeline.set_id_verification_status',
        'third_party_auth.pipeline.set_logged_in_cookies',
        'third_party_auth.pipeline.login_analytics',
    ]
    # Add enterprise pipeline elements if the enterprise app is installed
    insert_enterprise_pipeline_elements(django_settings.SOCIAL_AUTH_PIPELINE)
    # Required so that we can use unmodified PSA OAuth2 backends:
    django_settings.SOCIAL_AUTH_STRATEGY = 'third_party_auth.strategy.ConfigurationModelStrategy'
    # We let the user specify their email address during signup.
    django_settings.SOCIAL_AUTH_PROTECTED_USER_FIELDS = ['email']
    # Disable exceptions by default for prod so you get redirect behavior
    # instead of a Django error page. During development you may want to
    # enable this when you want to get stack traces rather than redirections.
    django_settings.SOCIAL_AUTH_RAISE_EXCEPTIONS = False
    # Allow users to login using social auth even if their account is not verified yet
    # This is required since we [ab]use django's 'is_active' flag to indicate verified
    # accounts; without this set to True, python-social-auth won't allow us to link the
    # user's account to the third party account during registration (since the user is
    # not verified at that point).
    # We also generally allow unverified third party auth users to login (see the logic
    # in ensure_user_information in pipeline.py) because otherwise users who use social
    # auth to register with an invalid email address can become "stuck".
    # TODO: Remove the following if/when email validation is separated from the is_active flag.
    django_settings.SOCIAL_AUTH_INACTIVE_USER_LOGIN = True
    django_settings.SOCIAL_AUTH_INACTIVE_USER_URL = '/auth/inactive'
    # Context processors required under Django.
    django_settings.SOCIAL_AUTH_UUID_LENGTH = 4
    django_settings.DEFAULT_TEMPLATE_ENGINE['OPTIONS']['context_processors'] += (
        'social_django.context_processors.backends',
        'social_django.context_processors.login_redirect',
    )
| agpl-3.0 |
Kiiv/CouchPotatoServer | libs/pyasn1/codec/ber/encoder.py | 185 | 13626 | # BER encoder
from pyasn1.type import base, tag, univ, char, useful
from pyasn1.codec.ber import eoo
from pyasn1.compat.octets import int2oct, oct2int, ints2octs, null, str2octs
from pyasn1 import debug, error
# Base exception for BER encoding failures.
class Error(Exception): pass
class AbstractItemEncoder:
    """Base class for BER type encoders: builds tag and length octets."""
    # Whether this type may use the indefinite length form.
    supportIndefLenMode = 1
    def encodeTag(self, t, isConstructed):
        """Return the identifier (tag) octets for tag *t*."""
        tagClass, tagFormat, tagId = t.asTuple()  # this is a hotspot
        v = tagClass | tagFormat
        if isConstructed:
            v = v|tag.tagFormatConstructed
        if tagId < 31:
            return int2oct(v|tagId)
        else:
            # High-tag-number form: leading 0x1F octet, then base-128
            # digits with the continuation bit set on all but the last.
            s = int2oct(tagId&0x7f)
            tagId = tagId >> 7
            while tagId:
                s = int2oct(0x80|(tagId&0x7f)) + s
                tagId = tagId >> 7
            return int2oct(v|0x1F) + s
    def encodeLength(self, length, defMode):
        """Return the length octets (definite or indefinite form)."""
        if not defMode and self.supportIndefLenMode:
            return int2oct(0x80)
        if length < 0x80:
            # Short definite form: single octet.
            return int2oct(length)
        else:
            # Long definite form: count octet, then big-endian length bytes.
            substrate = null
            while length:
                substrate = int2oct(length&0xff) + substrate
                length = length >> 8
            substrateLen = len(substrate)
            if substrateLen > 126:
                raise Error('Length octets overflow (%d)' % substrateLen)
            return int2oct(0x80 | substrateLen) + substrate
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        # Subclasses must return a (substrate, isConstructed) pair.
        raise Error('Not implemented')
    def _encodeEndOfOctets(self, encodeFun, defMode):
        # Indefinite-length encodings are terminated by end-of-octets.
        if defMode or not self.supportIndefLenMode:
            return null
        else:
            return encodeFun(eoo.endOfOctets, defMode)
    def encode(self, encodeFun, value, defMode, maxChunkSize):
        """Encode *value* as a full TLV; untagged values are bare substrate."""
        substrate, isConstructed = self.encodeValue(
            encodeFun, value, defMode, maxChunkSize
            )
        tagSet = value.getTagSet()
        if tagSet:
            if not isConstructed:  # primitive form implies definite mode
                defMode = 1
            return self.encodeTag(
                tagSet[-1], isConstructed
                ) + self.encodeLength(
                len(substrate), defMode
                ) + substrate + self._encodeEndOfOctets(encodeFun, defMode)
        else:
            return substrate  # untagged value
class EndOfOctetsEncoder(AbstractItemEncoder):
    """Encoder for the end-of-octets sentinel used by indefinite-length mode."""

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        # An EOO marker carries no payload and is a primitive encoding.
        return (null, 0)
class ExplicitlyTaggedItemEncoder(AbstractItemEncoder):
    # Peels the outermost tag off the value and re-encodes the remainder;
    # encode() then wraps the result in the explicit (outer) tag.
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        if isinstance(value, base.AbstractConstructedAsn1Item):
            value = value.clone(tagSet=value.getTagSet()[:-1],
                                cloneValueFlag=1)
        else:
            value = value.clone(tagSet=value.getTagSet()[:-1])
        # An explicitly tagged value is always constructed
        return encodeFun(value, defMode, maxChunkSize), 1

# Shared singleton used by Encoder.__call__ when a value has multiple tags
explicitlyTaggedItemEncoder = ExplicitlyTaggedItemEncoder()
class BooleanEncoder(AbstractItemEncoder):
    """Encoder for ASN.1 BOOLEAN (X.690 8.2): one content octet, 0x00/0x01."""
    supportIndefLenMode = 0  # primitive type: always definite length
    _true = ints2octs((1,))
    _false = ints2octs((0,))

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        # Replaced the fragile `cond and a or b` idiom with a conditional
        # expression; behavior is unchanged (_true/_false are truthy octets)
        # but the intent no longer depends on the truthiness of the results.
        return (self._true if value else self._false), 0
class IntegerEncoder(AbstractItemEncoder):
    """Encoder for INTEGER: minimal-length two's complement (X.690 8.3)."""
    supportIndefLenMode = 0
    # When True, zero is encoded with zero content octets instead of 0x00
    supportCompactZero = False

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        if value == 0:  # shortcut for zero value
            if self.supportCompactZero:
                # this seems to be a correct way for encoding zeros
                return null, 0
            else:
                # this seems to be a widespread way for encoding zeros
                return ints2octs((0,)), 0
        octets = []
        value = int(value)  # to save on ops on asn1 type
        # collect two's-complement octets, least significant first (prepended)
        while 1:
            octets.insert(0, value & 0xff)
            if value == 0 or value == -1:
                break
            value = value >> 8
        # positive number whose top bit is set needs a leading sign octet
        if value == 0 and octets[0] & 0x80:
            octets.insert(0, 0)
        # strip redundant leading 0x00 / 0xff octets (minimal encoding)
        while len(octets) > 1 and \
                (octets[0] == 0 and octets[1] & 0x80 == 0 or \
                 octets[0] == 0xff and octets[1] & 0x80 != 0):
            del octets[0]
        return ints2octs(octets), 0
class BitStringEncoder(AbstractItemEncoder):
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        """Encode a BIT STRING; fragments into sub-strings when maxChunkSize
        is set and the value is longer than maxChunkSize octets."""
        if not maxChunkSize or len(value) <= maxChunkSize*8:
            # pack bits into octets; r maps octet index -> accumulated octet
            r = {}; l = len(value); p = 0; j = 7
            while p < l:
                i, j = divmod(p, 8)
                r[i] = r.get(i,0) | value[p]<<(7-j)
                p = p + 1
            keys = list(r); keys.sort()
            # first content octet = number of unused bits in the final octet
            return int2oct(7-j) + ints2octs([r[k] for k in keys]), 0
        else:
            # constructed encoding: a series of shorter BIT STRING fragments
            pos = 0; substrate = null
            while 1:
                # count in octets
                v = value.clone(value[pos*8:pos*8+maxChunkSize*8])
                if not v:
                    break
                substrate = substrate + encodeFun(v, defMode, maxChunkSize)
                pos = pos + maxChunkSize
            return substrate, 1
class OctetStringEncoder(AbstractItemEncoder):
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        """Encode an OCTET STRING, fragmenting into a constructed series of
        maxChunkSize-octet sub-strings when requested."""
        if not maxChunkSize or len(value) <= maxChunkSize:
            # primitive form: content octets verbatim
            return value.asOctets(), 0
        else:
            pos = 0; substrate = null
            while 1:
                v = value.clone(value[pos:pos+maxChunkSize])
                if not v:
                    break
                substrate = substrate + encodeFun(v, defMode, maxChunkSize)
                pos = pos + maxChunkSize
            return substrate, 1
class NullEncoder(AbstractItemEncoder):
    """Encoder for ASN.1 NULL: empty, primitive content."""

    supportIndefLenMode = 0

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        # NULL carries no content octets and is never constructed.
        return (null, 0)
class ObjectIdentifierEncoder(AbstractItemEncoder):
    """Encoder for OBJECT IDENTIFIER (X.690 8.19)."""
    supportIndefLenMode = 0
    # Pre-packed leading arcs for the most common OID prefixes
    precomputedValues = {
        (1, 3, 6, 1, 2): (43, 6, 1, 2),
        (1, 3, 6, 1, 4): (43, 6, 1, 4)
    }

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        oid = value.asTuple()
        if oid[:5] in self.precomputedValues:
            octets = self.precomputedValues[oid[:5]]
            index = 5
        else:
            if len(oid) < 2:
                raise error.PyAsn1Error('Short OID %s' % (value,))

            # Build the first twos: first two arcs share one octet (X*40+Y)
            if oid[0] > 6 or oid[1] > 39 or oid[0] == 6 and oid[1] > 15:
                raise error.PyAsn1Error(
                    'Initial sub-ID overflow %s in OID %s' % (oid[:2], value)
                    )
            octets = (oid[0] * 40 + oid[1],)
            index = 2

        # Cycle through subids, emitting base-128 with continuation bits
        for subid in oid[index:]:
            if subid > -1 and subid < 128:
                # Optimize for the common case
                octets = octets + (subid & 0x7f,)
            elif subid < 0 or subid > 0xFFFFFFFF:
                raise error.PyAsn1Error(
                    'SubId overflow %s in %s' % (subid, value)
                    )
            else:
                # Pack large Sub-Object IDs
                res = (subid & 0x7f,)
                subid = subid >> 7
                while subid > 0:
                    res = (0x80 | (subid & 0x7f),) + res
                    subid = subid >> 7
                # Add packed Sub-Object ID to resulted Object ID
                octets += res

        return ints2octs(octets), 0
class RealEncoder(AbstractItemEncoder):
    """Encoder for ASN.1 REAL (X.690 8.5), base-10 (NR3) or base-2 form."""
    supportIndefLenMode = 0

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        if value.isPlusInfinity():
            return int2oct(0x40), 0
        if value.isMinusInfinity():
            return int2oct(0x41), 0
        m, b, e = value
        if not m:
            return null, 0
        if b == 10:
            # character (decimal) encoding, NR3 form
            return str2octs('\x03%dE%s%d' % (m, e == 0 and '+' or '', e)), 0
        elif b == 2:
            fo = 0x80  # binary enoding
            if m < 0:
                fo = fo | 0x40  # sign bit
                m = -m
            while int(m) != m:  # drop floating point
                m *= 2
                e -= 1
            while m & 0x1 == 0:  # mantissa normalization
                m >>= 1
                e += 1
            # collect two's-complement exponent octets, big-endian
            eo = null
            while e not in (0, -1):
                eo = int2oct(e & 0xff) + eo
                e >>= 8
            if e == 0 and eo and oct2int(eo[0]) & 0x80:
                # positive exponent with top bit set needs a sign octet
                eo = int2oct(0) + eo
            if e == 0 and not eo:
                # BUG FIX: a zero exponent still occupies one octet; the
                # original emitted no exponent octets at all in this case.
                eo = int2oct(0)
            if e == -1 and not (eo and oct2int(eo[0]) & 0x80):
                # BUG FIX: negative exponents ending the loop at -1 must
                # keep the sign bit (e.g. e == -1 encodes as 0xFF).
                eo = int2oct(0xff) + eo
            n = len(eo)
            if n > 0xff:
                raise error.PyAsn1Error('Real exponent overflow')
            if n == 1:
                pass
            elif n == 2:
                fo |= 1
            elif n == 3:
                fo |= 2
            else:
                fo |= 3
                # BUG FIX: with the long-exponent format (bits 0-1 == 11),
                # X.690 8.5.6.4 d) says the next octet carries the *number*
                # of exponent octets.  The original wrote int2oct(n//0xff+1),
                # which is wrong for every n > 1; n is already bounded above.
                eo = int2oct(n & 0xff) + eo
            # mantissa octets, big-endian
            po = null
            while m:
                po = int2oct(m & 0xff) + po
                m >>= 8
            substrate = int2oct(fo) + eo + po
            return substrate, 0
        else:
            raise error.PyAsn1Error('Prohibited Real base %s' % b)
class SequenceEncoder(AbstractItemEncoder):
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        """Encode SEQUENCE/SET: components back-to-front, skipping absent
        OPTIONAL components and components equal to their DEFAULT."""
        value.setDefaultComponents()
        value.verifySizeSpec()
        substrate = null; idx = len(value)
        while idx > 0:
            idx = idx - 1
            if value[idx] is None:  # Optional component
                continue
            component = value.getDefaultComponentByPosition(idx)
            if component is not None and component == value[idx]:
                # component equals its DEFAULT -- omit from encoding
                continue
            substrate = encodeFun(
                value[idx], defMode, maxChunkSize
                ) + substrate
        return substrate, 1
class SequenceOfEncoder(AbstractItemEncoder):
    """Encoder for SEQUENCE OF / SET OF: concatenation of component TLVs."""

    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        value.verifySizeSpec()
        # Iterate components first-to-last; equivalent to the classic
        # decrement-and-prepend loop, just walked forward.
        substrate = null
        for position in range(len(value)):
            substrate = substrate + encodeFun(
                value[position], defMode, maxChunkSize
                )
        return substrate, 1
class ChoiceEncoder(AbstractItemEncoder):
    # A CHOICE encodes as whichever component is currently selected;
    # reported as constructed so the component carries its own tagging.
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        return encodeFun(value.getComponent(), defMode, maxChunkSize), 1
class AnyEncoder(OctetStringEncoder):
    # ANY holds pre-encoded octets which are emitted verbatim; in
    # indefinite-length mode it is reported as constructed (defMode == 0).
    def encodeValue(self, encodeFun, value, defMode, maxChunkSize):
        return value.asOctets(), defMode == 0
# Tag-to-codec map: routes an ASN.1 tag set to its value encoder instance
tagMap = {
    eoo.endOfOctets.tagSet: EndOfOctetsEncoder(),
    univ.Boolean.tagSet: BooleanEncoder(),
    univ.Integer.tagSet: IntegerEncoder(),
    univ.BitString.tagSet: BitStringEncoder(),
    univ.OctetString.tagSet: OctetStringEncoder(),
    univ.Null.tagSet: NullEncoder(),
    univ.ObjectIdentifier.tagSet: ObjectIdentifierEncoder(),
    univ.Enumerated.tagSet: IntegerEncoder(),
    univ.Real.tagSet: RealEncoder(),
    # Sequence & Set have same tags as SequenceOf & SetOf
    univ.SequenceOf.tagSet: SequenceOfEncoder(),
    univ.SetOf.tagSet: SequenceOfEncoder(),
    univ.Choice.tagSet: ChoiceEncoder(),
    # character string types
    char.UTF8String.tagSet: OctetStringEncoder(),
    char.NumericString.tagSet: OctetStringEncoder(),
    char.PrintableString.tagSet: OctetStringEncoder(),
    char.TeletexString.tagSet: OctetStringEncoder(),
    char.VideotexString.tagSet: OctetStringEncoder(),
    char.IA5String.tagSet: OctetStringEncoder(),
    char.GraphicString.tagSet: OctetStringEncoder(),
    char.VisibleString.tagSet: OctetStringEncoder(),
    char.GeneralString.tagSet: OctetStringEncoder(),
    char.UniversalString.tagSet: OctetStringEncoder(),
    char.BMPString.tagSet: OctetStringEncoder(),
    # useful types
    useful.GeneralizedTime.tagSet: OctetStringEncoder(),
    useful.UTCTime.tagSet: OctetStringEncoder()
    }
# Type-to-codec map for ambiguous ASN.1 types
typeMap = {
univ.Set.typeId: SequenceEncoder(),
univ.SetOf.typeId: SequenceOfEncoder(),
univ.Sequence.typeId: SequenceEncoder(),
univ.SequenceOf.typeId: SequenceOfEncoder(),
univ.Choice.typeId: ChoiceEncoder(),
univ.Any.typeId: AnyEncoder()
}
class Encoder:
    """Top-level BER encoder: picks a value codec via type/tag maps and
    drives the recursive encoding."""

    def __init__(self, tagMap, typeMap={}):
        # NOTE: the mutable default is harmless here -- neither map is
        # ever mutated by this class.
        self.__tagMap = tagMap
        self.__typeMap = typeMap

    def __call__(self, value, defMode=1, maxChunkSize=0):
        """Encode *value*; defMode selects definite-length form and
        maxChunkSize enables fragmented (constructed) string encoding."""
        debug.logger & debug.flagEncoder and debug.logger('encoder called in %sdef mode, chunk size %s for type %s, value:\n%s' % (not defMode and 'in' or '', maxChunkSize, value.__class__.__name__, value.prettyPrint()))
        tagSet = value.getTagSet()
        if len(tagSet) > 1:
            # multiple tags -> explicit tagging; peel one tag per recursion
            concreteEncoder = explicitlyTaggedItemEncoder
        else:
            if value.typeId is not None and value.typeId in self.__typeMap:
                concreteEncoder = self.__typeMap[value.typeId]
            elif tagSet in self.__tagMap:
                concreteEncoder = self.__tagMap[tagSet]
            else:
                # fall back to the untagged, base tag set of the type
                tagSet = value.baseTagSet
                if tagSet in self.__tagMap:
                    concreteEncoder = self.__tagMap[tagSet]
                else:
                    raise Error('No encoder for %s' % (value,))
        debug.logger & debug.flagEncoder and debug.logger('using value codec %s chosen by %r' % (concreteEncoder.__class__.__name__, tagSet))
        substrate = concreteEncoder.encode(
            self, value, defMode, maxChunkSize
            )
        debug.logger & debug.flagEncoder and debug.logger('built %s octets of substrate: %s\nencoder completed' % (len(substrate), debug.hexdump(substrate)))
        return substrate

# Module-level BER encode() entry point
encode = Encoder(tagMap, typeMap)
| gpl-3.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-57/modules/sheets/lib/python2.7/site-packages/pip/compat/dictconfig.py | 921 | 23096 | # This is a copy of the Python logging.config.dictconfig module,
# reproduced with permission. It is provided here for backwards
# compatibility for Python versions prior to 2.7.
#
# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import absolute_import
import logging.handlers
import re
import sys
import types
from pip._vendor import six
# flake8: noqa
# Pattern for (ASCII) Python identifiers, matched case-insensitively.
IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I)


def valid_ident(s):
    """Return True when *s* is a valid Python identifier; raise ValueError otherwise."""
    if IDENTIFIER.match(s) is None:
        raise ValueError('Not a valid Python identifier: %r' % s)
    return True
#
# This function is defined in logging only in recent versions of Python.
# On older interpreters we provide a functionally equivalent backport.
#
try:
    from logging import _checkLevel
except ImportError:
    def _checkLevel(level):
        # Accept an int level or a registered level name; return the
        # numeric level (mirrors logging._checkLevel on newer Pythons).
        if isinstance(level, int):
            rv = level
        elif str(level) == level:
            # string-like: must be a key in logging's level-name table
            if level not in logging._levelNames:
                raise ValueError('Unknown level: %r' % level)
            rv = logging._levelNames[level]
        else:
            raise TypeError('Level not an integer or a '
                            'valid string: %r' % level)
        return rv
# The ConvertingXXX classes are wrappers around standard Python containers,
# and they serve to convert any suitable values in the container. The
# conversion converts base dicts, lists and tuples to their wrapped
# equivalents, whereas strings which match a conversion format are converted
# appropriately.
#
# Each wrapper should have a configurator attribute holding the actual
# configurator to use for conversion.
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Requires a `configurator` attribute (attached by BaseConfigurator);
    values are lazily converted on access and cached back into the dict.
    """

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, key, default=None):
        # NOTE: unlike dict.pop, a missing key returns the default instead
        # of raising; the popped value is converted but not re-stored.
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class ConvertingList(list):
    """A converting list wrapper (see ConvertingDict for the mechanism)."""

    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        # If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result
class ConvertingTuple(tuple):
    """A converting tuple wrapper."""

    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        # Tuples are immutable, so the converted value cannot be cached
        # back; only the parent/key bookkeeping is recorded.
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result
class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """

    # Recognizes "prefix://suffix" strings subject to value conversion
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    # Tokenizers for the cfg:// path mini-language (word, .attr, [index])
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    # protocol prefix -> converter method name
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = __import__

    def __init__(self, config):
        self.config = ConvertingDict(config)
        self.config.configurator = self

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # attribute may be a not-yet-imported submodule:
                    # import it and retry the attribute lookup
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol."""
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            # walk ".attr" and "[index]" segments until the path is consumed
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        # rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, six.string_types): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory."""
        c = config.pop('()')
        # On Python 2, old-style classes are callable but not resolvable
        # by name; resolve dotted-path strings to the actual factory.
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict((k, config[k]) for k in config if valid_ident(k))
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.

    NOTE: this backport deliberately targets Python 2 (it catches
    `StandardError`, which does not exist on Python 3).
    """

    def configure(self):
        """Do the configuration."""
        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        # Hold logging's module lock for the whole reconfiguration so other
        # threads cannot observe a half-configured state.
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                # incremental handler config only if handler name
                # ties in to logging._handlers (Python 2.7)
                if sys.version_info[:2] == (2, 7):
                    for name in handlers:
                        if name not in logging._handlers:
                            raise ValueError('No handler found with '
                                             'name %r' % name)
                        else:
                            try:
                                handler = logging._handlers[name]
                                handler_config = handlers[name]
                                level = handler_config.get('level', None)
                                if level:
                                    handler.setLevel(_checkLevel(level))
                            except StandardError as e:
                                raise ValueError('Unable to configure handler '
                                                 '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)
                logging._handlers.clear()
                del logging._handlerList[:]
                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                            formatters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))
                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))
                # Next, do loggers - they refer to handlers and filters
                # we don't want to lose the existing loggers,
                # since other threads may have pointers to them.
                # existing is set to contain all existing loggers,
                # and as we go through the new configuration we
                # remove any which are configured. At the end,
                # what's left in existing is the set of loggers
                # which were in the previous configuration but
                # which are not in the new configuration.
                root = logging.root
                existing = list(root.manager.loggerDict)
                # The list needs to be sorted so that we can
                # avoid disabling child loggers of explicitly
                # named loggers. With a sorted list it is easier
                # to find the child loggers.
                existing.sort()
                # We'll keep the list of existing loggers
                # which are children of named loggers here...
                child_loggers = []
                # now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                                (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                # Disable any old loggers. There's no point deleting
                # them as other threads may continue to hold references
                # and by disabling them, you stop them doing any logging.
                # However, don't disable children of named loggers, as that's
                # probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True
                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()

    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError as te:
                if "'format'" not in str(te):
                    raise
                # Name of parameter changed from fmt to format.
                # Retry with old name.
                # This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result

    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result

    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError as e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))

    def configure_handler(self, config):
        """Configure a handler from a dictionary."""
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            klass = self.resolve(config.pop('class'))
            # Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                    'target' in config:
                try:
                    config['target'] = self.config['handlers'][config['target']]
                except StandardError as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                    'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                    'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict((k, config[k]) for k in config if valid_ident(k))
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            # The argument name changed from strm to stream
            # Retry with old name.
            # This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(_checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result

    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except StandardError as e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))

    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(_checkLevel(level))
        if not incremental:
            # Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)

    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate

    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)
# Hook point: replace dictConfigClass to customize configuration behavior
dictConfigClass = DictConfigurator


def dictConfig(config):
    """Configure logging using a dictionary."""
    dictConfigClass(config).configure()
| gpl-3.0 |
luofei98/qgis | python/plugins/processing/modeler/ModelerDialog.py | 1 | 24658 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ModelerDialog.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from processing.modeler.WrongModelException import WrongModelException
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import codecs
import json
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.gui.HelpEditionDialog import HelpEditionDialog
from processing.gui.ParametersDialog import ParametersDialog
from processing.gui.AlgorithmClassification import AlgorithmDecorator
from processing.modeler.ModelerParameterDefinitionDialog import ModelerParameterDefinitionDialog
from processing.modeler.ModelerAlgorithm import ModelerAlgorithm, ModelerParameter
from processing.modeler.ModelerParametersDialog import ModelerParametersDialog
from processing.modeler.ModelerUtils import ModelerUtils
from processing.modeler.ModelerScene import ModelerScene
from processing.core.ProcessingLog import ProcessingLog
from processing.tools.system import *
from processing.ui.ui_DlgModeler import Ui_DlgModeler
class ModelerDialog(QDialog, Ui_DlgModeler):
USE_CATEGORIES = '/Processing/UseSimplifiedInterface'
CANVAS_SIZE = 4000
    def __init__(self, alg=None):
        """Build the modeler window; *alg* is an existing ModelerAlgorithm
        to edit, or None to start a new, empty model."""
        QDialog.__init__(self)
        self.setupUi(self)
        self.zoom = 1
        self.setWindowFlags(Qt.WindowMinimizeButtonHint |
                            Qt.WindowMaximizeButtonHint |
                            Qt.WindowCloseButtonHint)
        self.tabWidget.setCurrentIndex(0)
        # The scene/view pair is the model canvas
        self.scene = ModelerScene(self)
        self.scene.setSceneRect(QRectF(0, 0, self.CANVAS_SIZE, self.CANVAS_SIZE))
        self.view.setScene(self.scene)
        self.view.setAcceptDrops(True)
        self.view.ensureVisible(0, 0, 10, 10)

        # Event handlers are installed as closures on the view instance so
        # that drops of inputs/algorithms from the side trees are accepted.
        def _dragEnterEvent(event):
            if event.mimeData().hasText():
                event.acceptProposedAction()
            else:
                event.ignore()

        def _dropEvent(event):
            # The dragged text is either a parameter type name or an
            # algorithm command-line name (see the mimeData hooks below)
            if event.mimeData().hasText():
                text = event.mimeData().text()
                if text in ModelerParameterDefinitionDialog.paramTypes:
                    self.addInputOfType(text, event.pos())
                else:
                    alg = ModelerUtils.getAlgorithm(text);
                    if alg is not None:
                        self._addAlgorithm(alg.getCopy(), event.pos())
                event.accept()
            else:
                event.ignore()

        def _dragMoveEvent(event):
            if event.mimeData().hasText():
                event.accept()
            else:
                event.ignore()

        def _wheelEvent(event):
            # Mouse wheel zooms the canvas around the cursor position
            self.view.setTransformationAnchor(QGraphicsView.AnchorUnderMouse);
            factor = 1.05
            if event.delta() > 0:
                factor = 1/factor
            self.view.scale(factor, factor)
            self.view.centerOn(event.pos().x(), event.pos().y())
            self.repaintModel()

        def _enterEvent(e):
            QGraphicsView.enterEvent(self.view, e)
            self.view.viewport().setCursor(Qt.ArrowCursor)

        def _mousePressEvent(e):
            QGraphicsView.mousePressEvent(self.view, e)
            self.view.viewport().setCursor(Qt.ArrowCursor)

        def _mouseReleaseEvent(e):
            QGraphicsView.mouseReleaseEvent(self.view, e)
            self.view.viewport().setCursor(Qt.ArrowCursor)

        self.view.setDragMode(QGraphicsView.ScrollHandDrag);
        self.view.dragEnterEvent = _dragEnterEvent
        self.view.dropEvent = _dropEvent
        self.view.dragMoveEvent = _dragMoveEvent
        self.view.wheelEvent = _wheelEvent
        self.view.enterEvent = _enterEvent
        self.view.mousePressEvent = _mousePressEvent
        self.view.mouseReleaseEvent = _mouseReleaseEvent

        def _mimeDataInput(items):
            # Dragged input items carry their parameter type name as text
            mimeData = QMimeData()
            text = items[0].text(0)
            mimeData.setText(text)
            return mimeData

        self.inputsTree.mimeData = _mimeDataInput
        self.inputsTree.setDragDropMode(QTreeWidget.DragOnly)
        self.inputsTree.setDropIndicatorShown(True)

        def _mimeDataAlgorithm(items):
            # Dragged algorithm items carry their command-line name as text
            item = items[0]
            if isinstance(item, TreeAlgorithmItem):
                mimeData = QMimeData()
                mimeData.setText(item.alg.commandLineName())
            return mimeData

        self.algorithmTree.mimeData = _mimeDataAlgorithm
        self.algorithmTree.setDragDropMode(QTreeWidget.DragOnly)
        self.algorithmTree.setDropIndicatorShown(True)

        # Set icons
        self.btnOpen.setIcon(
            QgsApplication.getThemeIcon('/mActionFileOpen.svg'))
        self.btnSave.setIcon(
            QgsApplication.getThemeIcon('/mActionFileSave.svg'))
        self.btnSaveAs.setIcon(
            QgsApplication.getThemeIcon('/mActionFileSaveAs.svg'))
        self.btnExportImage.setIcon(
            QgsApplication.getThemeIcon('/mActionSaveMapAsImage.png'))
        self.btnEditHelp.setIcon(QIcon(':/processing/images/edithelp.png'))
        self.btnRun.setIcon(QIcon(':/processing/images/runalgorithm.png'))

        # Fill trees with inputs and algorithms
        self.fillInputsTree()
        self.fillAlgorithmTree()

        # setPlaceholderText only exists on newer Qt versions
        if hasattr(self.searchBox, 'setPlaceholderText'):
            self.searchBox.setPlaceholderText(self.tr('Search...'))
        if hasattr(self.textName, 'setPlaceholderText'):
            self.textName.setPlaceholderText('[Enter model name here]')
        if hasattr(self.textGroup, 'setPlaceholderText'):
            self.textGroup.setPlaceholderText('[Enter group name here]')

        # Connect signals and slots
        self.inputsTree.doubleClicked.connect(self.addInput)
        self.searchBox.textChanged.connect(self.fillAlgorithmTree)
        self.algorithmTree.doubleClicked.connect(self.addAlgorithm)
        self.btnOpen.clicked.connect(self.openModel)
        self.btnSave.clicked.connect(self.save)
        self.btnSaveAs.clicked.connect(self.saveAs)
        self.btnExportImage.clicked.connect(self.exportAsImage)
        self.btnEditHelp.clicked.connect(self.editHelp)
        self.btnRun.clicked.connect(self.runModel)

        if alg is not None:
            # Editing an existing model: populate fields and draw it
            self.alg = alg
            self.textGroup.setText(alg.group)
            self.textName.setText(alg.name)
            self.repaintModel()
        else:
            self.alg = ModelerAlgorithm()
            self.alg.modelerdialog = self

        self.view.centerOn(0, 0)
        self.alg.setModelerView(self)
        self.help = None
        # Indicates whether to update or not the toolbox after
        # closing this dialog
        self.update = False
        self.hasChanged = False
def closeEvent(self, evt):
if self.hasChanged:
ret = QMessageBox.question(self, self.tr('Message'),
self.tr('There are unsaved changes in model. Close '
'modeler without saving?'),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if ret == QMessageBox.Yes:
evt.accept()
else:
evt.ignore()
else:
evt.accept()
def editHelp(self):
dlg = HelpEditionDialog(self.alg.getCopy())
dlg.exec_()
def runModel(self):
if len(self.alg.algs) == 0:
QMessageBox.warning(self, self.tr('Empty model'),
self.tr("Model doesn't contains any algorithms and/or "
"parameters and can't be executed"))
return
if self.alg.provider is None:
# Might happen if model is opened from modeler dialog
self.alg.provider = ModelerUtils.providers['model']
alg = self.alg.getCopy()
dlg = ParametersDialog(alg)
dlg.exec_()
def save(self):
self.saveModel(False)
def saveAs(self):
self.saveModel(True)
def exportAsImage(self):
filename = unicode(QFileDialog.getSaveFileName(self,
self.tr('Save Model As Image'), '',
self.tr('PNG files (*.png *.PNG)')))
if not filename:
return
if not filename.lower().endswith('.png'):
filename += '.png'
totalRect = QRectF(0, 0, 1, 1)
for item in self.scene.items():
totalRect = totalRect.united(item.sceneBoundingRect())
totalRect.adjust(-10, -10, 10, 10)
img = QImage(totalRect.width(), totalRect.height(),
QImage.Format_ARGB32_Premultiplied)
img.fill(Qt.white)
painter = QPainter()
painter.setRenderHint(QPainter.Antialiasing)
painter.begin(img)
self.scene.render(painter, totalRect, totalRect)
painter.end()
img.save(filename)
def saveModel(self, saveAs):
if unicode(self.textGroup.text()).strip() == '' \
or unicode(self.textName.text()).strip() == '':
QMessageBox.warning(self, self.tr('Warning'),
self.tr('Please enter group and model names before saving'
))
return
self.alg.name = unicode(self.textName.text())
self.alg.group = unicode(self.textGroup.text())
if self.alg.descriptionFile is not None and not saveAs:
filename = self.alg.descriptionFile
else:
filename = unicode(QFileDialog.getSaveFileName(self,
self.tr('Save Model'),
ModelerUtils.modelsFolder(),
self.tr('Processing models (*.model)')))
if filename:
if not filename.endswith('.model'):
filename += '.model'
self.alg.descriptionFile = filename
if filename:
text = self.alg.toJson()
try:
fout = codecs.open(filename, 'w', encoding='utf-8')
except:
if saveAs:
QMessageBox.warning(self, self.tr('I/O error'),
self.tr('Unable to save edits. Reason:\n %s')
% unicode(sys.exc_info()[1]))
else:
QMessageBox.warning(self, self.tr("Can't save model"),
self.tr("This model can't be saved in its "
"original location (probably you do not "
"have permission to do it). Please, use "
"the 'Save as...' option."))
return
fout.write(text)
fout.close()
self.update = True
QMessageBox.information(self, self.tr('Model saved'),
self.tr('Model was correctly saved.'))
self.hasChanged = False
    def openModel(self):
        """Load a model file chosen by the user and display it in the dialog.

        On failure the error is written to the processing log and a
        critical message box is shown.
        """
        filename = unicode(QFileDialog.getOpenFileName(self,
            self.tr('Open Model'), ModelerUtils.modelsFolder(),
            self.tr('Processing models (*.model)')))
        if filename:
            try:
                alg = ModelerAlgorithm.fromFile(filename)
                self.alg = alg
                self.alg.setModelerView(self)
                self.textGroup.setText(alg.group)
                self.textName.setText(alg.name)
                self.repaintModel()
                self.view.centerOn(0, 0)
                # A freshly loaded model has no pending edits
                self.hasChanged = False
            except WrongModelException, e:
                # File was read but is not a valid model description
                ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
                    'Could not load model ' + filename + '\n'
                    + e.msg)
                QMessageBox.critical(self, self.tr('Could not open model'),
                    self.tr('The selected model could not be loaded.\n'
                        'See the log for more information.'))
            except Exception, e:
                # Any other failure (I/O, unexpected content, ...)
                ProcessingLog.addToLog(ProcessingLog.LOG_ERROR,
                    'Could not load model ' + filename + '\n'
                    + e.args[0])
                QMessageBox.critical(self, self.tr('Could not open model'),
                    self.tr('The selected model could not be loaded.\n'
                        'See the log for more information.'))
def repaintModel(self):
self.scene = ModelerScene()
self.scene.setSceneRect(QRectF(0, 0, ModelerAlgorithm.CANVAS_SIZE,
ModelerAlgorithm.CANVAS_SIZE))
self.scene.paintModel(self.alg)
self.view.setScene(self.scene)
def addInput(self):
item = self.inputsTree.currentItem()
paramType = str(item.text(0))
self.addInputOfType(paramType)
def addInputOfType(self, paramType, pos=None):
if paramType in ModelerParameterDefinitionDialog.paramTypes:
dlg = ModelerParameterDefinitionDialog(self.alg, paramType)
dlg.exec_()
if dlg.param is not None:
if pos is None:
pos = self.getPositionForParameterItem()
if isinstance(pos, QPoint):
pos = QPointF(pos)
self.alg.addParameter(ModelerParameter(dlg.param, pos))
self.repaintModel()
#self.view.ensureVisible(self.scene.getLastParameterItem())
self.hasChanged = True
def getPositionForParameterItem(self):
MARGIN = 20
BOX_WIDTH = 200
BOX_HEIGHT = 80
if self.alg.inputs:
maxX = max([i.pos.x() for i in self.alg.inputs.values()])
newX = min(MARGIN + BOX_WIDTH + maxX, self.CANVAS_SIZE - BOX_WIDTH)
else:
newX = MARGIN + BOX_WIDTH / 2
return QPointF(newX, MARGIN + BOX_HEIGHT / 2)
def fillInputsTree(self):
icon = QIcon(os.path.dirname(__file__) + '/../images/input.png')
parametersItem = QTreeWidgetItem()
parametersItem.setText(0, self.tr('Parameters'))
for paramType in ModelerParameterDefinitionDialog.paramTypes:
paramItem = QTreeWidgetItem()
paramItem.setText(0, paramType)
paramItem.setIcon(0, icon)
paramItem.setFlags(Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsDragEnabled)
parametersItem.addChild(paramItem)
self.inputsTree.addTopLevelItem(parametersItem)
parametersItem.setExpanded(True)
def addAlgorithm(self):
item = self.algorithmTree.currentItem()
if isinstance(item, TreeAlgorithmItem):
alg = ModelerUtils.getAlgorithm(item.alg.commandLineName())
self._addAlgorithm(alg.getCopy())
    def _addAlgorithm(self, alg, pos=None):
        """Show the parameters dialog for *alg* and, if accepted, add it to
        the model at *pos* (or at an automatically chosen position)."""
        # Some algorithms provide a custom modeler dialog; fall back to the
        # generic one otherwise
        dlg = alg.getCustomModelerParametersDialog(self.alg)
        if not dlg:
            dlg = ModelerParametersDialog(alg, self.alg)
        dlg.exec_()
        if dlg.alg is not None:
            if pos is None:
                dlg.alg.pos = self.getPositionForAlgorithmItem()
            else:
                dlg.alg.pos = pos
            # Promote a QPoint (only possible when *pos* was passed in) to QPointF
            if isinstance(dlg.alg.pos, QPoint):
                dlg.alg.pos = QPointF(pos)
            # Imported here to avoid a circular import at module load time
            from processing.modeler.ModelerGraphicItem import ModelerGraphicItem
            # Stack the output boxes to the right of the algorithm box,
            # one box height apart
            for i, out in enumerate(dlg.alg.outputs):
                dlg.alg.outputs[out].pos = dlg.alg.pos + QPointF(ModelerGraphicItem.BOX_WIDTH, (i + 1.5)
                        * ModelerGraphicItem.BOX_HEIGHT)
            self.alg.addAlgorithm(dlg.alg)
            self.repaintModel()
            #self.view.ensureVisible(self.scene.getLastAlgorithmItem())
            self.hasChanged = True
def getPositionForAlgorithmItem(self):
MARGIN = 20
BOX_WIDTH = 200
BOX_HEIGHT = 80
if self.alg.algs:
maxX = max([alg.pos.x() for alg in self.alg.algs.values()])
maxY = max([alg.pos.y() for alg in self.alg.algs.values()])
newX = min(MARGIN + BOX_WIDTH + maxX, self.CANVAS_SIZE - BOX_WIDTH)
newY = min(MARGIN + BOX_HEIGHT + maxY, self.CANVAS_SIZE
- BOX_HEIGHT)
else:
newX = MARGIN + BOX_WIDTH / 2
newY = MARGIN * 2 + BOX_HEIGHT + BOX_HEIGHT / 2
return QPointF(newX, newY)
def fillAlgorithmTree(self):
settings = QSettings()
useCategories = settings.value(self.USE_CATEGORIES, type=bool)
if useCategories:
self.fillAlgorithmTreeUsingCategories()
else:
self.fillAlgorithmTreeUsingProviders()
self.algorithmTree.sortItems(0, Qt.AscendingOrder)
text = unicode(self.searchBox.text())
if text != '':
self.algorithmTree.expandAll()
    def fillAlgorithmTreeUsingCategories(self):
        """Fill the algorithm tree grouped by decorator categories.

        First pass: geoalgorithms are grouped under group/subgroup nodes
        taken from AlgorithmDecorator, all under a single 'Geoalgorithms'
        top-level item.  Second pass: model and script providers keep
        their own provider-based grouping and are appended afterwards.
        The search box text filters algorithms by name in both passes.
        """
        providersToExclude = ['model', 'script']
        self.algorithmTree.clear()
        text = unicode(self.searchBox.text())
        groups = {}
        allAlgs = ModelerUtils.allAlgs
        for providerName in allAlgs.keys():
            provider = allAlgs[providerName]
            name = 'ACTIVATE_' + providerName.upper().replace(' ', '_')
            # Skip providers the user has deactivated in the settings
            if not ProcessingConfig.getSetting(name):
                continue
            # Excluded providers (and providers exposing actions) are
            # handled by the second pass below
            if providerName in providersToExclude \
                    or len(ModelerUtils.providers[providerName].actions) != 0:
                continue
            algs = provider.values()
            # Add algorithms
            for alg in algs:
                if not alg.showInModeler or alg.allowOnlyOpenedLayers:
                    continue
                (altgroup, altsubgroup, altname) = \
                    AlgorithmDecorator.getGroupsAndName(alg)
                if altgroup is None:
                    continue
                # Apply the search filter on the decorated name
                if text == '' or text.lower() in altname.lower():
                    if altgroup not in groups:
                        groups[altgroup] = {}
                    group = groups[altgroup]
                    if altsubgroup not in group:
                        groups[altgroup][altsubgroup] = []
                    subgroup = groups[altgroup][altsubgroup]
                    subgroup.append(alg)
        if len(groups) > 0:
            mainItem = QTreeWidgetItem()
            mainItem.setText(0, 'Geoalgorithms')
            mainItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
            mainItem.setToolTip(0, mainItem.text(0))
            for (groupname, group) in groups.items():
                groupItem = QTreeWidgetItem()
                groupItem.setText(0, groupname)
                groupItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
                groupItem.setToolTip(0, groupItem.text(0))
                mainItem.addChild(groupItem)
                for (subgroupname, subgroup) in group.items():
                    subgroupItem = QTreeWidgetItem()
                    subgroupItem.setText(0, subgroupname)
                    subgroupItem.setIcon(0, GeoAlgorithm.getDefaultIcon())
                    subgroupItem.setToolTip(0, subgroupItem.text(0))
                    groupItem.addChild(subgroupItem)
                    for alg in subgroup:
                        algItem = TreeAlgorithmItem(alg)
                        subgroupItem.addChild(algItem)
            self.algorithmTree.addTopLevelItem(mainItem)
        # Second pass: model/script providers keep provider-based grouping
        for providerName in allAlgs.keys():
            groups = {}
            provider = allAlgs[providerName]
            name = 'ACTIVATE_' + providerName.upper().replace(' ', '_')
            if not ProcessingConfig.getSetting(name):
                continue
            if providerName not in providersToExclude:
                continue
            algs = provider.values()
            # Add algorithms
            for alg in algs:
                if not alg.showInModeler or alg.allowOnlyOpenedLayers:
                    continue
                if text == '' or text.lower() in alg.name.lower():
                    if alg.group in groups:
                        groupItem = groups[alg.group]
                    else:
                        groupItem = QTreeWidgetItem()
                        groupItem.setText(0, alg.group)
                        groupItem.setToolTip(0, alg.group)
                        groups[alg.group] = groupItem
                    algItem = TreeAlgorithmItem(alg)
                    groupItem.addChild(algItem)
            if len(groups) > 0:
                providerItem = QTreeWidgetItem()
                providerItem.setText(0,
                    ModelerUtils.providers[providerName].getDescription())
                providerItem.setIcon(0,
                    ModelerUtils.providers[providerName].getIcon())
                providerItem.setToolTip(0, providerItem.text(0))
                for groupItem in groups.values():
                    providerItem.addChild(groupItem)
                self.algorithmTree.addTopLevelItem(providerItem)
                # Expand everything while a search filter is active
                providerItem.setExpanded(text != '')
                for groupItem in groups.values():
                    if text != '':
                        groupItem.setExpanded(True)
    def fillAlgorithmTreeUsingProviders(self):
        """Fill the algorithm tree with one top-level item per provider.

        Algorithms are filtered by the search box text and grouped by
        their declared group inside each provider item.
        """
        self.algorithmTree.clear()
        text = unicode(self.searchBox.text())
        allAlgs = ModelerUtils.allAlgs
        for providerName in allAlgs.keys():
            groups = {}
            provider = allAlgs[providerName]
            algs = provider.values()
            # Add algorithms
            for alg in algs:
                # Hide algorithms that cannot be used from the modeler
                if not alg.showInModeler or alg.allowOnlyOpenedLayers:
                    continue
                if text == '' or text.lower() in alg.name.lower():
                    if alg.group in groups:
                        groupItem = groups[alg.group]
                    else:
                        groupItem = QTreeWidgetItem()
                        groupItem.setText(0, alg.group)
                        groupItem.setToolTip(0, alg.group)
                        groups[alg.group] = groupItem
                    algItem = TreeAlgorithmItem(alg)
                    groupItem.addChild(algItem)
            if len(groups) > 0:
                providerItem = QTreeWidgetItem()
                providerItem.setText(0,
                    ModelerUtils.providers[providerName].getDescription())
                providerItem.setToolTip(0,
                    ModelerUtils.providers[providerName].getDescription())
                providerItem.setIcon(0,
                    ModelerUtils.providers[providerName].getIcon())
                for groupItem in groups.values():
                    providerItem.addChild(groupItem)
                self.algorithmTree.addTopLevelItem(providerItem)
                # Expand everything while a search filter is active
                providerItem.setExpanded(text != '')
                for groupItem in groups.values():
                    if text != '':
                        groupItem.setExpanded(True)
        self.algorithmTree.sortItems(0, Qt.AscendingOrder)
class TreeAlgorithmItem(QTreeWidgetItem):
    """Tree widget item wrapping a processing algorithm.

    The displayed name and icon depend on whether category-based grouping
    is enabled in the settings.
    """

    def __init__(self, alg):
        QTreeWidgetItem.__init__(self)
        self.alg = alg
        useCategories = QSettings().value(ModelerDialog.USE_CATEGORIES,
                                          type=bool)
        if useCategories:
            displayIcon = GeoAlgorithm.getDefaultIcon()
            (group, subgroup, displayName) = \
                AlgorithmDecorator.getGroupsAndName(alg)
        else:
            displayIcon = alg.getIcon()
            displayName = alg.name
        self.setIcon(0, displayIcon)
        self.setToolTip(0, displayName)
        self.setText(0, displayName)
| gpl-2.0 |
alviano/wasp | tests/asp/AllAnswerSets/aggregates/sum.example7.test.py | 4 | 27383 | input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 0 0
1 21 0 0
1 22 0 0
1 23 2 1 24 25
1 24 2 1 23 25
1 25 0 0
1 26 2 1 27 28
1 27 2 1 26 28
1 28 0 0
1 29 2 1 30 31
1 30 2 1 29 31
1 31 0 0
1 32 2 1 33 34
1 33 2 1 32 34
1 34 0 0
1 35 2 1 36 37
1 36 2 1 35 37
1 37 0 0
1 38 2 1 39 40
1 39 2 1 38 40
1 40 0 0
1 41 2 1 42 43
1 42 2 1 41 43
1 43 0 0
1 44 2 1 45 46
1 45 2 1 44 46
1 46 0 0
1 47 2 1 48 49
1 48 2 1 47 49
1 49 0 0
1 50 2 1 51 52
1 51 2 1 50 52
1 52 0 0
1 53 2 1 54 55
1 54 2 1 53 55
1 55 0 0
1 56 2 1 57 58
1 57 2 1 56 58
1 58 0 0
1 59 2 1 60 61
1 60 2 1 59 61
1 61 0 0
1 62 2 1 63 64
1 63 2 1 62 64
1 64 0 0
1 65 2 1 66 67
1 66 2 1 65 67
1 67 0 0
1 68 2 1 69 70
1 69 2 1 68 70
1 70 0 0
1 71 2 1 72 73
1 72 2 1 71 73
1 73 0 0
1 74 2 1 75 76
1 75 2 1 74 76
1 76 0 0
1 77 2 1 78 79
1 78 2 1 77 79
1 79 0 0
1 80 2 1 81 82
1 81 2 1 80 82
1 82 0 0
5 84 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 85 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 83 2 1 85 84
5 87 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 88 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 86 2 1 88 87
5 89 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 90 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 86 2 1 90 89
5 92 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 91 1 1 92
5 94 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 93 1 1 94
5 96 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 95 1 1 96
5 98 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 97 1 1 98
5 100 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 99 1 0 100
5 102 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 101 1 0 102
5 104 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 103 1 0 104
5 106 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 105 1 0 106
5 108 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 109 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 107 2 1 109 108
5 111 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 112 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 110 2 1 112 111
5 113 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 114 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 110 2 1 114 113
5 116 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 115 1 1 116
5 118 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 117 1 1 118
5 120 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 119 1 1 120
5 122 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 121 1 1 122
5 124 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 123 1 0 124
5 126 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 125 1 0 126
5 128 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 127 1 0 128
5 130 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 129 1 0 130
5 132 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 133 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 131 2 1 133 132
5 135 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 136 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 134 2 1 136 135
5 137 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 138 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 134 2 1 138 137
5 140 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 139 1 1 140
5 142 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 141 1 1 142
5 144 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 143 1 1 144
5 146 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 145 1 1 146
5 148 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 147 1 0 148
5 150 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 149 1 0 150
5 152 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 151 1 0 152
5 154 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 153 1 0 154
5 156 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 157 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 155 2 1 157 156
5 159 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 160 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 158 2 1 160 159
5 161 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 162 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 158 2 1 162 161
5 164 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 163 1 0 164
5 166 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 165 1 0 166
5 168 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 167 1 0 168
5 170 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 169 1 0 170
5 172 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 171 1 1 172
5 174 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 173 1 1 174
5 176 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 175 1 1 176
5 178 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 177 1 1 178
5 180 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 181 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 179 2 1 181 180
5 183 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 184 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 182 2 1 184 183
5 185 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 186 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 182 2 1 186 185
5 188 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 187 1 0 188
5 190 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 189 1 0 190
5 192 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 191 1 0 192
5 194 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 193 1 0 194
5 196 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 195 1 1 196
5 198 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 197 1 1 198
5 200 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 199 1 1 200
5 202 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 201 1 1 202
5 204 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 205 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 203 2 1 205 204
5 207 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 208 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 206 2 1 208 207
5 209 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 210 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 206 2 1 210 209
5 212 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 211 1 0 212
5 214 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 213 1 0 214
5 216 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 215 1 0 216
5 218 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 217 1 0 218
5 220 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 219 1 1 220
5 222 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 221 1 1 222
5 224 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 223 1 1 224
5 226 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 225 1 1 226
5 228 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 229 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 227 2 1 229 228
5 231 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 232 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 230 2 1 232 231
5 233 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 234 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 230 2 1 234 233
5 236 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 237 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 235 2 1 237 236
5 239 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 240 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 238 2 1 240 239
5 241 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 242 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 238 2 1 242 241
5 244 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 245 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 243 2 1 245 244
5 247 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 248 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 246 2 1 248 247
5 249 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 250 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 246 2 1 250 249
5 252 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 253 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 251 2 1 253 252
5 255 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 256 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 254 2 1 256 255
5 258 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 259 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 257 2 1 259 258
5 261 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 262 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 260 2 1 262 261
5 264 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 265 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 263 2 1 265 264
5 267 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 268 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 266 2 1 268 267
5 270 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 271 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 269 2 1 271 270
5 273 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 274 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 272 2 1 274 273
5 275 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 276 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 272 2 1 276 275
5 278 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 279 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 277 2 1 279 278
5 281 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 282 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 280 2 1 282 281
5 283 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 284 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 280 2 1 284 283
5 286 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 287 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 285 2 1 287 286
5 289 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 290 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 288 2 1 290 289
5 292 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 293 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 291 2 1 293 292
5 295 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 296 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 294 2 1 296 295
5 298 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 299 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 297 2 1 299 298
5 301 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 302 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 300 2 1 302 301
5 303 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 304 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 300 2 1 304 303
5 306 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 307 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 305 2 1 307 306
5 309 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 310 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 308 2 1 310 309
5 312 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 313 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 311 2 1 313 312
5 315 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 316 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 314 2 1 316 315
5 318 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 319 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 317 2 1 319 318
5 321 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 322 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 320 2 1 322 321
5 323 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 324 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 320 2 1 324 323
5 326 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 327 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 325 2 1 327 326
5 329 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 330 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 328 2 1 330 329
5 332 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 333 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 331 2 1 333 332
5 335 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 336 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 334 2 1 336 335
5 338 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 339 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 337 2 1 339 338
5 341 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 342 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 340 2 1 342 341
5 343 8 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 344 9 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 340 2 1 344 343
5 345 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 346 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 2 1 346 345
5 347 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 1 0 347
5 348 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 349 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 2 1 349 348
5 350 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 1 1 350
5 351 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 1 0 351
5 352 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 353 6 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 2 1 353 352
5 354 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 355 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 2 1 355 354
5 356 5 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 357 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 2 1 357 356
5 358 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 359 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 2 1 359 358
5 360 4 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
5 361 7 20 0 80 77 74 71 68 65 62 59 56 53 50 47 44 41 38 35 32 29 26 23 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20
1 1 2 1 361 360
0
177 ouch20
182 ouch21
158 ouch16
189 ouch22
165 ouch17
193 ouch23
169 ouch18
147 okay14
173 ouch19
260 ouch36
3 c(1)
4 c(2)
5 c(3)
6 c(4)
7 c(5)
8 c(6)
9 c(7)
10 c(8)
11 c(9)
12 c(10)
13 c(11)
14 c(12)
15 c(13)
16 c(14)
17 c(15)
18 c(16)
19 c(17)
20 c(18)
21 c(19)
22 c(20)
288 ouch40
280 ouch39
246 ouch34
238 ouch33
254 ouch35
272 ouch38
151 okay15
139 okay12
143 okay13
223 okay31
99 okay4
266 ouch37
103 okay5
107 okay6
227 okay32
215 okay29
219 okay30
195 okay24
115 okay7
203 okay26
211 okay28
199 okay25
311 okay44
334 ouch47
119 okay8
127 okay10
305 okay43
131 okay11
2 p
123 okay9
297 okay42
291 okay41
325 okay46
91 okay2
95 okay3
317 okay45
175 okay20
179 okay21
155 okay16
187 okay22
23 d(20)
26 d(19)
29 d(18)
32 d(17)
35 d(16)
38 d(15)
41 d(14)
44 d(13)
47 d(12)
50 d(11)
53 d(10)
56 d(9)
59 d(8)
62 d(7)
65 d(6)
68 d(5)
71 d(4)
74 d(3)
77 d(2)
80 d(1)
163 okay17
191 okay23
24 n_d(20)
27 n_d(19)
30 n_d(18)
33 n_d(17)
36 n_d(16)
39 n_d(15)
42 n_d(14)
45 n_d(13)
48 n_d(12)
51 n_d(11)
54 n_d(10)
57 n_d(9)
60 n_d(8)
63 n_d(7)
66 n_d(6)
69 n_d(5)
72 n_d(4)
75 n_d(3)
78 n_d(2)
81 n_d(1)
167 okay18
171 okay19
257 okay36
285 okay40
149 ouch14
277 okay39
251 okay35
243 okay34
141 ouch12
235 okay33
145 ouch13
269 okay38
153 ouch15
101 ouch4
225 ouch31
105 ouch5
263 okay37
217 ouch29
110 ouch6
221 ouch30
230 ouch32
197 ouch24
117 ouch7
201 ouch25
206 ouch27
314 ouch44
213 ouch28
337 okay48
331 okay47
121 ouch8
308 ouch43
83 okay1
340 ouch48
129 ouch10
125 ouch9
134 ouch11
300 ouch42
86 ouch1
294 ouch41
328 ouch46
320 ouch45
93 ouch2
97 ouch3
0
B+
0
B-
1
0
1
"""
output = """
"""
| apache-2.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/django/contrib/gis/gdal/layer.py | 477 | 8693 | from ctypes import byref, c_double
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.envelope import Envelope, OGREnvelope
from django.contrib.gis.gdal.error import (
GDALException, OGRIndexError, SRSException,
)
from django.contrib.gis.gdal.feature import Feature
from django.contrib.gis.gdal.field import OGRFieldTypes
from django.contrib.gis.gdal.geometries import OGRGeometry
from django.contrib.gis.gdal.geomtype import OGRGeomType
from django.contrib.gis.gdal.prototypes import (
ds as capi, geom as geom_api, srs as srs_api,
)
from django.contrib.gis.gdal.srs import SpatialReference
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_L_* routines are relevant here.
class Layer(GDALBase):
    "A class that wraps an OGR Layer, needs to be instantiated from a DataSource object."
    def __init__(self, layer_ptr, ds):
        """
        Initializes on an OGR C pointer to the Layer and the `DataSource` object
        that owns this layer. The `DataSource` object is required so that a
        reference to it is kept with this Layer. This prevents garbage
        collection of the `DataSource` while this Layer is still active.
        """
        if not layer_ptr:
            raise GDALException('Cannot create Layer, invalid pointer given')
        self.ptr = layer_ptr
        # Keep a reference to the owning DataSource so it cannot be
        # garbage-collected while this Layer is still in use.
        self._ds = ds
        # Cache the OGR feature-definition handle; the field/geometry
        # introspection properties below all read from it.
        # NOTE(review): assigns self.ptr but reads self._ptr -- presumably
        # ``ptr`` is a property on GDALBase backed by ``_ptr``; confirm.
        self._ldefn = capi.get_layer_defn(self._ptr)
        # Does the Layer support random reading?
        self._random_read = self.test_capability(b'RandomRead')
    def __getitem__(self, index):
        "Gets the Feature at the specified index."
        if isinstance(index, six.integer_types):
            # An integer index was given -- we cannot do a check based on the
            # number of features because the beginning and ending feature IDs
            # are not guaranteed to be 0 and len(layer)-1, respectively.
            if index < 0:
                raise OGRIndexError('Negative indices are not allowed on OGR Layers.')
            return self._make_feature(index)
        elif isinstance(index, slice):
            # A slice was given
            start, stop, stride = index.indices(self.num_feat)
            return [self._make_feature(fid) for fid in range(start, stop, stride)]
        else:
            raise TypeError('Integers and slices may only be used when indexing OGR Layers.')
    def __iter__(self):
        "Iterates over each Feature in the Layer."
        # ResetReading() must be called before iteration is to begin.
        capi.reset_reading(self._ptr)
        for i in range(self.num_feat):
            yield Feature(capi.get_next_feature(self._ptr), self)
    def __len__(self):
        "The length is the number of features."
        return self.num_feat
    def __str__(self):
        "The string name of the layer."
        return self.name
    def _make_feature(self, feat_id):
        """
        Helper routine for __getitem__ that constructs a Feature from the given
        Feature ID.  If the OGR Layer does not support random-access reading,
        then each feature of the layer will be incremented through until the
        a Feature is found matching the given feature ID.
        """
        if self._random_read:
            # If the Layer supports random reading, return.
            try:
                return Feature(capi.get_feature(self.ptr, feat_id), self)
            except GDALException:
                # Fall through to the OGRIndexError below.
                pass
        else:
            # Random access isn't supported, have to increment through
            # each feature until the given feature ID is encountered.
            for feat in self:
                if feat.fid == feat_id:
                    return feat
        # Should have returned a Feature, raise an OGRIndexError.
        raise OGRIndexError('Invalid feature id: %s.' % feat_id)
    # #### Layer properties ####
    @property
    def extent(self):
        "Returns the extent (an Envelope) of this layer."
        env = OGREnvelope()
        # Third argument (1) forces OGR to compute the extent if needed.
        capi.get_extent(self.ptr, byref(env), 1)
        return Envelope(env)
    @property
    def name(self):
        "Returns the name of this layer in the Data Source."
        name = capi.get_fd_name(self._ldefn)
        return force_text(name, self._ds.encoding, strings_only=True)
    # NOTE(review): a property getter only ever receives ``self``, so the
    # ``force`` parameter always takes its default of 1 (force a count).
    @property
    def num_feat(self, force=1):
        "Returns the number of features in the Layer."
        return capi.get_feature_count(self.ptr, force)
    @property
    def num_fields(self):
        "Returns the number of fields in the Layer."
        return capi.get_field_count(self._ldefn)
    @property
    def geom_type(self):
        "Returns the geometry type (OGRGeomType) of the Layer."
        return OGRGeomType(capi.get_fd_geom_type(self._ldefn))
    @property
    def srs(self):
        "Returns the Spatial Reference used in this Layer."
        try:
            ptr = capi.get_layer_srs(self.ptr)
            # Clone so the returned SpatialReference owns its own handle.
            return SpatialReference(srs_api.clone_srs(ptr))
        except SRSException:
            return None
    @property
    def fields(self):
        """
        Returns a list of string names corresponding to each of the Fields
        available in this Layer.
        """
        return [force_text(capi.get_field_name(capi.get_field_defn(self._ldefn, i)),
                           self._ds.encoding, strings_only=True)
                for i in range(self.num_fields)]
    @property
    def field_types(self):
        """
        Returns a list of the types of fields in this Layer.  For example,
        the list [OFTInteger, OFTReal, OFTString] would be returned for
        an OGR layer that had an integer, a floating-point, and string
        fields.
        """
        return [OGRFieldTypes[capi.get_field_type(capi.get_field_defn(self._ldefn, i))]
                for i in range(self.num_fields)]
    @property
    def field_widths(self):
        "Returns a list of the maximum field widths for the features."
        return [capi.get_field_width(capi.get_field_defn(self._ldefn, i))
                for i in range(self.num_fields)]
    @property
    def field_precisions(self):
        "Returns the field precisions for the features."
        return [capi.get_field_precision(capi.get_field_defn(self._ldefn, i))
                for i in range(self.num_fields)]
    def _get_spatial_filter(self):
        # Returns a cloned OGRGeometry for the current spatial filter, or
        # None when no filter is set (clone_geom raises on a null pointer).
        try:
            return OGRGeometry(geom_api.clone_geom(capi.get_spatial_filter(self.ptr)))
        except GDALException:
            return None
    def _set_spatial_filter(self, filter):
        # Accepts an OGRGeometry, a 4-sequence (xmin, ymin, xmax, ymax)
        # rectangle, or None to clear the filter.
        if isinstance(filter, OGRGeometry):
            capi.set_spatial_filter(self.ptr, filter.ptr)
        elif isinstance(filter, (tuple, list)):
            if not len(filter) == 4:
                raise ValueError('Spatial filter list/tuple must have 4 elements.')
            # Map c_double onto params -- if a bad type is passed in it
            # will be caught here.
            xmin, ymin, xmax, ymax = map(c_double, filter)
            capi.set_spatial_filter_rect(self.ptr, xmin, ymin, xmax, ymax)
        elif filter is None:
            capi.set_spatial_filter(self.ptr, None)
        else:
            raise TypeError('Spatial filter must be either an OGRGeometry instance, a 4-tuple, or None.')
    spatial_filter = property(_get_spatial_filter, _set_spatial_filter)
    # #### Layer Methods ####
    def get_fields(self, field_name):
        """
        Returns a list containing the given field name for every Feature
        in the Layer.
        """
        if field_name not in self.fields:
            raise GDALException('invalid field name: %s' % field_name)
        return [feat.get(field_name) for feat in self]
    def get_geoms(self, geos=False):
        """
        Returns a list containing the OGRGeometry for every Feature in
        the Layer.
        """
        if geos:
            # Deferred import: GEOS support is optional.
            from django.contrib.gis.geos import GEOSGeometry
            return [GEOSGeometry(feat.geom.wkb) for feat in self]
        else:
            return [feat.geom for feat in self]
    def test_capability(self, capability):
        """
        Returns a bool indicating whether the this Layer supports the given
        capability (a string).  Valid capability strings include:
          'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter',
          'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions',
          'DeleteFeature', and 'FastSetNextByIndex'.
        """
        return bool(capi.test_capability(self.ptr, force_bytes(capability)))
| bsd-3-clause |
duyetdev/openerp-6.1.1 | openerp/pychart/afm/NewCenturySchlbk_Roman.py | 15 | 1528 | # -*- coding: utf-8 -*-
# AFM font NewCenturySchlbk-Roman (path: /usr/share/fonts/afms/adobe/pncr8a.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
dir.afm["NewCenturySchlbk-Roman"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 278, 296, 389, 556, 556, 833, 815, 204, 333, 333, 500, 606, 278, 333, 278, 278, 556, 556, 556, 556, 556, 556, 556, 556, 556, 556, 278, 278, 606, 606, 606, 444, 737, 722, 722, 722, 778, 722, 667, 778, 833, 407, 556, 778, 667, 944, 815, 778, 667, 778, 722, 630, 667, 815, 722, 981, 704, 704, 611, 333, 606, 333, 606, 500, 204, 556, 556, 444, 574, 500, 333, 537, 611, 315, 296, 593, 315, 889, 611, 500, 574, 556, 444, 463, 389, 611, 537, 778, 537, 537, 481, 333, 606, 333, 606, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 296, 556, 556, 167, 556, 556, 500, 556, 204, 389, 426, 259, 259, 611, 611, 500, 556, 500, 500, 278, 500, 606, 606, 204, 389, 389, 426, 1000, 1000, 500, 444, 500, 333, 333, 333, 333, 333, 333, 333, 333, 500, 333, 333, 500, 333, 333, 333, 1000, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 1000, 500, 334, 500, 500, 500, 500, 667, 778, 1000, 300, 500, 500, 500, 500, 500, 796, 500, 500, 500, 315, 500, 500, 315, 500, 833, 574, )
| agpl-3.0 |
smartfile/django-1.4 | tests/regressiontests/middleware/tests.py | 1 | 29035 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, with_statement
import gzip
import re
import random
import StringIO
from django.conf import settings
from django.core import mail
from django.db import (transaction, connections, DEFAULT_DB_ALIAS,
IntegrityError)
from django.http import HttpRequest
from django.http import HttpResponse
from django.middleware.clickjacking import XFrameOptionsMiddleware
from django.middleware.common import CommonMiddleware
from django.middleware.http import ConditionalGetMiddleware
from django.middleware.gzip import GZipMiddleware
from django.middleware.transaction import TransactionMiddleware
from django.test import TransactionTestCase, TestCase, RequestFactory
from django.test.utils import override_settings
from .models import Band
class CommonMiddlewareTest(TestCase):
    """
    Tests for django.middleware.common.CommonMiddleware covering the
    APPEND_SLASH redirect, the PREPEND_WWW redirect, the same behaviors
    under a per-request ``request.urlconf`` override, and broken-link
    (404) e-mail reporting.
    """
    def setUp(self):
        # Snapshot the settings these tests mutate directly on the
        # ``settings`` module, so tearDown can restore them.
        self.append_slash = settings.APPEND_SLASH
        self.prepend_www = settings.PREPEND_WWW
        self.ignorable_404_urls = settings.IGNORABLE_404_URLS
        self.send_broken_email_links = settings.SEND_BROKEN_LINK_EMAILS
    def tearDown(self):
        settings.APPEND_SLASH = self.append_slash
        settings.PREPEND_WWW = self.prepend_www
        settings.IGNORABLE_404_URLS = self.ignorable_404_urls
        settings.SEND_BROKEN_LINK_EMAILS = self.send_broken_email_links
    def _get_request(self, path):
        # Build a bare HttpRequest for /middleware/<path> without going
        # through the test client, so the middleware sees it directly.
        request = HttpRequest()
        request.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        request.path = request.path_info = "/middleware/%s" % path
        return request
    def test_append_slash_have_slash(self):
        """
        Tests that URLs with slashes go unmolested.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('slash/')
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_slashless_resource(self):
        """
        Tests that matches to explicit slashless URLs go unmolested.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('noslash')
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_slashless_unknown(self):
        """
        Tests that APPEND_SLASH doesn't redirect to unknown resources.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('unknown')
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_redirect(self):
        """
        Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('slash')
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(r['Location'], 'http://testserver/middleware/slash/')
    def test_append_slash_no_redirect_on_POST_in_DEBUG(self):
        """
        Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to an URL which would normally be
        redirected to a slashed version.
        """
        settings.APPEND_SLASH = True
        settings.DEBUG = True
        request = self._get_request('slash')
        request.method = 'POST'
        self.assertRaises(
            RuntimeError,
            CommonMiddleware().process_request,
            request)
        # NOTE(review): if either invocation above fails unexpectedly,
        # DEBUG is left True for subsequent tests -- tearDown does not
        # restore it.  A try/finally would be safer.
        try:
            CommonMiddleware().process_request(request)
        except RuntimeError, e:
            self.assertTrue('end in a slash' in str(e))
        settings.DEBUG = False
    def test_append_slash_disabled(self):
        """
        Tests disabling append slash functionality.
        """
        settings.APPEND_SLASH = False
        request = self._get_request('slash')
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_quoted(self):
        """
        Tests that URLs which require quoting are redirected to their slash
        version ok.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('needsquoting#')
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(
            r['Location'],
            'http://testserver/middleware/needsquoting%23/')
    def test_prepend_www(self):
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = False
        request = self._get_request('path/')
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(
            r['Location'],
            'http://www.testserver/middleware/path/')
    def test_prepend_www_append_slash_have_slash(self):
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = True
        request = self._get_request('slash/')
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(r['Location'],
                         'http://www.testserver/middleware/slash/')
    def test_prepend_www_append_slash_slashless(self):
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = True
        request = self._get_request('slash')
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(r['Location'],
                         'http://www.testserver/middleware/slash/')
    # The following tests examine expected behavior given a custom urlconf that
    # overrides the default one through the request object.
    def test_append_slash_have_slash_custom_urlconf(self):
        """
        Tests that URLs with slashes go unmolested.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('customurlconf/slash/')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_slashless_resource_custom_urlconf(self):
        """
        Tests that matches to explicit slashless URLs go unmolested.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('customurlconf/noslash')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_slashless_unknown_custom_urlconf(self):
        """
        Tests that APPEND_SLASH doesn't redirect to unknown resources.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('customurlconf/unknown')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_redirect_custom_urlconf(self):
        """
        Tests that APPEND_SLASH redirects slashless URLs to a valid pattern.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('customurlconf/slash')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        r = CommonMiddleware().process_request(request)
        self.assertFalse(r is None,
            "CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
        self.assertEqual(r.status_code, 301)
        self.assertEqual(r['Location'], 'http://testserver/middleware/customurlconf/slash/')
    def test_append_slash_no_redirect_on_POST_in_DEBUG_custom_urlconf(self):
        """
        Tests that while in debug mode, an exception is raised with a warning
        when a failed attempt is made to POST to an URL which would normally be
        redirected to a slashed version.
        """
        settings.APPEND_SLASH = True
        settings.DEBUG = True
        request = self._get_request('customurlconf/slash')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        request.method = 'POST'
        self.assertRaises(
            RuntimeError,
            CommonMiddleware().process_request,
            request)
        # NOTE(review): same DEBUG-restore caveat as the non-urlconf variant.
        try:
            CommonMiddleware().process_request(request)
        except RuntimeError, e:
            self.assertTrue('end in a slash' in str(e))
        settings.DEBUG = False
    def test_append_slash_disabled_custom_urlconf(self):
        """
        Tests disabling append slash functionality.
        """
        settings.APPEND_SLASH = False
        request = self._get_request('customurlconf/slash')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        self.assertEqual(CommonMiddleware().process_request(request), None)
    def test_append_slash_quoted_custom_urlconf(self):
        """
        Tests that URLs which require quoting are redirected to their slash
        version ok.
        """
        settings.APPEND_SLASH = True
        request = self._get_request('customurlconf/needsquoting#')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        r = CommonMiddleware().process_request(request)
        self.assertFalse(r is None,
            "CommonMiddlware failed to return APPEND_SLASH redirect using request.urlconf")
        self.assertEqual(r.status_code, 301)
        self.assertEqual(
            r['Location'],
            'http://testserver/middleware/customurlconf/needsquoting%23/')
    def test_prepend_www_custom_urlconf(self):
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = False
        request = self._get_request('customurlconf/path/')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(
            r['Location'],
            'http://www.testserver/middleware/customurlconf/path/')
    def test_prepend_www_append_slash_have_slash_custom_urlconf(self):
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = True
        request = self._get_request('customurlconf/slash/')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(r['Location'],
                         'http://www.testserver/middleware/customurlconf/slash/')
    def test_prepend_www_append_slash_slashless_custom_urlconf(self):
        settings.PREPEND_WWW = True
        settings.APPEND_SLASH = True
        request = self._get_request('customurlconf/slash')
        request.urlconf = 'regressiontests.middleware.extra_urls'
        r = CommonMiddleware().process_request(request)
        self.assertEqual(r.status_code, 301)
        self.assertEqual(r['Location'],
                         'http://www.testserver/middleware/customurlconf/slash/')
    # Tests for the 404 error reporting via email
    def test_404_error_reporting(self):
        settings.IGNORABLE_404_URLS = (re.compile(r'foo'),)
        settings.SEND_BROKEN_LINK_EMAILS = True
        request = self._get_request('regular_url/that/does/not/exist')
        request.META['HTTP_REFERER'] = '/another/url/'
        response = self.client.get(request.path)
        CommonMiddleware().process_response(request, response)
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('Broken', mail.outbox[0].subject)
    def test_404_error_reporting_no_referer(self):
        # Without a referer header no broken-link mail is sent.
        settings.IGNORABLE_404_URLS = (re.compile(r'foo'),)
        settings.SEND_BROKEN_LINK_EMAILS = True
        request = self._get_request('regular_url/that/does/not/exist')
        response = self.client.get(request.path)
        CommonMiddleware().process_response(request, response)
        self.assertEqual(len(mail.outbox), 0)
    def test_404_error_reporting_ignored_url(self):
        # URLs matching IGNORABLE_404_URLS produce no broken-link mail.
        settings.IGNORABLE_404_URLS = (re.compile(r'foo'),)
        settings.SEND_BROKEN_LINK_EMAILS = True
        request = self._get_request('foo_url/that/does/not/exist/either')
        request.META['HTTP_REFERER'] = '/another/url/'
        response = self.client.get(request.path)
        CommonMiddleware().process_response(request, response)
        self.assertEqual(len(mail.outbox), 0)
class ConditionalGetMiddlewareTest(TestCase):
    """
    Tests for ConditionalGetMiddleware: it stamps Date and Content-Length
    headers onto responses, and answers conditional GETs (If-None-Match /
    If-Modified-Since) with a 304 when the validators match.
    """
    urls = 'regressiontests.middleware.cond_get_urls'
    def setUp(self):
        # Build a minimal request for "/" and fetch the matching response
        # through the test client; the individual tests tweak headers on
        # both objects before running the middleware.
        self.req = HttpRequest()
        self.req.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        self.req.path = self.req.path_info = "/"
        self.resp = self.client.get(self.req.path)
    def _run_middleware(self):
        # Push the current request/response pair through the middleware and
        # keep the (possibly replaced) response for the assertions.
        self.resp = ConditionalGetMiddleware().process_response(self.req, self.resp)
    # Tests for the Date header
    def test_date_header_added(self):
        self.assertNotIn('Date', self.resp)
        self._run_middleware()
        self.assertIn('Date', self.resp)
    # Tests for the Content-Length header
    def test_content_length_header_added(self):
        expected_length = len(self.resp.content)
        self.assertNotIn('Content-Length', self.resp)
        self._run_middleware()
        self.assertIn('Content-Length', self.resp)
        self.assertEqual(int(self.resp['Content-Length']), expected_length)
    def test_content_length_header_not_changed(self):
        # A pre-existing Content-Length -- even an incorrect one -- must be
        # left untouched.
        wrong_length = len(self.resp.content) + 10
        self.resp['Content-Length'] = wrong_length
        self._run_middleware()
        self.assertEqual(int(self.resp['Content-Length']), wrong_length)
    # Tests for the ETag header
    def test_if_none_match_and_no_etag(self):
        self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 200)
    def test_no_if_none_match_and_etag(self):
        self.resp['ETag'] = 'eggs'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 200)
    def test_if_none_match_and_same_etag(self):
        self.resp['ETag'] = 'spam'
        self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 304)
    def test_if_none_match_and_different_etag(self):
        self.req.META['HTTP_IF_NONE_MATCH'] = 'spam'
        self.resp['ETag'] = 'eggs'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 200)
    # Tests for the Last-Modified header
    def test_if_modified_since_and_no_last_modified(self):
        self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 200)
    def test_no_if_modified_since_and_last_modified(self):
        self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 200)
    def test_if_modified_since_and_same_last_modified(self):
        self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
        self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 304)
    def test_if_modified_since_and_last_modified_in_the_past(self):
        self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
        self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:35:44 GMT'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 304)
    def test_if_modified_since_and_last_modified_in_the_future(self):
        self.req.META['HTTP_IF_MODIFIED_SINCE'] = 'Sat, 12 Feb 2011 17:38:44 GMT'
        self.resp['Last-Modified'] = 'Sat, 12 Feb 2011 17:41:44 GMT'
        self._run_middleware()
        self.assertEqual(self.resp.status_code, 200)
class XFrameOptionsMiddlewareTest(TestCase):
"""
Tests for the X-Frame-Options clickjacking prevention middleware.
"""
def setUp(self):
self.x_frame_options = settings.X_FRAME_OPTIONS
def tearDown(self):
settings.X_FRAME_OPTIONS = self.x_frame_options
def test_same_origin(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to SAMEORIGIN to
have the middleware use that value for the HTTP header.
"""
settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
settings.X_FRAME_OPTIONS = 'sameorigin'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_deny(self):
"""
Tests that the X_FRAME_OPTIONS setting can be set to DENY to
have the middleware use that value for the HTTP header.
"""
settings.X_FRAME_OPTIONS = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
settings.X_FRAME_OPTIONS = 'deny'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_defaults_sameorigin(self):
"""
Tests that if the X_FRAME_OPTIONS setting is not set then it defaults
to SAMEORIGIN.
"""
del settings.X_FRAME_OPTIONS
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
HttpResponse())
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
def test_dont_set_if_set(self):
"""
Tests that if the X-Frame-Options header is already set then the
middleware does not attempt to override it.
"""
settings.X_FRAME_OPTIONS = 'DENY'
response = HttpResponse()
response['X-Frame-Options'] = 'SAMEORIGIN'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
response = HttpResponse()
response['X-Frame-Options'] = 'DENY'
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'DENY')
def test_response_exempt(self):
"""
Tests that if the response has a xframe_options_exempt attribute set
to False then it still sets the header, but if it's set to True then
it does not.
"""
settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
response = HttpResponse()
response.xframe_options_exempt = False
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
response = HttpResponse()
response.xframe_options_exempt = True
r = XFrameOptionsMiddleware().process_response(HttpRequest(),
response)
self.assertEqual(r.get('X-Frame-Options', None), None)
    def test_is_extendable(self):
        """
        Tests that the XFrameOptionsMiddleware method that determines the
        X-Frame-Options header value can be overridden based on something in
        the request or response.
        """
        class OtherXFrameOptionsMiddleware(XFrameOptionsMiddleware):
            # This is just an example for testing purposes...
            def get_xframe_options_value(self, request, response):
                # Either side of the exchange may opt in to SAMEORIGIN.
                if getattr(request, 'sameorigin', False):
                    return 'SAMEORIGIN'
                if getattr(response, 'sameorigin', False):
                    return 'SAMEORIGIN'
                return 'DENY'

        settings.X_FRAME_OPTIONS = 'DENY'
        # Opt-in via an attribute on the response object.
        response = HttpResponse()
        response.sameorigin = True
        r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
                                                            response)
        self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
        # Opt-in via an attribute on the request object.
        request = HttpRequest()
        request.sameorigin = True
        r = OtherXFrameOptionsMiddleware().process_response(request,
                                                            HttpResponse())
        self.assertEqual(r['X-Frame-Options'], 'SAMEORIGIN')
        # Without any opt-in the override ignores the setting and denies.
        settings.X_FRAME_OPTIONS = 'SAMEORIGIN'
        r = OtherXFrameOptionsMiddleware().process_response(HttpRequest(),
                                                            HttpResponse())
        self.assertEqual(r['X-Frame-Options'], 'DENY')
class GZipMiddlewareTest(TestCase):
    """
    Tests the GZip middleware.
    """
    # Below the middleware's minimum-size threshold, so never compressed.
    short_string = "This string is too short to be worth compressing."
    # Highly repetitive, so gzip shrinks it well below its original size.
    compressible_string = 'a' * 500
    # Random bytes rarely compress smaller; exercises the "no gain" path.
    # (Python 2 idioms: chr()/xrange build a byte string here.)
    uncompressible_string = ''.join(chr(random.randint(0, 255)) for _ in xrange(500))
    # Iterator content simulates a streaming response body.
    iterator_as_content = iter(compressible_string)

    def setUp(self):
        # Minimal GET request advertising gzip support from a non-IE user
        # agent, plus a compressible 200 HTML response to pair it with.
        self.req = HttpRequest()
        self.req.META = {
            'SERVER_NAME': 'testserver',
            'SERVER_PORT': 80,
        }
        self.req.path = self.req.path_info = "/"
        self.req.META['HTTP_ACCEPT_ENCODING'] = 'gzip, deflate'
        self.req.META['HTTP_USER_AGENT'] = 'Mozilla/5.0 (Windows NT 5.1; rv:9.0.1) Gecko/20100101 Firefox/9.0.1'
        self.resp = HttpResponse()
        self.resp.status_code = 200
        self.resp.content = self.compressible_string
        self.resp['Content-Type'] = 'text/html; charset=UTF-8'

    @staticmethod
    def decompress(gzipped_string):
        # Round-trip helper: gunzip a byte string back to the original text.
        return gzip.GzipFile(mode='rb', fileobj=StringIO.StringIO(gzipped_string)).read()

    def test_compress_response(self):
        """
        Tests that compression is performed on responses with compressible content.
        """
        r = GZipMiddleware().process_response(self.req, self.resp)
        self.assertEqual(self.decompress(r.content), self.compressible_string)
        self.assertEqual(r.get('Content-Encoding'), 'gzip')
        self.assertEqual(r.get('Content-Length'), str(len(r.content)))

    def test_compress_non_200_response(self):
        """
        Tests that compression is performed on responses with a status other than 200.
        See #10762.
        """
        self.resp.status_code = 404
        r = GZipMiddleware().process_response(self.req, self.resp)
        self.assertEqual(self.decompress(r.content), self.compressible_string)
        self.assertEqual(r.get('Content-Encoding'), 'gzip')

    def test_no_compress_short_response(self):
        """
        Tests that compression isn't performed on responses with short content.
        """
        self.resp.content = self.short_string
        r = GZipMiddleware().process_response(self.req, self.resp)
        self.assertEqual(r.content, self.short_string)
        self.assertEqual(r.get('Content-Encoding'), None)

    def test_no_compress_compressed_response(self):
        """
        Tests that compression isn't performed on responses that are already compressed.
        """
        self.resp['Content-Encoding'] = 'deflate'
        r = GZipMiddleware().process_response(self.req, self.resp)
        self.assertEqual(r.content, self.compressible_string)
        self.assertEqual(r.get('Content-Encoding'), 'deflate')

    def test_no_compress_ie_js_requests(self):
        """
        Tests that compression isn't performed on JavaScript requests from Internet Explorer.
        """
        self.req.META['HTTP_USER_AGENT'] = 'Mozilla/4.0 (compatible; MSIE 5.00; Windows 98)'
        self.resp['Content-Type'] = 'application/javascript; charset=UTF-8'
        r = GZipMiddleware().process_response(self.req, self.resp)
        self.assertEqual(r.content, self.compressible_string)
        self.assertEqual(r.get('Content-Encoding'), None)

    def test_no_compress_uncompressible_response(self):
        """
        Tests that compression isn't performed on responses with uncompressible content.
        """
        self.resp.content = self.uncompressible_string
        r = GZipMiddleware().process_response(self.req, self.resp)
        self.assertEqual(r.content, self.uncompressible_string)
        self.assertEqual(r.get('Content-Encoding'), None)

    def test_streaming_compression(self):
        """
        Tests that iterators as response content return a compressed stream without consuming
        the whole response.content while doing so.
        See #24158.
        """
        self.resp.content = self.iterator_as_content
        r = GZipMiddleware().process_response(self.req, self.resp)
        self.assertEqual(self.decompress(''.join(r.content)), self.compressible_string)
        self.assertEqual(r.get('Content-Encoding'), 'gzip')
        # Streaming output has no known length up front.
        self.assertEqual(r.get('Content-Length'), None)
class ETagGZipMiddlewareTest(TestCase):
    """
    Tests if the ETag middleware behaves correctly with GZip middleware.
    """
    compressible_string = 'a' * 500

    def setUp(self):
        self.rf = RequestFactory()

    def test_compress_response(self):
        """
        Tests that ETag is changed after gzip compression is performed.
        """
        # With gzip accepted, the ETag must reflect the compressed body...
        request = self.rf.get('/', HTTP_ACCEPT_ENCODING='gzip, deflate')
        response = GZipMiddleware().process_response(request,
            CommonMiddleware().process_response(request,
                HttpResponse(self.compressible_string)))
        gzip_etag = response.get('ETag')
        # ...and without gzip it reflects the raw body, so the two differ.
        request = self.rf.get('/', HTTP_ACCEPT_ENCODING='')
        response = GZipMiddleware().process_response(request,
            CommonMiddleware().process_response(request,
                HttpResponse(self.compressible_string)))
        nogzip_etag = response.get('ETag')
        self.assertNotEqual(gzip_etag, nogzip_etag)
# Enable USE_ETAGS for every test in the class (decorator applied by hand,
# in the pre-class-decorator style used throughout this file's vintage).
ETagGZipMiddlewareTest = override_settings(USE_ETAGS=True)(ETagGZipMiddlewareTest)
class TransactionMiddlewareTest(TransactionTestCase):
"""
Test the transaction middleware.
"""
def setUp(self):
self.request = HttpRequest()
self.request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
self.request.path = self.request.path_info = "/"
self.response = HttpResponse()
self.response.status_code = 200
def test_request(self):
TransactionMiddleware().process_request(self.request)
self.assertTrue(transaction.is_managed())
def test_managed_response(self):
transaction.enter_transaction_management()
transaction.managed(True)
Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty())
TransactionMiddleware().process_response(self.request, self.response)
self.assertFalse(transaction.is_dirty())
self.assertEqual(Band.objects.count(), 1)
def test_unmanaged_response(self):
transaction.managed(False)
TransactionMiddleware().process_response(self.request, self.response)
self.assertFalse(transaction.is_managed())
self.assertFalse(transaction.is_dirty())
def test_exception(self):
transaction.enter_transaction_management()
transaction.managed(True)
Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty())
TransactionMiddleware().process_exception(self.request, None)
self.assertEqual(Band.objects.count(), 0)
self.assertFalse(transaction.is_dirty())
def test_failing_commit(self):
# It is possible that connection.commit() fails. Check that
# TransactionMiddleware handles such cases correctly.
try:
def raise_exception():
raise IntegrityError()
connections[DEFAULT_DB_ALIAS].commit = raise_exception
transaction.enter_transaction_management()
transaction.managed(True)
Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty())
with self.assertRaises(IntegrityError):
TransactionMiddleware().process_response(self.request, None)
self.assertEqual(Band.objects.count(), 0)
self.assertFalse(transaction.is_dirty())
self.assertFalse(transaction.is_managed())
finally:
del connections[DEFAULT_DB_ALIAS].commit
| bsd-3-clause |
gauravbose/digital-menu | digimenu2/tests/proxy_model_inheritance/tests.py | 13 | 1747 | from __future__ import absolute_import, unicode_literals
import os
from django.core.management import call_command
from django.test import TestCase, TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils._os import upath
from .models import (
ConcreteModel, ConcreteModelSubclass, ConcreteModelSubclassProxy,
)
class ProxyModelInheritanceTests(TransactionTestCase):
    """
    Proxy model inheritance across apps can result in migrate not creating the table
    for the proxied model (as described in #12286). This test creates two dummy
    apps and calls migrate, then verifies that the table has been created.
    """
    available_apps = []

    def test_table_exists(self):
        # Make the dummy apps importable, install them, and run migrate.
        with extend_sys_path(os.path.dirname(os.path.abspath(upath(__file__)))):
            with self.modify_settings(INSTALLED_APPS={'append': ['app1', 'app2']}):
                call_command('migrate', verbosity=0)
                from app1.models import ProxyModel
                from app2.models import NiceModel
                # Querying proves both tables exist (it would raise otherwise).
                self.assertEqual(NiceModel.objects.all().count(), 0)
                self.assertEqual(ProxyModel.objects.all().count(), 0)
class MultiTableInheritanceProxyTest(TestCase):

    def test_model_subclass_proxy(self):
        """
        Deleting an instance of a model proxying a multi-table inherited
        subclass should cascade delete down the whole inheritance chain (see
        #18083).
        """
        instance = ConcreteModelSubclassProxy.objects.create()
        instance.delete()
        # Deleting via the proxy must remove the rows at every level.
        self.assertEqual(0, ConcreteModelSubclassProxy.objects.count())
        self.assertEqual(0, ConcreteModelSubclass.objects.count())
        self.assertEqual(0, ConcreteModel.objects.count())
| bsd-3-clause |
michaelgallacher/intellij-community | python/helpers/py2only/docutils/readers/pep.py | 136 | 1555 | # $Id: pep.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__ = 'reStructuredText'
from docutils.readers import standalone
from docutils.transforms import peps, references, misc, frontmatter
from docutils.parsers import rst
class Reader(standalone.Reader):

    supported = ('pep',)
    """Contexts this reader supports."""

    settings_spec = (
        'PEP Reader Option Defaults',
        'The --pep-references and --rfc-references options (for the '
        'reStructuredText parser) are on by default.',
        ())

    config_section = 'pep reader'
    config_section_dependencies = ('readers', 'standalone reader')

    def get_transforms(self):
        # Start from the standalone reader's transform list, then swap the
        # generic frontmatter handling for the PEP-specific transforms.
        transforms = standalone.Reader.get_transforms(self)
        # We have PEP-specific frontmatter handling.
        transforms.remove(frontmatter.DocTitle)
        transforms.remove(frontmatter.SectionSubTitle)
        transforms.remove(frontmatter.DocInfo)
        transforms.extend([peps.Headers, peps.Contents, peps.TargetNotes])
        return transforms

    # PEP and RFC cross-reference recognition is enabled by default.
    settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}

    inliner_class = rst.states.Inliner

    def __init__(self, parser=None, parser_name=None):
        """`parser` should be ``None``."""
        if parser is None:
            # PEPs embed RFC 2822 headers; enable that mode in the parser.
            parser = rst.Parser(rfc2822=True, inliner=self.inliner_class())
        standalone.Reader.__init__(self, parser, '')
| apache-2.0 |
dralves/nixysa | third_party/ply-3.1/example/yply/ylex.py | 165 | 2198 | # lexer for yacc-grammars
#
# Author: David Beazley (dave@dabeaz.com)
# Date : October 2, 2006
import sys
sys.path.append("../..")
from ply import *
# Token names produced by this lexer for the yacc-grammar parser.
tokens = (
    'LITERAL','SECTION','TOKEN','LEFT','RIGHT','PREC','START','TYPE','NONASSOC','UNION','CODE',
    'ID','QLITERAL','NUMBER',
    )

# Exclusive 'code' state: entered on '{', used to slurp embedded C actions.
states = (('code','exclusive'),)

literals = [ ';', ',', '<', '>', '|',':' ]
t_ignore = ' \t'

# NOTE: in PLY, these strings (and the function docstrings below) ARE the
# token regular expressions — they are behavior, not documentation.
t_TOKEN = r'%token'
t_LEFT = r'%left'
t_RIGHT = r'%right'
t_NONASSOC = r'%nonassoc'
t_PREC = r'%prec'
t_START = r'%start'
t_TYPE = r'%type'
t_UNION = r'%union'
t_ID = r'[a-zA-Z_][a-zA-Z_0-9]*'
t_QLITERAL = r'''(?P<quote>['"]).*?(?P=quote)'''
t_NUMBER = r'\d+'
def t_SECTION(t):
    r'%%'
    # On the second '%%' everything after it is captured as one token value
    # and the lexer is fast-forwarded to EOF.
    # NOTE(review): 'lastsection' is only ever set to 0 here — presumably the
    # companion parser sets it truthy after the first section; confirm in
    # yparse.py.
    if getattr(t.lexer,"lastsection",0):
        t.value = t.lexer.lexdata[t.lexpos+2:]
        t.lexer.lexpos = len(t.lexer.lexdata)
    else:
        t.lexer.lastsection = 0
    return t
# Comments
def t_ccomment(t):
    r'/\*(.|\n)*?\*/'
    # C-style comment: keep the line counter accurate, emit nothing.
    t.lexer.lineno += t.value.count('\n')

# C++-style comments are discarded outright.
t_ignore_cppcomment = r'//.*'
def t_LITERAL(t):
    r'%\{(.|\n)*?%\}'
    # %{ ... %} literal block, passed through verbatim as a single token.
    t.lexer.lineno += t.value.count("\n")
    return t
def t_NEWLINE(t):
    r'\n'
    # Track line numbers; newlines produce no token.
    t.lexer.lineno += 1
def t_code(t):
    r'\{'
    # Opening brace of an embedded action: remember where it starts, reset
    # the nesting depth, and switch to the exclusive 'code' state.
    t.lexer.codestart = t.lexpos
    t.lexer.level = 1
    t.lexer.begin('code')
# Inside the 'code' state, strings, chars, and comments are skipped whole so
# that braces inside them cannot disturb the nesting count.
def t_code_ignore_string(t):
    r'\"([^\\\n]|(\\.))*?\"'

def t_code_ignore_char(t):
    r'\'([^\\\n]|(\\.))*?\''

def t_code_ignore_comment(t):
    r'/\*(.|\n)*?\*/'

def t_code_ignore_cppcom(t):
    r'//.*'
def t_code_lbrace(t):
    r'\{'
    # Nested brace inside embedded code: increase depth.
    t.lexer.level += 1
def t_code_rbrace(t):
    r'\}'
    t.lexer.level -= 1
    # Depth back to zero: the action is complete. Emit the whole braced
    # region (recorded from codestart) as one CODE token and return to the
    # default lexer state.
    if t.lexer.level == 0:
        t.type = 'CODE'
        t.value = t.lexer.lexdata[t.lexer.codestart:t.lexpos+1]
        t.lexer.begin('INITIAL')
        t.lexer.lineno += t.value.count('\n')
        return t
# Catch-alls for the 'code' state: swallow everything that is not a brace,
# quote, or whitespace. t_ignore must be defined (even empty) for the state.
t_code_ignore_nonspace = r'[^\s\}\'\"\{]+'
t_code_ignore_whitespace = r'\s+'
t_code_ignore = ""
def t_code_error(t):
    # Should be unreachable: the catch-alls above match any character.
    raise RuntimeError
def t_error(t):
    # Report the offending character and resynchronize by skipping it.
    # (Python 2 print statements — this file targets Python 2.)
    print "%d: Illegal character '%s'" % (t.lexer.lineno, t.value[0])
    print t.value
    t.lexer.skip(1)
# Build the lexer at import time so yparse.py can use it directly.
lex.lex()

if __name__ == '__main__':
    # Standalone mode: tokenize stdin/argv input and dump the tokens.
    lex.runmain()
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/storage/azure-storage-blob-changefeed/tests/conftest.py | 2 | 1748 | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import platform
import sys
# fixture needs to be visible from conftest
from _shared.testcase import storage_account
# Ignore async tests for Python < 3.5
# (async syntax is unavailable there; PyPy is excluded for the same reason
# this repo's async transport is CPython-only — see setup docs.)
collect_ignore_glob = []
if sys.version_info < (3, 5) or platform.python_implementation() == "PyPy":
    collect_ignore_glob.append("*_async.py")
def pytest_configure(config):
    # Register the storage_account fixture for all tests via an ini-style
    # 'usefixtures' line.
    # NOTE(review): the original comment said "register an additional
    # marker", but this adds a 'usefixtures' ini value — confirm intent.
    config.addinivalue_line(
        "usefixtures", "storage_account"
    )
| mit |
sameersingh/uci-statnlp | hw3/train.py | 1 | 4787 | import argparse
import copy
import datetime
import json
import os
import random
import sys
import time
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from dataset import TwitterDataset, Vocabulary
from util import load_object_from_dict
def load_datasets(train_dataset_params: dict, validation_dataset_params: dict):
    """Build the train/validation ``TwitterDataset`` pair with a shared vocab.

    The token and tag vocabularies are derived from the *training* split only,
    then attached to both datasets so token/tag IDs agree across splits.

    :param train_dataset_params: kwargs forwarded to ``TwitterDataset``.
    :param validation_dataset_params: kwargs forwarded to ``TwitterDataset``.
    :return: ``(train_dataset, validation_dataset)``.
    """
    # load PyTorch ``Dataset`` objects for the train & validation sets
    train_dataset = TwitterDataset(**train_dataset_params)
    validation_dataset = TwitterDataset(**validation_dataset_params)

    # use tokens and tags in the training set to create `Vocabulary` objects
    token_vocab = Vocabulary(train_dataset.get_tokens_list(), add_unk_token=True)
    tag_vocab = Vocabulary(train_dataset.get_tags_list())

    # add `Vocabulary` objects to datasets for tokens/tags to ID mapping
    train_dataset.set_vocab(token_vocab, tag_vocab)
    validation_dataset.set_vocab(token_vocab, tag_vocab)
    return train_dataset, validation_dataset
def train(
    model: torch.nn.Module,
    train_dataloader: DataLoader,
    validation_dataloader: DataLoader,
    optimizer: torch.optim.Optimizer,
    num_epochs: int,
    serialization_dir: str
):
    """Train ``model`` for ``num_epochs`` epochs and checkpoint the best one.

    After every epoch the model is evaluated on the validation set; per-epoch
    metrics are written to ``{serialization_dir}/metrics_epoch_N.json``. The
    model with the lowest validation loss is saved to
    ``{serialization_dir}/model.pt`` along with ``best_metrics.json``.

    :param model: model exposing ``get_metrics(header=...)`` and returning a
        dict with a ``'loss'`` tensor from its forward pass.
    :param train_dataloader: batches for the training pass.
    :param validation_dataloader: batches for the validation pass.
    :param optimizer: optimizer stepping ``model``'s parameters.
    :param num_epochs: number of full passes over the training data.
    :param serialization_dir: existing directory for metrics and checkpoints.
    """
    start = time.time()
    best_metrics = {'validation_loss': 10e10}  # sentinel: any real loss beats it
    best_model = None
    for epoch_num in range(num_epochs):
        # ---- training pass ----
        model.train()
        for batch in tqdm(train_dataloader, f'Epoch {epoch_num}'):
            optimizer.zero_grad()
            output_dict = model(**batch)
            output_dict['loss'].backward()
            optimizer.step()
        cur_epoch_metrics = model.get_metrics(header='train_')

        # ---- validation pass ----
        # no_grad(): we only need metrics here, so skip building the autograd
        # graph — saves memory and time without changing any metric values.
        model.eval()
        with torch.no_grad():
            for batch in validation_dataloader:
                model(**batch)
        cur_epoch_metrics.update(model.get_metrics(header='validation_'))

        # write the current epochs statistics to file
        with open(f'{serialization_dir}/metrics_epoch_{epoch_num}.json', 'w') as f:
            cur_epoch_metrics['epoch_num'] = epoch_num
            print(json.dumps(cur_epoch_metrics, indent=4))
            f.write(json.dumps(cur_epoch_metrics, indent=4))

        # check if current model is the best so far.
        if cur_epoch_metrics['validation_loss'] < best_metrics['validation_loss']:
            print('Best validation loss thus far...\n')
            best_model = copy.deepcopy(model)
            best_metrics = copy.deepcopy(cur_epoch_metrics)

    # write the best metrics we got and best model
    with open(f'{serialization_dir}/best_metrics.json', 'w') as f:
        best_metrics['run_time'] = str(datetime.timedelta(seconds=time.time()-start))
        print(f"Best Performing Model {json.dumps(best_metrics, indent=4)}")
        f.write(json.dumps(best_metrics, indent=4))
    torch.save(best_model, f'{serialization_dir}/model.pt')
def main():
    """CLI entry point: read a JSON config, train, and serialize results.

    Refuses to run if the serialization directory already exists, so a run
    can never silently overwrite an earlier experiment.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("config_path", help="path to configuration file")
    parser.add_argument("-s", "--serialization_dir", required=True,
                        help="save directory for model, dataset, and metrics")
    args = parser.parse_args()

    config = json.load(open(args.config_path))
    serialization_dir = args.serialization_dir

    # Seed both RNGs for reproducibility across runs of the same config.
    random.seed(config['random_seed'])
    torch.manual_seed(config['random_seed'])

    if os.path.isdir(serialization_dir):
        sys.exit(f"{serialization_dir}, already exists. Please specify a new "
                 f"serialization directory or erase the existing one.")
    else:
        os.makedirs(serialization_dir)
        # Snapshot the config next to the outputs for provenance.
        with open(f'{serialization_dir}/config.json', 'w') as f:
            f.write(json.dumps(config, indent=4))

    # load PyTorch `Dataset` and `DataLoader` objects
    train_dataset, validation_dataset = load_datasets(
        train_dataset_params=config['train_dataset'],
        validation_dataset_params=config['validation_dataset']
    )
    batch_size = config['training']['batch_size']
    train_dataloader = DataLoader(train_dataset, batch_size)
    validation_dataloader = DataLoader(validation_dataset, batch_size)

    # load model (constructed reflectively from the config dict)
    model = load_object_from_dict(config['model'],
                                  token_vocab=train_dataset.token_vocab,
                                  tag_vocab=train_dataset.tag_vocab)
    # load optimizer
    optimizer = load_object_from_dict(config['training']['optimizer'],
                                      params=model.parameters())

    train(
        model=model,
        train_dataloader=train_dataloader,
        validation_dataloader=validation_dataloader,
        optimizer=optimizer,
        num_epochs=config['training']['num_epochs'],
        serialization_dir=serialization_dir
    )


if __name__ == "__main__":
    main()
| apache-2.0 |
tobspr/LUI | Builtin/LUILabel.py | 1 | 2261 |
from panda3d.lui import LUIText
from LUIObject import LUIObject
from LUIInitialState import LUIInitialState
__all__ = ["LUILabel"]
class LUILabel(LUIObject):
    """ A simple label, displaying text. """

    # Default variables which can be overridden by skins
    DEFAULT_COLOR = (0.9, 0.9, 0.9, 1)   # light grey RGBA
    DEFAULT_USE_SHADOW = True

    def __init__(self, text=u"Label", shadow=None, font_size=14, font="label", color=None, wordwrap=False, **kwargs):
        """ Creates a new label. If shadow is True, a small text shadow will be
        rendered below the actual text. """
        LUIObject.__init__(self)
        LUIInitialState.init(self, kwargs)
        # Main text layer. LUIText expects encoded bytes (Python 2 API).
        self._text = LUIText(
            self,
            text.encode('utf-8'),
            font,
            font_size,
            0,
            0,
            wordwrap
        )
        # Render the text above the (optional) shadow layer.
        self._text.z_offset = 1

        if color is None:
            self.color = LUILabel.DEFAULT_COLOR
        else:
            self.color = color

        if shadow is None:
            shadow = LUILabel.DEFAULT_USE_SHADOW
        self._have_shadow = shadow
        if self._have_shadow:
            # Shadow: a second, dark, semi-transparent copy offset 1px down.
            self._shadow_text = LUIText(
                self,
                text.encode('utf-8'),
                font,
                font_size,
                0,
                0,
                wordwrap
            )
            self._shadow_text.top = 1
            self._shadow_text.color = (0,0,0,0.6)

    def get_text_handle(self):
        """ Returns a handle to the internal used LUIText object """
        return self._text

    text_handle = property(get_text_handle)

    def get_text(self):
        """ Returns the current text of the label """
        return self._text.text

    def set_text(self, text):
        """ Sets the text of the label (shadow layer is kept in sync) """
        self._text.text = text.encode('utf-8')
        if self._have_shadow:
            self._shadow_text.text = text.encode('utf-8')

    text = property(get_text, set_text)

    def get_color(self):
        """ Returns the current color of the label's text """
        return self._text.color

    def set_color(self, color):
        """ Sets the color of the label's text (shadow color is unchanged) """
        self._text.color = color

    color = property(get_color, set_color)
| mit |
ryfeus/lambda-packs | Sklearn_scipy_numpy/source/sklearn/datasets/samples_generator.py | 20 | 56502 | """
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
    """Returns distinct binary samples of length dimensions
    """
    # Sampling indexes 2**dimensions cells, so at most 30 bits fit in the
    # 32-bit sampler; wider requests are split and stacked recursively.
    if dimensions > 30:
        high_bits = _generate_hypercube(samples, dimensions - 30, rng)
        low_bits = _generate_hypercube(samples, 30, rng)
        return np.hstack([high_bits, low_bits])
    drawn = sample_without_replacement(2 ** dimensions, samples,
                                       random_state=rng)
    packed = astype(drawn, dtype='>u4', copy=False)
    # Unpack each big-endian uint32 into its 32 bits, keep the low ones.
    bits = np.unpackbits(packed.view('>u1')).reshape((-1, 32))
    return bits[:, -dimensions:]
def make_classification(n_samples=100, n_features=20, n_informative=2,
                        n_redundant=2, n_repeated=0, n_classes=2,
                        n_clusters_per_class=2, weights=None, flip_y=0.01,
                        class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
                        shuffle=True, random_state=None):
    """Generate a random n-class classification problem.

    This initially creates clusters of points normally distributed (std=1)
    about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
    number of clusters to each class. It introduces interdependence between
    these features and adds various types of further noise to the data.

    Prior to shuffling, `X` stacks a number of these primary "informative"
    features, "redundant" linear combinations of these, "repeated" duplicates
    of sampled features, and arbitrary noise for and remaining features.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features. These comprise `n_informative`
        informative features, `n_redundant` redundant features, `n_repeated`
        duplicated features and `n_features-n_informative-n_redundant-
        n_repeated` useless features drawn at random.

    n_informative : int, optional (default=2)
        The number of informative features. Each class is composed of a number
        of gaussian clusters each located around the vertices of a hypercube
        in a subspace of dimension `n_informative`. For each cluster,
        informative features are drawn independently from N(0, 1) and then
        randomly linearly combined within each cluster in order to add
        covariance. The clusters are then placed on the vertices of the
        hypercube.

    n_redundant : int, optional (default=2)
        The number of redundant features. These features are generated as
        random linear combinations of the informative features.

    n_repeated : int, optional (default=0)
        The number of duplicated features, drawn randomly from the informative
        and the redundant features.

    n_classes : int, optional (default=2)
        The number of classes (or labels) of the classification problem.

    n_clusters_per_class : int, optional (default=2)
        The number of clusters per class.

    weights : list of floats or None (default=None)
        The proportions of samples assigned to each class. If None, then
        classes are balanced. Note that if `len(weights) == n_classes - 1`,
        then the last class weight is automatically inferred.
        More than `n_samples` samples may be returned if the sum of `weights`
        exceeds 1.

    flip_y : float, optional (default=0.01)
        The fraction of samples whose class are randomly exchanged.

    class_sep : float, optional (default=1.0)
        The factor multiplying the hypercube dimension.

    hypercube : boolean, optional (default=True)
        If True, the clusters are put on the vertices of a hypercube. If
        False, the clusters are put on the vertices of a random polytope.

    shift : float, array of shape [n_features] or None, optional (default=0.0)
        Shift features by the specified value. If None, then features
        are shifted by a random value drawn in [-class_sep, class_sep].

    scale : float, array of shape [n_features] or None, optional (default=1.0)
        Multiply features by the specified value. If None, then features
        are scaled by a random value drawn in [1, 100]. Note that scaling
        happens after shifting.

    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    y : array of shape [n_samples]
        The integer labels for class membership of each sample.

    Notes
    -----
    The algorithm is adapted from Guyon [1] and was designed to generate
    the "Madelon" dataset.

    References
    ----------
    .. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
           selection benchmark", 2003.

    See also
    --------
    make_blobs: simplified variant
    make_multilabel_classification: unrelated generator for multilabel tasks
    """
    generator = check_random_state(random_state)

    # Count features, clusters and samples
    if n_informative + n_redundant + n_repeated > n_features:
        raise ValueError("Number of informative, redundant and repeated "
                         "features must sum to less than the number of total"
                         " features")
    if 2 ** n_informative < n_classes * n_clusters_per_class:
        raise ValueError("n_classes * n_clusters_per_class must"
                         " be smaller or equal 2 ** n_informative")
    if weights and len(weights) not in [n_classes, n_classes - 1]:
        raise ValueError("Weights specified but incompatible with number "
                         "of classes.")

    n_useless = n_features - n_informative - n_redundant - n_repeated
    n_clusters = n_classes * n_clusters_per_class

    # With n_classes - 1 weights given, infer the last one.
    if weights and len(weights) == (n_classes - 1):
        weights.append(1.0 - sum(weights))

    if weights is None:
        weights = [1.0 / n_classes] * n_classes
        weights[-1] = 1.0 - sum(weights[:-1])

    # Distribute samples among clusters by weight
    n_samples_per_cluster = []
    for k in range(n_clusters):
        n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
                                     / n_clusters_per_class))
    # Hand out any rounding leftovers one sample at a time.
    for i in range(n_samples - sum(n_samples_per_cluster)):
        n_samples_per_cluster[i % n_clusters] += 1

    # Initialize X and y
    X = np.zeros((n_samples, n_features))
    y = np.zeros(n_samples, dtype=np.int)

    # Build the polytope whose vertices become cluster centroids
    centroids = _generate_hypercube(n_clusters, n_informative,
                                    generator).astype(float)
    centroids *= 2 * class_sep
    centroids -= class_sep
    if not hypercube:
        centroids *= generator.rand(n_clusters, 1)
        centroids *= generator.rand(1, n_informative)

    # Initially draw informative features from the standard normal
    X[:, :n_informative] = generator.randn(n_samples, n_informative)

    # Create each cluster; a variant of make_blobs
    stop = 0
    for k, centroid in enumerate(centroids):
        start, stop = stop, stop + n_samples_per_cluster[k]
        y[start:stop] = k % n_classes  # assign labels
        X_k = X[start:stop, :n_informative]  # slice a view of the cluster

        A = 2 * generator.rand(n_informative, n_informative) - 1
        X_k[...] = np.dot(X_k, A)  # introduce random covariance

        X_k += centroid  # shift the cluster to a vertex

    # Create redundant features
    if n_redundant > 0:
        B = 2 * generator.rand(n_informative, n_redundant) - 1
        X[:, n_informative:n_informative + n_redundant] = \
            np.dot(X[:, :n_informative], B)

    # Repeat some features
    if n_repeated > 0:
        n = n_informative + n_redundant
        indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
        X[:, n:n + n_repeated] = X[:, indices]

    # Fill useless features
    if n_useless > 0:
        X[:, -n_useless:] = generator.randn(n_samples, n_useless)

    # Randomly replace labels
    if flip_y >= 0.0:
        flip_mask = generator.rand(n_samples) < flip_y
        y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())

    # Randomly shift and scale
    if shift is None:
        shift = (2 * generator.rand(n_features) - 1) * class_sep
    X += shift

    if scale is None:
        scale = 1 + 100 * generator.rand(n_features)
    X *= scale

    if shuffle:
        # Randomly permute samples
        X, y = util_shuffle(X, y, random_state=generator)

        # Randomly permute features
        indices = np.arange(n_features)
        generator.shuffle(indices)
        X[:, :] = X[:, indices]

    return X, y
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
                                   n_labels=2, length=50, allow_unlabeled=True,
                                   sparse=False, return_indicator='dense',
                                   return_distributions=False,
                                   random_state=None):
    """Generate a random multilabel classification problem.

    For each sample, the generative process is:
        - pick the number of labels: n ~ Poisson(n_labels)
        - n times, choose a class c: c ~ Multinomial(theta)
        - pick the document length: k ~ Poisson(length)
        - k times, choose a word: w ~ Multinomial(theta_c)

    In the above process, rejection sampling is used to make sure that
    n is never zero or more than `n_classes`, and that the document length
    is never zero. Likewise, we reject classes which have already been chosen.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.

    n_features : int, optional (default=20)
        The total number of features.

    n_classes : int, optional (default=5)
        The number of classes of the classification problem.

    n_labels : int, optional (default=2)
        The average number of labels per instance. More precisely, the number
        of labels per sample is drawn from a Poisson distribution with
        ``n_labels`` as its expected value, but samples are bounded (using
        rejection sampling) by ``n_classes``, and must be nonzero if
        ``allow_unlabeled`` is False.

    length : int, optional (default=50)
        The sum of the features (number of words if documents) is drawn from
        a Poisson distribution with this expected value.

    allow_unlabeled : bool, optional (default=True)
        If ``True``, some instances might not belong to any class.

    sparse : bool, optional (default=False)
        If ``True``, return a sparse feature matrix

        .. versionadded:: 0.17
           parameter to allow *sparse* output.

    return_indicator : 'dense' (default) | 'sparse' | False
        If ``dense`` return ``Y`` in the dense binary indicator format. If
        ``'sparse'`` return ``Y`` in the sparse binary indicator format.
        ``False`` returns a list of lists of labels.

    return_distributions : bool, optional (default=False)
        If ``True``, return the prior class probability and conditional
        probabilities of features given classes, from which the data was
        drawn.

    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.

    Y : array or sparse CSR matrix of shape [n_samples, n_classes]
        The label sets.

    p_c : array, shape [n_classes]
        The probability of each class being drawn. Only returned if
        ``return_distributions=True``.

    p_w_c : array, shape [n_features, n_classes]
        The probability of each feature being drawn given each class.
        Only returned if ``return_distributions=True``.

    """
    generator = check_random_state(random_state)
    # Random class prior p(c) and per-class word distributions p(w|c),
    # each normalized to sum to one.
    p_c = generator.rand(n_classes)
    p_c /= p_c.sum()
    cumulative_p_c = np.cumsum(p_c)
    p_w_c = generator.rand(n_features, n_classes)
    p_w_c /= np.sum(p_w_c, axis=0)

    def sample_example():
        # Draw one (word indices, label list) pair from the generative model.
        _, n_classes = p_w_c.shape

        # pick a nonzero number of labels per document by rejection sampling
        y_size = n_classes + 1
        while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
            y_size = generator.poisson(n_labels)

        # pick n classes
        y = set()
        while len(y) != y_size:
            # pick a class with probability P(c)
            c = np.searchsorted(cumulative_p_c,
                                generator.rand(y_size - len(y)))
            y.update(c)
        y = list(y)

        # pick a non-zero document length by rejection sampling
        n_words = 0
        while n_words == 0:
            n_words = generator.poisson(length)

        # generate a document of length n_words
        if len(y) == 0:
            # if sample does not belong to any class, generate noise word
            words = generator.randint(n_features, size=n_words)
            return words, y

        # sample words with replacement from selected classes
        cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
        cumulative_p_w_sample /= cumulative_p_w_sample[-1]
        words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
        return words, y

    # Accumulate word counts in CSR form (indices + row pointers).
    X_indices = array.array('i')
    X_indptr = array.array('i', [0])
    Y = []
    for i in range(n_samples):
        words, y = sample_example()
        X_indices.extend(words)
        X_indptr.append(len(X_indices))
        Y.append(y)
    X_data = np.ones(len(X_indices), dtype=np.float64)
    X = sp.csr_matrix((X_data, X_indices, X_indptr),
                      shape=(n_samples, n_features))
    # Merge repeated word indices into per-word counts.
    X.sum_duplicates()
    if not sparse:
        X = X.toarray()

    # return_indicator can be True due to backward compatibility
    if return_indicator in (True, 'sparse', 'dense'):
        lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
        Y = lb.fit([range(n_classes)]).transform(Y)
    elif return_indicator is not False:
        raise ValueError("return_indicator must be either 'sparse', 'dense' "
                         'or False.')
    if return_distributions:
        return X, Y, p_c, p_w_c
    return X, Y
def make_hastie_10_2(n_samples=12000, random_state=None):
    """Generate the binary classification data of Hastie et al. 2009, Ex. 10.2.

    Each sample has ten i.i.d. standard Gaussian features; the target is
    +1 when the squared norm of the feature vector exceeds 9.34, and -1
    otherwise.

    Parameters
    ----------
    n_samples : int, optional (default=12000)
        The number of samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, 10]
        The input samples.
    y : array of shape [n_samples]
        The output values (+1.0 or -1.0).

    References
    ----------
    .. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
           Learning Ed. 2", Springer, 2009.

    See also
    --------
    make_gaussian_quantiles: a generalization of this dataset approach
    """
    generator = check_random_state(random_state)

    # Single Gaussian draw; the original's trailing reshape to the same
    # shape was a no-op and is omitted here.
    X = generator.normal(size=(n_samples, 10))
    squared_norm = (X ** 2.0).sum(axis=1)
    y = np.where(squared_norm > 9.34, 1.0, -1.0)
    return X, y
def make_regression(n_samples=100, n_features=100, n_informative=10,
                    n_targets=1, bias=0.0, effective_rank=None,
                    tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
                    random_state=None):
    """Generate a random linear regression problem.

    The design matrix is either well conditioned (default) or, when
    `effective_rank` is given, low rank with a fat singular-value tail
    (see :func:`make_low_rank_matrix`).  The output applies a (possibly
    biased) linear model with `n_informative` non-zero coefficients, plus
    optional centered Gaussian noise.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    n_informative : int, optional (default=10)
        Number of features with non-zero coefficients in the underlying
        linear model (capped at `n_features`).
    n_targets : int, optional (default=1)
        Dimension of the y output associated with each sample; by default
        the output is a scalar.
    bias : float, optional (default=0.0)
        The bias term in the underlying linear model.
    effective_rank : int or None, optional (default=None)
        If not None, approximate number of singular vectors needed to
        explain most of the input data; if None, the input is well
        conditioned, centered, Gaussian with unit variance.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        Relative importance of the fat noisy tail of the singular values
        profile when `effective_rank` is not None.
    noise : float, optional (default=0.0)
        Standard deviation of the Gaussian noise applied to the output.
    shuffle : boolean, optional (default=True)
        Shuffle the samples and the features.
    coef : boolean, optional (default=False)
        If True, also return the coefficients of the underlying model.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples] or [n_samples, n_targets]
        The output values.
    coef : array of shape [n_features] or [n_features, n_targets], optional
        Coefficients of the underlying linear model; only if ``coef=True``.
    """
    n_informative = min(n_features, n_informative)
    generator = check_random_state(random_state)

    if effective_rank is None:
        # Well conditioned: i.i.d. standard normal design.
        X = generator.randn(n_samples, n_features)
    else:
        # Low rank design with a fat singular-value tail.
        X = make_low_rank_matrix(n_samples=n_samples,
                                 n_features=n_features,
                                 effective_rank=effective_rank,
                                 tail_strength=tail_strength,
                                 random_state=generator)

    # Only the first n_informative coefficients are non-zero so that sparse
    # regularizers (L1, elastic net) can recover the support.
    true_coef = np.zeros((n_features, n_targets))
    true_coef[:n_informative, :] = 100 * generator.rand(n_informative,
                                                        n_targets)

    y = np.dot(X, true_coef) + bias

    if noise > 0.0:
        y += generator.normal(scale=noise, size=y.shape)

    if shuffle:
        # Rows first, then columns of X together with the coefficients, so
        # the generator is consumed in the same order as before.
        X, y = util_shuffle(X, y, random_state=generator)

        feature_order = np.arange(n_features)
        generator.shuffle(feature_order)
        X[:, :] = X[:, feature_order]
        true_coef = true_coef[feature_order]

    y = np.squeeze(y)
    if coef:
        return X, y, np.squeeze(true_coef)
    return X, y
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
                 factor=.8):
    """Make a large circle containing a smaller circle in 2d.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.  When `n_samples` is odd, the
        inner circle receives one more point than the outer circle.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.
    factor : double < 1 (default=.8)
        Scale factor between inner and outer circle.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample
        (0 = outer circle, 1 = inner circle).
    """
    if factor > 1 or factor < 0:
        raise ValueError("'factor' has to be between 0 and 1.")

    generator = check_random_state(random_state)

    # Bug fix: the previous implementation used n_samples // 2 points for
    # each circle, so for odd n_samples X silently had n_samples - 1 rows.
    # Generate the two circles with explicit, complementary sizes instead;
    # for even n_samples the output is unchanged.
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    # endpoint=False avoids duplicating the first point at 2*pi, matching
    # the previous "add one point, then drop it" trick.
    linspace_out = np.linspace(0, 2 * np.pi, n_samples_out, endpoint=False)
    linspace_in = np.linspace(0, 2 * np.pi, n_samples_in, endpoint=False)
    outer_circ_x = np.cos(linspace_out)
    outer_circ_y = np.sin(linspace_out)
    inner_circ_x = np.cos(linspace_in) * factor
    inner_circ_y = np.sin(linspace_in) * factor

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
    """Make two interleaving half circles.

    A simple toy dataset to visualize clustering and classification
    algorithms.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The total number of points generated.
    shuffle : bool, optional (default=True)
        Whether to shuffle the samples.
    noise : double or None (default=None)
        Standard deviation of Gaussian noise added to the data.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, 2]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels (0 or 1) for class membership of each sample
        (0 = outer moon, 1 = inner moon).
    """
    n_samples_out = n_samples // 2
    n_samples_in = n_samples - n_samples_out

    generator = check_random_state(random_state)

    outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
    outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
    inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
    inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5

    X = np.vstack((np.append(outer_circ_x, inner_circ_x),
                   np.append(outer_circ_y, inner_circ_y))).T
    # Bug fix: labels must follow the stacking order of X, which puts the
    # n_samples_out outer-moon points first.  The previous code built
    # zeros(n_samples_in) followed by ones(n_samples_out), mislabeling one
    # boundary point whenever n_samples was odd (no effect for even n).
    y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
                   np.ones(n_samples_in, dtype=np.intp)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    if noise is not None:
        X += generator.normal(scale=noise, size=X.shape)

    return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
               center_box=(-10.0, 10.0), shuffle=True, random_state=None):
    """Generate isotropic Gaussian blobs for clustering.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        Total number of points, divided as evenly as possible among the
        clusters.
    n_features : int, optional (default=2)
        Number of features for each sample (inferred from `centers` when
        explicit center locations are passed).
    centers : int or array of shape [n_centers, n_features], optional
        (default=3)
        Number of centers to generate, or the fixed center locations.
    cluster_std : float or sequence of floats, optional (default=1.0)
        Standard deviation of the clusters.
    center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
        Bounding box for each randomly generated cluster center.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for cluster membership of each sample.

    Examples
    --------
    >>> from sklearn.datasets.samples_generator import make_blobs
    >>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
    ...                   random_state=0)
    >>> print(X.shape)
    (10, 2)
    >>> y
    array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])

    See also
    --------
    make_classification: a more intricate variant
    """
    generator = check_random_state(random_state)

    if isinstance(centers, numbers.Integral):
        # Draw the requested number of centers uniformly inside the box.
        centers = generator.uniform(center_box[0], center_box[1],
                                    size=(centers, n_features))
    else:
        centers = check_array(centers)
        n_features = centers.shape[1]

    n_centers = centers.shape[0]
    if isinstance(cluster_std, numbers.Real):
        # Broadcast a scalar spread to every cluster.
        cluster_std = np.ones(n_centers) * cluster_std

    # Split n_samples as evenly as possible; the first (n_samples mod
    # n_centers) clusters get one extra point each.
    samples_per_center = [int(n_samples // n_centers)] * n_centers
    for i in range(n_samples % n_centers):
        samples_per_center[i] += 1

    blocks = []
    labels = []
    for i, (n, std) in enumerate(zip(samples_per_center, cluster_std)):
        blocks.append(centers[i] + generator.normal(scale=std,
                                                    size=(n, n_features)))
        labels += [i] * n

    X = np.concatenate(blocks)
    y = np.array(labels)

    if shuffle:
        order = np.arange(n_samples)
        generator.shuffle(order)
        X = X[order]
        y = y[order]

    return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
    """Generate the "Friedman \#1" regression problem.

    Features are i.i.d. uniform on [0, 1] and the target follows::

        y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
               + 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).

    Only the first 5 features enter the target; the rest are independent
    of `y`.  `n_features` must therefore be at least 5.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features (at least 5).
    noise : float, optional (default=0.0)
        Standard deviation of the Gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    if n_features < 5:
        raise ValueError("n_features must be at least five.")

    generator = check_random_state(random_state)
    X = generator.rand(n_samples, n_features)

    # Accumulate the reference formula term by term, preserving the exact
    # left-to-right grouping of the original expression.
    signal = 10 * np.sin(np.pi * X[:, 0] * X[:, 1])
    signal = signal + 20 * (X[:, 2] - 0.5) ** 2
    signal = signal + 10 * X[:, 3] + 5 * X[:, 4]
    y = signal + noise * generator.randn(n_samples)
    return X, y
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#2" regression problem.

    The 4 features are independent and uniform on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The target follows::

        y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
                - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        Standard deviation of the Gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    X = generator.rand(n_samples, 4)
    # Rescale the unit-uniform draws onto the reference intervals.
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    radicand = X[:, 0] ** 2 \
        + (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2
    y = radicand ** 0.5 + noise * generator.randn(n_samples)
    return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
    """Generate the "Friedman \#3" regression problem.

    The 4 features are independent and uniform on the intervals::

        0 <= X[:, 0] <= 100,
        40 * pi <= X[:, 1] <= 560 * pi,
        0 <= X[:, 2] <= 1,
        1 <= X[:, 3] <= 11.

    The target follows::

        y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
               / X[:, 0]) + noise * N(0, 1).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    noise : float, optional (default=0.0)
        Standard deviation of the Gaussian noise applied to the output.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, 4]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
           of Statistics 19 (1), pages 1-67, 1991.
    .. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
           pages 123-140, 1996.
    """
    generator = check_random_state(random_state)

    X = generator.rand(n_samples, 4)
    # Rescale the unit-uniform draws onto the reference intervals.
    X[:, 0] *= 100
    X[:, 1] *= 520 * np.pi
    X[:, 1] += 40 * np.pi
    X[:, 3] *= 10
    X[:, 3] += 1

    ratio = (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]
    y = np.arctan(ratio) + noise * generator.randn(n_samples)
    return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
                         tail_strength=0.5, random_state=None):
    """Generate a mostly low rank matrix with bell-shaped singular values.

    The low rank ("signal") part of the singular-value profile is::

        (1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)

    while the remaining fat ("noise") tail decreases as::

        tail_strength * exp(-0.1 * i / effective_rank).

    Such profiles are common in practice (face images, TF-IDF vectors of
    web documents, ...).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=100)
        The number of features.
    effective_rank : int, optional (default=10)
        Approximate number of singular vectors required to explain most of
        the data by linear combinations.
    tail_strength : float between 0.0 and 1.0, optional (default=0.5)
        Relative importance of the fat noisy tail of the singular values
        profile.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The matrix.
    """
    generator = check_random_state(random_state)
    n = min(n_samples, n_features)

    # Two independent orthonormal bases via thin QR of Gaussian matrices.
    left_basis, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
    right_basis, _ = linalg.qr(generator.randn(n_features, n), mode='economic')

    sv_index = np.arange(n, dtype=np.float64)

    # Bell-shaped "signal" component plus slowly decaying "noise" tail.
    low_rank = ((1 - tail_strength) *
                np.exp(-1.0 * (sv_index / effective_rank) ** 2))
    tail = tail_strength * np.exp(-0.1 * sv_index / effective_rank)
    sigma = np.identity(n) * (low_rank + tail)

    return np.dot(np.dot(left_basis, sigma), right_basis.T)
def make_sparse_coded_signal(n_samples, n_components, n_features,
                             n_nonzero_coefs, random_state=None):
    """Generate a signal as a sparse combination of dictionary elements.

    Returns a matrix Y = DX, such that D is (n_features, n_components),
    X is (n_components, n_samples) and each column of X has exactly
    n_nonzero_coefs non-zero elements.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int
        Number of samples to generate.
    n_components : int
        Number of components in the dictionary.
    n_features : int
        Number of features of the dataset to generate.
    n_nonzero_coefs : int
        Number of active (non-zero) coefficients in each sample.
    random_state : int or RandomState instance, optional (default=None)
        Seed used by the pseudo random number generator.

    Returns
    -------
    data : array of shape [n_features, n_samples]
        The encoded signal (Y).
    dictionary : array of shape [n_features, n_components]
        The dictionary with normalized components (D).
    code : array of shape [n_components, n_samples]
        The sparse code such that each column of this matrix has exactly
        n_nonzero_coefs non-zero items (X).
    """
    generator = check_random_state(random_state)

    # Dictionary with unit-norm columns.
    D = generator.randn(n_features, n_components)
    D /= np.sqrt(np.sum((D ** 2), axis=0))

    # Sparse code: each column has exactly n_nonzero_coefs non-zero entries
    # at uniformly chosen positions.
    X = np.zeros((n_components, n_samples))
    for i in range(n_samples):
        idx = np.arange(n_components)
        generator.shuffle(idx)
        idx = idx[:n_nonzero_coefs]
        X[idx, i] = generator.randn(n_nonzero_coefs)

    # Encode the signal.
    Y = np.dot(D, X)

    # Bug fix: returning the bare ``map`` object (a one-shot iterator on
    # Python 3) broke indexing and re-iteration of the result.  Return a
    # materialized tuple instead; tuple unpacking by callers is unaffected.
    return tuple(np.squeeze(arr) for arr in (Y, D, X))
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
    """Generate a random regression problem with sparse uncorrelated design.

    As described in Celeux et al [1]::

        X ~ N(0, 1)
        y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]

    Only the first 4 features are informative; the remaining features are
    useless.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of samples.
    n_features : int, optional (default=10)
        The number of features.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The input samples.
    y : array of shape [n_samples]
        The output values.

    References
    ----------
    .. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
           "Regularization in regression: comparing Bayesian and frequentist
           methods in a poorly informative situation", 2009.
    """
    generator = check_random_state(random_state)

    X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
    # y is unit-variance noise centered on a fixed linear combination of
    # the first four features.
    target_mean = (X[:, 0] +
                   2 * X[:, 1] -
                   2 * X[:, 2] -
                   1.5 * X[:, 3])
    y = generator.normal(loc=target_mean, scale=np.ones(n_samples))
    return X, y
def make_spd_matrix(n_dim, random_state=None):
    """Generate a random symmetric, positive-definite matrix.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_dim : int
        The matrix dimension.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_dim, n_dim]
        The random symmetric, positive-definite matrix.

    See also
    --------
    make_sparse_spd_matrix
    """
    generator = check_random_state(random_state)

    base = generator.rand(n_dim, n_dim)
    # SVD of the symmetric product base^T base gives an orthonormal basis;
    # shifting the random diagonal into (1, 2) keeps all eigenvalues
    # strictly positive.
    U, s, V = linalg.svd(np.dot(base.T, base))
    X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
    return X
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
                           smallest_coef=.1, largest_coef=.9,
                           random_state=None):
    """Generate a sparse symmetric definite positive matrix.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    dim: integer, optional (default=1)
        The size of the random matrix to generate.
    alpha: float between 0 and 1, optional (default=0.95)
        The probability that a coefficient is non zero (see notes).
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    largest_coef : float between 0 and 1, optional (default=0.9)
        The value of the largest coefficient.
    smallest_coef : float between 0 and 1, optional (default=0.1)
        The value of the smallest coefficient.
    norm_diag : boolean, optional (default=False)
        Whether to normalize the output matrix to make the leading diagonal
        elements all 1
    Returns
    -------
    prec : sparse matrix of shape (dim, dim)
        The generated matrix.
    Notes
    -----
    The sparsity is actually imposed on the cholesky factor of the matrix.
    Thus alpha does not translate directly into the filling fraction of
    the matrix itself.
    See also
    --------
    make_spd_matrix
    """
    random_state = check_random_state(random_state)
    # Build a (negated) sparse Cholesky factor: start from -I and add
    # random entries strictly below the diagonal.
    chol = -np.eye(dim)
    aux = random_state.rand(dim, dim)
    # Zero out entries below the alpha threshold, then rescale the
    # surviving ones into [smallest_coef, largest_coef].  NOTE(review):
    # entries exactly equal to alpha match neither mask and keep their raw
    # uniform value — measure-zero in practice, but worth confirming.
    aux[aux < alpha] = 0
    aux[aux > alpha] = (smallest_coef
                        + (largest_coef - smallest_coef)
                        * random_state.rand(np.sum(aux > alpha)))
    # Keep only the strictly lower triangle so chol stays triangular.
    aux = np.tril(aux, k=-1)
    # Permute the lines: we don't want to have asymmetries in the final
    # SPD matrix
    permutation = random_state.permutation(dim)
    aux = aux[permutation].T[permutation]
    chol += aux
    # prec = chol^T chol is symmetric positive definite by construction.
    # Despite the docstring, the returned value is a dense ndarray; only
    # the *pattern* is sparse.
    prec = np.dot(chol.T, chol)
    if norm_diag:
        # Form the diagonal vector into a row matrix
        d = np.diag(prec).reshape(1, prec.shape[0])
        d = 1. / np.sqrt(d)
        # Symmetric rescaling D^-1/2 * prec * D^-1/2 makes the diagonal 1.
        prec *= d
        prec *= d.T
    return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
    """Generate a swiss roll dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the manifold.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main
        dimension of the points in the manifold.

    Notes
    -----
    The algorithm is from Marsland [1].

    References
    ----------
    .. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
           Chapter 10, 2009.
           http://www-ist.massey.ac.nz/smarsland/Code/10/lle.py
    """
    generator = check_random_state(random_state)

    # Angular position along the roll, in [1.5*pi, 4.5*pi); it is also the
    # returned manifold coordinate.
    t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
    x = t * np.cos(t)
    # The second uniform draw (roll "width") must come before the noise
    # draw to preserve the generator stream.
    y = 21 * generator.rand(1, n_samples)
    z = t * np.sin(t)

    X = np.vstack((x, y, z))
    X = (X + noise * generator.randn(3, n_samples)).T
    return X, np.squeeze(t)
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
    """Generate an S curve dataset.

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    n_samples : int, optional (default=100)
        The number of sample points on the S curve.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, 3]
        The points.
    t : array of shape [n_samples]
        The univariate position of the sample according to the main
        dimension of the points in the manifold.
    """
    generator = check_random_state(random_state)

    # Curve parameter in [-1.5*pi, 1.5*pi); also the returned coordinate.
    t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
    x = np.sin(t)
    y = 2.0 * generator.rand(1, n_samples)
    z = np.sign(t) * (np.cos(t) - 1)

    X = np.vstack((x, y, z))
    X = (X + noise * generator.randn(3, n_samples)).T
    return X, np.squeeze(t)
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
                            n_features=2, n_classes=3,
                            shuffle=True, random_state=None):
    """Generate isotropic Gaussian and label samples by quantile.

    Samples are drawn from a multi-dimensional isotropic normal
    distribution; classes are defined by nested concentric spheres so that
    roughly equal numbers of samples fall in each class (quantiles of the
    :math:`\chi^2` distribution).

    Read more in the :ref:`User Guide <sample_generators>`.

    Parameters
    ----------
    mean : array of shape [n_features], optional (default=None)
        Mean of the normal distribution; the origin when None.
    cov : float, optional (default=1.)
        The covariance matrix is this value times the unit matrix (only
        symmetric normal distributions are produced).
    n_samples : int, optional (default=100)
        The total number of points equally divided among classes.
    n_features : int, optional (default=2)
        The number of features for each sample.
    n_classes : int, optional (default=3)
        The number of classes.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        Seed or generator controlling the pseudo-random draws.

    Returns
    -------
    X : array of shape [n_samples, n_features]
        The generated samples.
    y : array of shape [n_samples]
        The integer labels for quantile membership of each sample.

    Notes
    -----
    The dataset is from Zhu et al [1].

    References
    ----------
    .. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
    """
    if n_samples < n_classes:
        raise ValueError("n_samples must be at least n_classes")

    generator = check_random_state(random_state)

    mean = np.zeros(n_features) if mean is None else np.array(mean)

    # Isotropic multivariate normal draw.
    X = generator.multivariate_normal(mean, cov * np.identity(n_features),
                                      (n_samples,))

    # Order samples by squared distance from the mean so that class labels
    # can be assigned by quantile.
    order = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
    X = X[order, :]

    # step samples per class; any remainder is absorbed by the last class.
    step = n_samples // n_classes
    y = np.hstack([np.repeat(np.arange(n_classes), step),
                   np.repeat(n_classes - 1, n_samples - step * n_classes)])

    if shuffle:
        X, y = util_shuffle(X, y, random_state=generator)

    return X, y
def _shuffle(data, random_state=None):
    """Independently permute the rows and columns of a 2-D array.

    Helper for the bicluster generators; returns the shuffled copy together
    with the row and column permutations that produced it.
    """
    rng = check_random_state(random_state)
    n_rows, n_cols = data.shape
    row_order = rng.permutation(n_rows)
    col_order = rng.permutation(n_cols)
    shuffled = data[row_order][:, col_order]
    return shuffled, row_order, col_order
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
                    maxval=100, shuffle=True, random_state=None):
    """Generate an array with constant block diagonal structure for
    biclustering.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer
        The number of biclusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.
    References
    ----------
    .. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
        words using bipartite spectral graph partitioning. In Proceedings
        of the seventh ACM SIGKDD international conference on Knowledge
        discovery and data mining (pp. 269-274). ACM.
    See also
    --------
    make_checkerboard
    """
    generator = check_random_state(random_state)
    n_rows, n_cols = shape
    # One constant value per bicluster.
    consts = generator.uniform(minval, maxval, n_clusters)
    # row and column clusters of approximately equal sizes
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_clusters,
                                                n_clusters))
    row_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_clusters), row_sizes)])
    col_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_clusters), col_sizes)])
    result = np.zeros(shape, dtype=np.float64)
    for i in range(n_clusters):
        # Add the cluster constant on the block where row and column
        # labels agree (block-diagonal structure).
        selector = np.outer(row_labels == i, col_labels == i)
        result[selector] += consts[i]
    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)
    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]
    # Build the boolean indicator matrices from list comprehensions:
    # passing a bare generator to np.vstack is deprecated and raises a
    # TypeError on recent NumPy releases.
    rows = np.vstack([row_labels == c for c in range(n_clusters)])
    cols = np.vstack([col_labels == c for c in range(n_clusters)])
    return result, rows, cols
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
                      maxval=100, shuffle=True, random_state=None):
    """Generate an array with block checkerboard structure for
    biclustering.
    Read more in the :ref:`User Guide <sample_generators>`.
    Parameters
    ----------
    shape : iterable (n_rows, n_cols)
        The shape of the result.
    n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
        The number of row and column clusters.
    noise : float, optional (default=0.0)
        The standard deviation of the gaussian noise.
    minval : int, optional (default=10)
        Minimum value of a bicluster.
    maxval : int, optional (default=100)
        Maximum value of a bicluster.
    shuffle : boolean, optional (default=True)
        Shuffle the samples.
    random_state : int, RandomState instance or None, optional (default=None)
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    Returns
    -------
    X : array of shape `shape`
        The generated array.
    rows : array of shape (n_clusters, X.shape[0],)
        The indicators for cluster membership of each row.
    cols : array of shape (n_clusters, X.shape[1],)
        The indicators for cluster membership of each column.
    References
    ----------
    .. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
        Spectral biclustering of microarray data: coclustering genes
        and conditions. Genome research, 13(4), 703-716.
    See also
    --------
    make_biclusters
    """
    generator = check_random_state(random_state)
    # Accept either a single cluster count or a (rows, cols) pair.
    if hasattr(n_clusters, "__len__"):
        n_row_clusters, n_col_clusters = n_clusters
    else:
        n_row_clusters = n_col_clusters = n_clusters
    # row and column clusters of approximately equal sizes
    n_rows, n_cols = shape
    row_sizes = generator.multinomial(n_rows,
                                      np.repeat(1.0 / n_row_clusters,
                                                n_row_clusters))
    col_sizes = generator.multinomial(n_cols,
                                      np.repeat(1.0 / n_col_clusters,
                                                n_col_clusters))
    row_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_row_clusters), row_sizes)])
    col_labels = np.hstack([np.repeat(val, rep) for val, rep in
                            zip(range(n_col_clusters), col_sizes)])
    result = np.zeros(shape, dtype=np.float64)
    # Every (row cluster, column cluster) pair gets its own constant,
    # producing the checkerboard pattern.
    for i in range(n_row_clusters):
        for j in range(n_col_clusters):
            selector = np.outer(row_labels == i, col_labels == j)
            result[selector] += generator.uniform(minval, maxval)
    if noise > 0:
        result += generator.normal(scale=noise, size=result.shape)
    if shuffle:
        result, row_idx, col_idx = _shuffle(result, random_state)
        row_labels = row_labels[row_idx]
        col_labels = col_labels[col_idx]
    # Build the boolean indicator matrices from list comprehensions:
    # passing a bare generator to np.vstack is deprecated and raises a
    # TypeError on recent NumPy releases.
    rows = np.vstack([row_labels == label
                      for label in range(n_row_clusters)
                      for _ in range(n_col_clusters)])
    cols = np.vstack([col_labels == label
                      for _ in range(n_row_clusters)
                      for label in range(n_col_clusters)])
    return result, rows, cols
| mit |
zofuthan/edx-platform | common/lib/xmodule/xmodule/video_module/video_handlers.py | 108 | 14344 | """
Handlers for video module.
StudentViewHandlers are handlers for video module instance.
StudioViewHandlers are handlers for video descriptor instance.
"""
import json
import logging
from datetime import datetime
from webob import Response
from xblock.core import XBlock
from xmodule.exceptions import NotFoundError
from xmodule.fields import RelativeTime
from opaque_keys.edx.locator import CourseLocator
from .transcripts_utils import (
get_or_create_sjson,
TranscriptException,
TranscriptsGenerationException,
generate_sjson_for_all_speeds,
youtube_speed_dict,
Transcript,
save_to_store,
subs_filename
)
log = logging.getLogger(__name__)
# Disable no-member warning:
# pylint: disable=no-member
class VideoStudentViewHandlers(object):
    """
    Handlers for video module instance.
    """

    def handle_ajax(self, dispatch, data):
        """
        Update values of xfields, that were changed by student.
        """
        # Whitelist of user-state fields a student may write through this
        # endpoint; any other key in ``data`` is silently ignored.
        accepted_keys = [
            'speed', 'saved_video_position', 'transcript_language',
            'transcript_download_format', 'youtube_is_available',
            'bumper_last_view_date', 'bumper_do_not_show_again'
        ]

        # Values arrive from the client as strings; these converters coerce
        # them to the native field types before assignment.
        conversions = {
            'speed': json.loads,
            'saved_video_position': RelativeTime.isotime_to_timedelta,
            'youtube_is_available': json.loads,
        }

        if dispatch == 'save_user_state':
            for key in data:
                if key in accepted_keys:
                    if key in conversions:
                        value = conversions[key](data[key])
                    else:
                        value = data[key]

                    # The client only signals that the bumper was viewed;
                    # the timestamp itself is taken server-side.
                    if key == 'bumper_last_view_date':
                        value = datetime.utcnow()

                    setattr(self, key, value)

                    if key == 'speed':
                        self.global_speed = self.speed

            return json.dumps({'success': True})

        log.debug(u"GET {0}".format(data))
        log.debug(u"DISPATCH {0}".format(dispatch))

        raise NotFoundError('Unexpected dispatch type')

    def translation(self, youtube_id, transcripts):
        """
        This is called to get transcript file for specific language.

        youtube_id: str: must be one of youtube_ids or None if HTML video
        transcripts (dict): A dict with all transcripts and a sub.

        Logic flow:

        If youtube_id doesn't exist, we have a video in HTML5 mode. Otherwise,
        video in Youtube or Flash modes.

        if youtube:
            If english -> give back youtube_id subtitles:
                Return what we have in contentstore for given youtube_id.
            If non-english:
                a) extract youtube_id from srt file name.
                b) try to find sjson by youtube_id and return if successful.
                c) generate sjson from srt for all youtube speeds.
        if non-youtube:
            If english -> give back `sub` subtitles:
                Return what we have in contentstore for given subs_if that is stored in self.sub.
            If non-english:
                a) try to find previously generated sjson.
                b) otherwise generate sjson from srt and return it.

        Filenames naming:
            en: subs_videoid.srt.sjson
            non_en: uk_subs_videoid.srt.sjson

        Raises:
            NotFoundError if for 'en' subtitles no asset is uploaded.
            NotFoundError if youtube_id does not exist / invalid youtube_id
        """
        sub, other_lang = transcripts["sub"], transcripts["transcripts"]
        if youtube_id:
            # Youtube case:
            if self.transcript_language == 'en':
                return Transcript.asset(self.location, youtube_id).data

            youtube_ids = youtube_speed_dict(self)
            if youtube_id not in youtube_ids:
                log.info("Youtube_id %s does not exist", youtube_id)
                raise NotFoundError

            try:
                sjson_transcript = Transcript.asset(self.location, youtube_id, self.transcript_language).data
            except NotFoundError:
                log.info("Can't find content in storage for %s transcript: generating.", youtube_id)
                # Lazily build sjson transcripts for every youtube speed so
                # subsequent requests are served straight from the store.
                generate_sjson_for_all_speeds(
                    self,
                    other_lang[self.transcript_language],
                    {speed: youtube_id for youtube_id, speed in youtube_ids.iteritems()},
                    self.transcript_language
                )
                sjson_transcript = Transcript.asset(self.location, youtube_id, self.transcript_language).data
            return sjson_transcript
        else:
            # HTML5 case
            if self.transcript_language == 'en':
                if '.srt' not in sub:  # not bumper case
                    return Transcript.asset(self.location, sub).data
                try:
                    return get_or_create_sjson(self, {'en': sub})
                except TranscriptException:
                    pass  # to raise NotFoundError and try to get data in get_static_transcript
            elif other_lang:
                return get_or_create_sjson(self, other_lang)

        raise NotFoundError

    def get_static_transcript(self, request, transcripts):
        """
        Courses that are imported with the --nostatic flag do not show
        transcripts/captions properly even if those captions are stored inside
        their static folder. This adds a last resort method of redirecting to
        the static asset path of the course if the transcript can't be found
        inside the contentstore and the course has the static_asset_path field
        set.

        transcripts (dict): A dict with all transcripts and a sub.
        """
        response = Response(status=404)
        # Only do redirect for English
        if not self.transcript_language == 'en':
            return response

        # If this video lives in library, the code below is not relevant and will error.
        if not isinstance(self.course_id, CourseLocator):
            return response

        video_id = request.GET.get('videoId', None)
        if video_id:
            transcript_name = video_id
        else:
            transcript_name = transcripts["sub"]

        if transcript_name:
            # Get the asset path for course
            asset_path = None
            course = self.descriptor.runtime.modulestore.get_course(self.course_id)
            if course.static_asset_path:
                asset_path = course.static_asset_path
            else:
                # It seems static_asset_path is not set in any XMLModuleStore courses.
                asset_path = getattr(course, 'data_dir', '')

            if asset_path:
                # 307 preserves the request method across the redirect.
                response = Response(
                    status=307,
                    location='/static/{0}/{1}'.format(
                        asset_path,
                        subs_filename(transcript_name, self.transcript_language)
                    )
                )
        return response

    @XBlock.handler
    def transcript(self, request, dispatch):
        """
        Entry point for transcript handlers for student_view.

        Request GET contains:
            (optional) `videoId` for `translation` dispatch.
            `is_bumper=1` flag for bumper case.

        Dispatches, (HTTP GET):
            /translation/[language_id]
            /download
            /available_translations/

        Explanations:
            `download`: returns SRT or TXT file.
            `translation`: depends on HTTP methods:
                Provide translation for requested language, SJSON format is sent back on success,
                Proper language_id should be in url.
            `available_translations`:
                Returns list of languages, for which transcript files exist.
                For 'en' check if SJSON exists. For non-`en` check if SRT file exists.
        """
        is_bumper = request.GET.get('is_bumper', False)
        transcripts = self.get_transcripts_info(is_bumper)

        if dispatch.startswith('translation'):
            language = dispatch.replace('translation', '').strip('/')

            if not language:
                log.info("Invalid /translation request: no language.")
                return Response(status=400)

            if language not in ['en'] + transcripts["transcripts"].keys():
                log.info("Video: transcript facilities are not available for given language.")
                return Response(status=404)

            # Remember the language the student last requested.
            if language != self.transcript_language:
                self.transcript_language = language

            try:
                transcript = self.translation(request.GET.get('videoId', None), transcripts)
            except (TypeError, NotFoundError) as ex:
                log.info(ex.message)
                # Try to return static URL redirection as last resort
                # if no translation is required
                return self.get_static_transcript(request, transcripts)
            except (
                TranscriptException,
                UnicodeDecodeError,
                TranscriptsGenerationException
            ) as ex:
                log.info(ex.message)
                response = Response(status=404)
            else:
                response = Response(transcript, headerlist=[('Content-Language', language)])
                response.content_type = Transcript.mime_types['sjson']

        elif dispatch == 'download':
            lang = request.GET.get('lang', None)
            try:
                transcript_content, transcript_filename, transcript_mime_type = self.get_transcript(
                    transcripts, transcript_format=self.transcript_download_format, lang=lang
                )
            except (NotFoundError, ValueError, KeyError, UnicodeDecodeError):
                log.debug("Video@download exception")
                return Response(status=404)
            else:
                # Served as an attachment so the browser downloads the file
                # instead of rendering it.
                response = Response(
                    transcript_content,
                    headerlist=[
                        ('Content-Disposition', 'attachment; filename="{}"'.format(transcript_filename.encode('utf8'))),
                        ('Content-Language', self.transcript_language),
                    ]
                )
                response.content_type = transcript_mime_type

        elif dispatch.startswith('available_translations'):
            available_translations = self.available_translations(transcripts)
            if available_translations:
                response = Response(json.dumps(available_translations))
                response.content_type = 'application/json'
            else:
                response = Response(status=404)

        else:  # unknown dispatch
            log.debug("Dispatch is not allowed")
            response = Response(status=404)

        return response
class VideoStudioViewHandlers(object):
    """
    Handlers for Studio view.
    """

    @XBlock.handler
    def studio_transcript(self, request, dispatch):
        """
        Entry point for Studio transcript handlers.

        Dispatches:
            /translation/[language_id] - language_id should be in url.

        `translation` dispatch support following HTTP methods:

            `POST`:
                Upload srt file. Check possibility of generation of proper sjson files.
                For now, it works only for self.transcripts, not for `en`.
                Do not update self.transcripts, as fields are updated on save in Studio.

            `GET`:
                Return filename from storage. SRT format is sent back on success. Filename should be in GET dict.

        We raise all exceptions right in Studio:
            NotFoundError:
                Video or asset was deleted from module/contentstore, but request came later.
                Seems impossible to be raised. module_render.py catches NotFoundErrors from here.

            /translation POST:
                TypeError:
                    Unjsonable filename or content.
                TranscriptsGenerationException, TranscriptException:
                    no SRT extension or not parse-able by PySRT
                UnicodeDecodeError: non-UTF8 uploaded file content encoding.
        """
        _ = self.runtime.service(self, "i18n").ugettext

        if dispatch.startswith('translation'):
            language = dispatch.replace('translation', '').strip('/')

            if not language:
                log.info("Invalid /translation request: no language.")
                return Response(status=400)

            if request.method == 'POST':
                subtitles = request.POST['file']

                try:
                    # Validate the upload is strict UTF-8 before persisting
                    # anything to the contentstore.
                    file_data = subtitles.file.read()
                    unicode(file_data, "utf-8", "strict")
                except UnicodeDecodeError:
                    log.info("Invalid encoding type for transcript file: {}".format(subtitles.filename))
                    msg = _("Invalid encoding type, transcripts should be UTF-8 encoded.")
                    return Response(msg, status=400)

                # Store the raw SRT, then eagerly generate sjson so playback
                # needs no further conversion step.
                save_to_store(file_data, unicode(subtitles.filename), 'application/x-subrip', self.location)
                generate_sjson_for_all_speeds(self, unicode(subtitles.filename), {}, language)
                response = {'filename': unicode(subtitles.filename), 'status': 'Success'}
                return Response(json.dumps(response), status=201)

            elif request.method == 'GET':
                filename = request.GET.get('filename')
                if not filename:
                    log.info("Invalid /translation request: no filename in request.GET")
                    return Response(status=400)

                content = Transcript.get_asset(self.location, filename).data
                response = Response(content, headerlist=[
                    ('Content-Disposition', 'attachment; filename="{}"'.format(filename.encode('utf8'))),
                    ('Content-Language', language),
                ])
                response.content_type = Transcript.mime_types['srt']

        else:  # unknown dispatch
            log.debug("Dispatch is not allowed")
            response = Response(status=404)

        return response
| agpl-3.0 |
Antiun/c2c-rd-addons | c2c_stock/__openerp__.py | 4 | 1536 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'sequence': 500,
'name': 'Stock Extensions',
'version': '0.7',
'category': 'Warehouse Management',
'description': """
Adds some info fields to stock location
makes lot ref searchable
""",
'author': 'ChriCar Beteiligungs- und Beratungs- GmbH',
'depends': [ 'stock' ],
'data': ['stock_view.xml',
],
#'data': ['product_view.xml'],
'demo_xml': [],
'installable': False,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fenglu-g/incubator-airflow | airflow/contrib/example_dags/example_twitter_dag.py | 20 | 7310 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# --------------------------------------------------------------------------------
# Written By: Ekhtiar Syed
# Last Update: 8th April 2016
# Caveat: This Dag will not run because of missing scripts.
# The purpose of this is to give you a sample of a real world example DAG!
# --------------------------------------------------------------------------------
# --------------------------------------------------------------------------------
# Load The Dependencies
# --------------------------------------------------------------------------------
import airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.hive_operator import HiveOperator
from datetime import date, timedelta
# --------------------------------------------------------------------------------
# Create a few placeholder scripts. In practice these would be different python
# script files, which are imported in this section with absolute or relative imports
# --------------------------------------------------------------------------------
def fetchtweets():
    """Placeholder for a script that would pull tweets via the Twitter API."""
    return None
def cleantweets():
    """Placeholder for a script that would clean the fetched tweet CSVs."""
    return None
def analyzetweets():
    """Placeholder for a script that would analyze the cleaned tweets."""
    return None
def transfertodb():
    """Placeholder for a script that would move Hive summaries into MySQL."""
    return None
# --------------------------------------------------------------------------------
# set default arguments
# --------------------------------------------------------------------------------
# These defaults are inherited by every task in the DAG unless a task
# overrides them explicitly.
default_args = {
    'owner': 'Ekhtiar',
    'depends_on_past': False,
    'start_date': airflow.utils.dates.days_ago(5),
    'email': ['airflow@example.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}

# One run per day, starting 5 days ago (so Airflow backfills 5 runs).
dag = DAG(
    'example_twitter_dag', default_args=default_args,
    schedule_interval="@daily")
# --------------------------------------------------------------------------------
# This task should call Twitter API and retrieve tweets from yesterday from and to
# for the four twitter users (Twitter_A,..,Twitter_D) There should be eight csv
# output files generated by this task and naming convention
# is direction(from or to)_twitterHandle_date.csv
# --------------------------------------------------------------------------------

fetch_tweets = PythonOperator(
    task_id='fetch_tweets',
    python_callable=fetchtweets,
    dag=dag)

# --------------------------------------------------------------------------------
# Clean the eight files. In this step you can get rid of or cherry pick columns
# and different parts of the text
# --------------------------------------------------------------------------------

clean_tweets = PythonOperator(
    task_id='clean_tweets',
    python_callable=cleantweets,
    dag=dag)

# clean_tweets runs only after fetch_tweets succeeds.
clean_tweets.set_upstream(fetch_tweets)

# --------------------------------------------------------------------------------
# In this section you can use a script to analyze the twitter data. Could simply
# be a sentiment analysis through algorithms like bag of words or something more
# complicated. You can also take a look at Web Services to do such tasks
# --------------------------------------------------------------------------------

analyze_tweets = PythonOperator(
    task_id='analyze_tweets',
    python_callable=analyzetweets,
    dag=dag)

analyze_tweets.set_upstream(clean_tweets)

# --------------------------------------------------------------------------------
# Although this is the last task, we need to declare it before the next tasks as we
# will use set_downstream This task will extract summary from Hive data and store
# it to MySQL
# --------------------------------------------------------------------------------

hive_to_mysql = PythonOperator(
    task_id='hive_to_mysql',
    python_callable=transfertodb,
    dag=dag)
# --------------------------------------------------------------------------------
# The following tasks are generated using for loop. The first task puts the eight
# csv files to HDFS. The second task loads these files from HDFS to respected Hive
# tables. These two for loops could be combined into one loop. However, in most cases,
# you will be running different analysis on your incoming incoming and outgoing tweets,
# and hence they are kept separated in this example.
# --------------------------------------------------------------------------------

from_channels = ['fromTwitter_A', 'fromTwitter_B', 'fromTwitter_C', 'fromTwitter_D']
to_channels = ['toTwitter_A', 'toTwitter_B', 'toTwitter_C', 'toTwitter_D']
yesterday = date.today() - timedelta(days=1)
dt = yesterday.strftime("%Y-%m-%d")
# define where you want to store the tweets csv file in your local directory
local_dir = "/tmp/"
# define the location where you want to store in HDFS
# NOTE: the leading space is intentional -- when the bash_command strings
# below are concatenated, it is the only separator between the source file
# (local_dir + file_name) and the HDFS destination.
hdfs_dir = " /tmp/"

for channel in to_channels:

    file_name = "to_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"

    load_to_hdfs = BashOperator(
        task_id="put_" + channel + "_to_hdfs",
        bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
                     local_dir + file_name +
                     hdfs_dir + channel + "/",
        dag=dag)

    load_to_hdfs.set_upstream(analyze_tweets)

    # NOTE(review): hdfs_dir's leading space also ends up inside the quoted
    # INPATH below -- harmless for this intentionally non-running example,
    # but confirm before reusing against a real cluster.
    load_to_hive = HiveOperator(
        task_id="load_" + channel + "_to_hive",
        hql="LOAD DATA INPATH '" +
            hdfs_dir + channel + "/" + file_name + "' "
            "INTO TABLE " + channel + " "
            "PARTITION(dt='" + dt + "')",
        dag=dag)
    load_to_hive.set_upstream(load_to_hdfs)
    load_to_hive.set_downstream(hive_to_mysql)

for channel in from_channels:
    file_name = "from_" + channel + "_" + yesterday.strftime("%Y-%m-%d") + ".csv"
    load_to_hdfs = BashOperator(
        task_id="put_" + channel + "_to_hdfs",
        bash_command="HADOOP_USER_NAME=hdfs hadoop fs -put -f " +
                     local_dir + file_name +
                     hdfs_dir + channel + "/",
        dag=dag)

    load_to_hdfs.set_upstream(analyze_tweets)

    load_to_hive = HiveOperator(
        task_id="load_" + channel + "_to_hive",
        hql="LOAD DATA INPATH '" +
            hdfs_dir + channel + "/" + file_name + "' "
            "INTO TABLE " + channel + " "
            "PARTITION(dt='" + dt + "')",
        dag=dag)
    load_to_hive.set_upstream(load_to_hdfs)
    load_to_hive.set_downstream(hive_to_mysql)
| apache-2.0 |
Geeglee/scrapy | tests/test_link.py | 68 | 2014 | import unittest
import warnings
from scrapy.link import Link
class LinkTest(unittest.TestCase):
    """Tests for Link equality, hashing, repr round-tripping and
    unicode-to-bytes URL coercion."""

    def _assert_same_links(self, first, second):
        # Equal links must also agree on hash so they behave in sets/dicts.
        self.assertEqual(first, second)
        self.assertEqual(hash(first), hash(second))

    def _assert_different_links(self, first, second):
        self.assertNotEqual(first, second)
        self.assertNotEqual(hash(first), hash(second))

    def test_eq_and_hash(self):
        # URL-only links.
        base = Link(b"http://www.example.com")
        other_url = Link(b"http://www.example.com/other")
        same_url = Link(b"http://www.example.com")
        self._assert_same_links(base, base)
        self._assert_different_links(base, other_url)
        self._assert_same_links(base, same_url)

        # Links that differ only in their text.
        with_text = Link(b"http://www.example.com", text="test")
        other_text = Link(b"http://www.example.com", text="test2")
        same_text = Link(b"http://www.example.com", text="test")
        self._assert_same_links(with_text, with_text)
        self._assert_different_links(with_text, other_text)
        self._assert_same_links(with_text, same_text)

        # Fragment and nofollow participate in equality too.
        full = Link(b"http://www.example.com", text="test", fragment='something', nofollow=False)
        full_copy = Link(b"http://www.example.com", text="test", fragment='something', nofollow=False)
        nofollow_on = Link(b"http://www.example.com", text="test", fragment='something', nofollow=True)
        other_fragment = Link(b"http://www.example.com", text="test", fragment='other', nofollow=False)
        self._assert_same_links(full, full_copy)
        self._assert_different_links(full, nofollow_on)
        self._assert_different_links(full, other_fragment)

    def test_repr(self):
        # repr() must be a valid constructor expression for an equal link.
        original = Link(b"http://www.example.com", text="test", fragment='something', nofollow=True)
        recreated = eval(repr(original))
        self._assert_same_links(original, recreated)

    def test_unicode_url(self):
        # A unicode URL is accepted with a single warning and stored as
        # UTF-8 encoded bytes.
        with warnings.catch_warnings(record=True) as caught:
            link = Link(u"http://www.example.com/\xa3")
            self.assertIsInstance(link.url, bytes)
            self.assertEqual(link.url, b'http://www.example.com/\xc2\xa3')
            assert len(caught) == 1, "warning not issued"
| bsd-3-clause |
CalebBell/thermo | tests/test_eos_mix_methods.py | 1 | 14103 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2020, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
import pytest
from thermo.eos import *
from thermo.eos_mix import *
from thermo.eos_alpha_functions import *
from thermo.eos_mix_methods import *
from fluids.constants import R
from fluids.numerics import jacobian, hessian, assert_close, assert_close1d, assert_close2d, assert_close3d, derivative
from math import log, exp, sqrt
import numpy as np
from thermo.eos_mix_methods import a_alpha_quadratic_terms, a_alpha_and_derivatives_quadratic_terms
def test_a_alpha_quadratic_terms():
    # Regression test for a_alpha_quadratic_terms: a large 44-component
    # mixture with all-zero kijs (also useful as a speed benchmark), then a
    # small binary mixture with constant nonzero kijs. All reference values
    # are pinned to rtol=1e-14.
    # useful test case for speed.
    expect = [1.018836674553355, 2.191757517626393, 2.563258602852081, 1.5598326706034975, 2.70593281974093, 3.7034025281989855, 4.539954054126808, 4.699007689627005, 5.544738410220301, 5.727506758376061, 6.747016798786708, 7.772541929210375, 8.824329534067225, 9.881609693824497, 10.818879356535186, 11.967885231615968, 13.064056888046336, 14.301191101517293, 15.549382410454996, 16.514506861687853, 17.70128879207487, 18.588871716258463, 19.587383418298344, 21.163882746233718, 22.71677093839829, 23.693174106957997, 24.84638402761533, 26.32710900857889, 27.628174407150638, 27.35173402605858, 30.078139085433158, 29.6938067153124, 30.975794852828585, 31.612211604350215, 37.346889330614765, 5.8657490543188056, 6.918460471177853, 7.885934394505012, 7.987258405203353, 9.096924819311049, 5.4186445304744675, 6.364741674932172, 6.247071329729653, 7.191150355969193]
    a_alphas = [0.0865274961332042, 0.4004331347550168, 0.5476837363175464, 0.20281544374537322, 0.610350096562494, 1.1432648066725495, 1.7180979223407897, 1.8405910620140276, 2.56275518543631, 2.734489234665559, 3.794622523842678, 5.035830969924731, 6.490952532386477, 8.139549888291587, 9.756848311930623, 11.939326501216337, 14.226600071224336, 17.048627321670082, 20.154465549725934, 22.73401890914733, 26.118893369963804, 28.803884311242584, 31.98142763556359, 37.33667941647009, 43.0168093920849, 46.79414203338489, 51.460189856771855, 57.77651478272769, 63.62816155455672, 62.36123776101297, 75.41312259487229, 73.4982082371554, 79.98156837889205, 83.30187138391334, 116.2663039720862, 2.8680845884126343, 3.9899175858237754, 5.183836756317098, 5.317903685129213, 6.898175009281366, 2.447520402314526, 3.3768094978613767, 3.2531038444204294, 4.3106398143326805]
    a_alpha_roots = [i**0.5 for i in a_alphas]
    # 44x44 zero interaction-parameter matrix, as plain nested lists.
    kijs = np.zeros((44, 44)).tolist()
    zs = [9.11975115499676e-05, 9.986813065240533e-05, 0.0010137795304828892, 0.019875879000370657, 0.013528874875432457, 0.021392773691700402, 0.00845450438914824, 0.02500218071904368, 0.016114189201071587, 0.027825798446635016, 0.05583179467176313, 0.0703116540769539, 0.07830577180555454, 0.07236459223729574, 0.0774523322851419, 0.057755091407705975, 0.04030134965162674, 0.03967043780553758, 0.03514481759005302, 0.03175471055284055, 0.025411123554079325, 0.029291866298718154, 0.012084986551713202, 0.01641114551124426, 0.01572454598093482, 0.012145363820829673, 0.01103585282423499, 0.010654818322680342, 0.008777712911254239, 0.008732073853067238, 0.007445155260036595, 0.006402875549212365, 0.0052908087849774296, 0.0048199150683177075, 0.015943943854195963, 0.004452253754752775, 0.01711981267072777, 0.0024032720444511282, 0.032178399403544646, 0.0018219517069058137, 0.003403378548794345, 0.01127516775495176, 0.015133143423489698, 0.029483213283483682]
    a_alpha, a_alpha_j_rows = a_alpha_quadratic_terms(a_alphas, a_alpha_roots, 299.0, zs, kijs)
    assert_close1d(expect, a_alpha_j_rows, rtol=1e-14)
    assert_close(a_alpha, 11.996512274167202, rtol=1e-14)

    # Small case but with constant kijs
    kijs = [[0,.083],[0.083,0]]
    zs = [0.1164203, 0.8835797]
    a_alphas = [0.2491099357671155, 0.6486495863528039]
    a_alpha_roots = [i**0.5 for i in a_alphas]
    a_alpha, a_alpha_j_rows = a_alpha_quadratic_terms(a_alphas, a_alpha_roots, 299.0, zs, kijs)
    assert_close1d([0.35469988173420947, 0.6160475723779467], a_alpha_j_rows, rtol=1e-14)
    assert_close(a_alpha, 0.5856213958288955, rtol=1e-14)
def test_a_alpha_and_derivatives_quadratic_terms():
    """Regression-check a_alpha_and_derivatives_quadratic_terms.

    Two scenarios at T = 299 K:

    1. A 44-component mixture with all-zero binary interaction
       parameters (kijs); the mixture a_alpha, its first and second
       temperature derivatives, and the per-component j-row sums for
       a_alpha and da_alpha/dT are compared against precomputed
       reference values.
    2. A 2-component mixture with constant nonzero kijs.
    """
    # Reference per-component j-row sums of a_alpha for the 44-component case.
    expect = [1.018836674553355, 2.191757517626393, 2.563258602852081, 1.5598326706034975, 2.70593281974093, 3.7034025281989855, 4.539954054126808, 4.699007689627005, 5.544738410220301, 5.727506758376061, 6.747016798786708, 7.772541929210375, 8.824329534067225, 9.881609693824497, 10.818879356535186, 11.967885231615968, 13.064056888046336, 14.301191101517293, 15.549382410454996, 16.514506861687853, 17.70128879207487, 18.588871716258463, 19.587383418298344, 21.163882746233718, 22.71677093839829, 23.693174106957997, 24.84638402761533, 26.32710900857889, 27.628174407150638, 27.35173402605858, 30.078139085433158, 29.6938067153124, 30.975794852828585, 31.612211604350215, 37.346889330614765, 5.8657490543188056, 6.918460471177853, 7.885934394505012, 7.987258405203353, 9.096924819311049, 5.4186445304744675, 6.364741674932172, 6.247071329729653, 7.191150355969193]
    # Pure-component a_alpha values and derived quantities.
    a_alphas = [0.0865274961332042, 0.4004331347550168, 0.5476837363175464, 0.20281544374537322, 0.610350096562494, 1.1432648066725495, 1.7180979223407897, 1.8405910620140276, 2.56275518543631, 2.734489234665559, 3.794622523842678, 5.035830969924731, 6.490952532386477, 8.139549888291587, 9.756848311930623, 11.939326501216337, 14.226600071224336, 17.048627321670082, 20.154465549725934, 22.73401890914733, 26.118893369963804, 28.803884311242584, 31.98142763556359, 37.33667941647009, 43.0168093920849, 46.79414203338489, 51.460189856771855, 57.77651478272769, 63.62816155455672, 62.36123776101297, 75.41312259487229, 73.4982082371554, 79.98156837889205, 83.30187138391334, 116.2663039720862, 2.8680845884126343, 3.9899175858237754, 5.183836756317098, 5.317903685129213, 6.898175009281366, 2.447520402314526, 3.3768094978613767, 3.2531038444204294, 4.3106398143326805]
    a_alpha_roots = [i**0.5 for i in a_alphas]
    # NOTE(review): name says "root invs" but this computes 1/a_alpha (not
    # 1/sqrt(a_alpha)), and the value is unused below — confirm intent.
    a_alpha_i_root_invs = [1.0/i for i in a_alphas]
    # Pure-component first and second temperature derivatives of a_alpha.
    da_alpha_dTs = [-0.00025377859043732546, -0.000934247068461214, -0.000816789460173304, -0.0003641243787874678, -0.0010503058450047169, -0.0019521746900983052, -0.0028718927680108602, -0.0030862530923667516, -0.0043109072968568855, -0.004719357153237089, -0.006631042744989444, -0.008954841106859145, -0.01175296124567969, -0.015014798912202318, -0.018394836388991746, -0.02261696126764091, -0.02691416109598246, -0.03306276569415665, -0.03972067690500332, -0.04434234645435802, -0.05166183446540069, -0.05661884581837739, -0.06384511544740731, -0.07534567027524366, -0.08688546863889157, -0.09454104531596857, -0.1047355386575357, -0.12085503194237243, -0.13251190497391216, -0.13109044690165458, -0.1584965979082082, -0.15738400415699616, -0.1706975126112625, -0.17869250096210298, -0.24786999267933035, -0.0040612961454164305, -0.005861031978967661, -0.007870669654243058, -0.00806706054424201, -0.011089166549563573, -0.0035751401389282128, -0.005057878813908274, -0.004795418755334288, -0.0063951285412122945]
    d2a_alpha_dT2s = [7.951210065548482e-07, 2.6469203076280187e-06, 1.970376231974855e-06, 9.337390218103036e-07, 2.654206140072756e-06, 4.920336341685227e-06, 7.186749294919237e-06, 7.73122782691325e-06, 1.0810615491775454e-05, 1.1938080101460763e-05, 1.6845558981373303e-05, 2.288659685773046e-05, 3.022862525081902e-05, 3.887335363056251e-05, 4.799818908733702e-05, 5.9116869795960396e-05, 7.031530412634311e-05, 8.71642719698682e-05, 0.00010534213565791343, 0.00011714843555809333, 0.00013719528984525276, 0.00015001164237180505, 0.00017013611809931108, 0.0002016001519076944, 0.00023255486736407165, 0.0002530719148656703, 0.0002811419418128126, 0.00032782536312720063, 0.000358837713019585, 0.00035626762677964024, 0.00043071802720069994, 0.0004308123103893313, 0.0004666480764343225, 0.0004894792537071127, 0.0006773356550351481, 9.64428714604626e-06, 1.4073199340092461e-05, 1.9092839815989808e-05, 1.956381512959782e-05, 2.739514336342284e-05, 8.569704889318595e-06, 1.2217713526317966e-05, 1.1526841531601815e-05, 1.5402352528062937e-05]
    # Reference per-component j-row sums of da_alpha/dT.
    da_alpha_dT_j_rows_expect = [-0.0024659779471849236, -0.0046475548895564215, -0.004356514353727929, -0.002888183050970737, -0.0049094724710971645, -0.0066946247849404734, -0.008125158529797675, -0.008422079528590325, -0.009952764932789312, -0.010406054570834938, -0.012331292438012833, -0.014325077425132872, -0.01640670440194842, -0.01854046658049185, -0.02051894196830183, -0.022751981036326308, -0.02481953443659406, -0.027509548756389217, -0.030155386331164644, -0.031859224259789314, -0.03439180249090889, -0.036002133443470065, -0.0382361992513997, -0.0415431605007282, -0.04461176649968248, -0.046535861707927346, -0.04898614541953604, -0.05264915066454394, -0.055124368695664686, -0.05483970527179004, -0.06030003256343941, -0.06011776608310644, -0.06260298333060192, -0.0640616331561035, -0.07543630216258783, -0.009748518366766266, -0.011681157292387554, -0.013509225924011457, -0.013677421745325026, -0.015989657410498563, -0.009126533178948, -0.010838121814247793, -0.010563651638562304, -0.01219409084892938]
    # All-zero binary interaction parameters for the 44-component mixture.
    kijs = np.zeros((44, 44)).tolist()
    # Mole fractions.
    zs = [9.11975115499676e-05, 9.986813065240533e-05, 0.0010137795304828892, 0.019875879000370657, 0.013528874875432457, 0.021392773691700402, 0.00845450438914824, 0.02500218071904368, 0.016114189201071587, 0.027825798446635016, 0.05583179467176313, 0.0703116540769539, 0.07830577180555454, 0.07236459223729574, 0.0774523322851419, 0.057755091407705975, 0.04030134965162674, 0.03967043780553758, 0.03514481759005302, 0.03175471055284055, 0.025411123554079325, 0.029291866298718154, 0.012084986551713202, 0.01641114551124426, 0.01572454598093482, 0.012145363820829673, 0.01103585282423499, 0.010654818322680342, 0.008777712911254239, 0.008732073853067238, 0.007445155260036595, 0.006402875549212365, 0.0052908087849774296, 0.0048199150683177075, 0.015943943854195963, 0.004452253754752775, 0.01711981267072777, 0.0024032720444511282, 0.032178399403544646, 0.0018219517069058137, 0.003403378548794345, 0.01127516775495176, 0.015133143423489698, 0.029483213283483682]
    a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_j_rows, da_alpha_dT_j_rows = a_alpha_and_derivatives_quadratic_terms(a_alphas, a_alpha_roots, da_alpha_dTs, d2a_alpha_dT2s, 299.0, zs, kijs)
    assert_close1d(expect, a_alpha_j_rows, rtol=1e-14)
    assert_close(a_alpha, 11.996512274167202, rtol=1e-14)
    assert_close(da_alpha_dT, -0.0228875173310534, rtol=1e-14)
    assert_close(d2a_alpha_dT2, 5.9978809895526926e-05, rtol=1e-14)
    assert_close1d(da_alpha_dT_j_rows_expect, da_alpha_dT_j_rows, rtol=1e-14)

    # Second scenario: binary mixture with constant nonzero kijs.
    kijs = [[0,.083],[0.083,0]]
    zs = [0.1164203, 0.8835797]
    # eos = PRMIX(T=190.0, P=40.53e5, Tcs=[190.63, 373.55], Pcs=[46.17E5, 90.07E5], omegas=[0.01, 0.1], zs=zs, kijs=kijs)
    a_alphas = [0.2491099357671155, 0.6486495863528039]
    da_alpha_dTs = [-0.0005102028006086241, -0.0011131153520304886]
    d2a_alpha_dT2s = [1.8651128859234162e-06, 3.884331923127011e-06]
    a_alpha_roots = [i**0.5 for i in a_alphas]
    a_alpha, da_alpha_dT, d2a_alpha_dT2, a_alpha_j_rows, da_alpha_dT_j_rows = a_alpha_and_derivatives_quadratic_terms(a_alphas, a_alpha_roots, da_alpha_dTs, d2a_alpha_dT2s, 299.0, zs, kijs)
    assert_close(a_alpha, 0.5856213958288957, rtol=1e-14)
    assert_close(da_alpha_dT, -0.001018667672891354, rtol=1e-14)
    assert_close(d2a_alpha_dT2, 3.5666981785619988e-06, rtol=1e-14)
    assert_close1d(a_alpha_j_rows, [0.35469988173420947, 0.6160475723779467], rtol=1e-14)
    assert_close1d(da_alpha_dT_j_rows, [-0.0006723873746135188, -0.0010642935017889568], rtol=1e-14)
def test_a_alpha_aijs_composition_independent():
    """Binary mixture: composition-independent a_alpha cross terms.

    Checks the symmetric cross-term matrix, the pure-component square
    roots, and the inverse-root matrix returned by
    a_alpha_aijs_composition_independent.
    """
    binary_kijs = [[0, .083], [0.083, 0]]
    pure_a_alphas = [0.2491099357671155, 0.6486495863528039]
    cross_terms, root_terms, inv_root_terms = a_alpha_aijs_composition_independent(
        pure_a_alphas, binary_kijs)
    assert_close2d(cross_terms,
                   [[0.2491099357671155, 0.3686123937424334],
                    [0.3686123937424334, 0.6486495863528038]], rtol=1e-13)
    assert_close1d(root_terms,
                   [0.4991091421393877, 0.8053878484015039], rtol=1e-13)
    assert_close1d(inv_root_terms,
                   [[4.014291910599931, 2.4877079977965977],
                    [2.4877079977965977, 1.5416644379945614]], rtol=1e-13)
def test_PR_lnphis_fastest():
    """PR_lnphis_fastest must reproduce PRMIX.lnphis_l and lnphis_g exactly."""
    mixture_kwargs = dict(
        Tcs=[190.56400000000002, 305.32, 369.83, 126.2],
        Pcs=[4599000.0, 4872000.0, 4248000.0, 3394387.5],
        omegas=[0.008, 0.098, 0.152, 0.04],
        zs=[.1, .2, .3, .4],
        kijs=[[0.0, -0.0059, 0.0119, 0.0289],
              [-0.0059, 0.0, 0.0011, 0.0533],
              [0.0119, 0.0011, 0.0, 0.0878],
              [0.0289, 0.0533, 0.0878, 0.0]])
    eos = PRMIX(T=200, P=1e5, **mixture_kwargs)

    # Liquid-root fugacity coefficients.
    reference = eos.lnphis_l
    fast = PR_lnphis_fastest(eos.zs, eos.T, eos.P, 4, eos.kijs,
                             True, False, eos.bs, eos.a_alphas,
                             eos.a_alpha_roots)
    assert_close1d(reference, fast, rtol=1e-14)

    # Gas-root fugacity coefficients.
    reference = eos.lnphis_g
    fast = PR_lnphis_fastest(eos.zs, eos.T, eos.P, 4, eos.kijs,
                             False, True, eos.bs, eos.a_alphas,
                             eos.a_alpha_roots)
    assert_close1d(reference, fast, rtol=1e-14)
kuboschek/jay | settings/views/systems.py | 2 | 3752 | import time
from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from jay.utils import superadmin
from django.core.urlresolvers import reverse
from settings.models import VotingSystem
from settings.forms import EditSystemForm
# Template for the voting-systems overview page.
SETTINGS_SYSTEMS_TEMPLATE = "systems/systems_overview.html"
# Template for the single voting-system edit page.
SETTINGS_SYSTEMS_EDIT_TEMPLATE = "systems/systems_edit.html"
@login_required
@superadmin
def systems(request, alert_type=None, alert_head=None, alert_text=None):
    """Render the voting-systems overview page.

    The optional alert_* arguments, when any is given, are passed to the
    template so a status banner can be shown (used by the other views in
    this module to report results).
    """
    context = {'voting_system_list': VotingSystem.objects.all()}

    # Only expose the alert keys when at least one alert field is set.
    if any((alert_type, alert_head, alert_text)):
        context['alert_type'] = alert_type
        context['alert_head'] = alert_head
        context['alert_text'] = alert_text

    return render(request, SETTINGS_SYSTEMS_TEMPLATE, context)
@login_required
@superadmin
def system_edit(request, system_id):
    """Edit a voting system's machine and simple names.

    GET renders the edit form; POST validates and saves. Validation or
    save failures re-render the form with an error alert; success
    re-renders it with a success alert.

    Raises Http404 when no VotingSystem with ``system_id`` exists.
    """
    # get the voting system object
    vs = get_object_or_404(VotingSystem, id=system_id)

    # make a context
    ctx = {'vs': vs}

    if request.method == "POST":
        # Validate the submitted form explicitly instead of raising a
        # bare Exception as control flow.
        form = EditSystemForm(request.POST)
        if not form.is_valid():
            ctx['alert_head'] = 'Saving failed'
            ctx['alert_text'] = 'Invalid data submitted'
            return render(request, SETTINGS_SYSTEMS_EDIT_TEMPLATE, ctx)

        try:
            # store the fields
            vs.machine_name = form.cleaned_data['machine_name']
            vs.simple_name = form.cleaned_data['simple_name']

            # and try to clean + save; clean() may raise ValidationError
            # (e.g. on a duplicate machine_name).
            vs.clean()
            vs.save()
        except Exception as e:
            ctx['alert_head'] = 'Saving failed'
            ctx['alert_text'] = str(e)
            return render(request, SETTINGS_SYSTEMS_EDIT_TEMPLATE, ctx)

        ctx['alert_type'] = 'success'
        ctx['alert_head'] = 'Saving succeeded'
        ctx['alert_text'] = 'Voting System saved'

    # render the response
    return render(request, SETTINGS_SYSTEMS_EDIT_TEMPLATE, ctx)
@login_required
@superadmin
def system_delete(request, system_id):
    """Delete a voting system (POST only).

    Refuses to delete a system that still has votes attached; always
    returns the overview page with an appropriate alert.

    Raises Http404 on non-POST requests or an unknown ``system_id``.
    """
    # only POST is supported
    if request.method != "POST":
        raise Http404

    # get the voting system object
    vs = get_object_or_404(VotingSystem, id=system_id)

    # if the vote set is not empty, refuse: votes must be removed first.
    if vs.vote_set.count() != 0:
        return systems(request, alert_head="Deletion failed",
                       alert_text="Voting System is not empty. "
                                  "Please delete all votes first. ")

    # try to delete; narrow except (a bare ``except:`` would also
    # swallow KeyboardInterrupt/SystemExit).
    try:
        vs.delete()
    except Exception:
        return systems(request, alert_head="Deletion failed")

    # done
    return systems(request, alert_type="success",
                   alert_head="Deletion succeeded",
                   alert_text="Voting System Deleted. ")
@login_required
@superadmin
def system_new(request):
    """Create a new voting system with timestamp-based placeholder names
    and redirect to its edit page (POST only).

    Raises Http404 on non-POST requests.
    """
    # only POST is supported
    if request.method != "POST":
        raise Http404

    # TODO: Sensible defaults
    # A per-second timestamp keeps the generated names unique enough for
    # a placeholder the admin is expected to rename immediately.
    now = str(int(time.time()))
    simple_name = 'Voting System ' + now
    machine_name = 'voting_system_' + now

    # Create a new voting system
    vs = VotingSystem(simple_name=simple_name, machine_name=machine_name)

    # try to save and clean; narrow except instead of a bare ``except:``.
    try:
        vs.clean()
        vs.save()
    except Exception:
        return systems(request, alert_head="Creation failed. ",
                       alert_text="Unable to save new VotingSystem. ")

    # redirect to the edit page
    return redirect(reverse('settings:edit', kwargs={'system_id': str(vs.id)}))
| mit |
appleseedhq/gaffer | python/GafferUITest/StandardNodeUITest.py | 4 | 3230 | ##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferUITest
class StandardNodeUITest( GafferUITest.TestCase ) :

	def testPlugValueWidgetAccess( self ) :
		"""plugValueWidget() returns widgets lazily and maps them to plugs."""

		n = Gaffer.Node()
		n["c"] = Gaffer.Plug()
		n["c"]["i"] = Gaffer.IntPlug()
		n["c"]["s"] = Gaffer.StringPlug()

		Gaffer.Metadata.registerValue( n["c"], "plugValueWidget:type", "GafferUI.LayoutPlugValueWidget" )

		u = GafferUI.StandardNodeUI( n )

		# Forcing creation with lazy=False yields a widget bound to the plug.
		self.assertTrue( isinstance( u.plugValueWidget( n["c"], lazy=False ), GafferUI.PlugValueWidget ) )
		self.assertTrue( u.plugValueWidget( n["c"] ).getPlug().isSame( n["c"] ) )

		# Child plug widget is None until requested with lazy=False.
		self.assertEqual( u.plugValueWidget( n["c"]["i"] ), None )
		self.assertTrue( isinstance( u.plugValueWidget( n["c"]["i"], lazy=False ), GafferUI.PlugValueWidget ) )
		self.assertTrue( u.plugValueWidget( n["c"]["i"] ).getPlug().isSame( n["c"]["i"] ) )

	def testSetReadOnlyForUserPlugs( self ) :
		"""setReadOnly() propagates to user-plug widgets, existing or new."""

		n = Gaffer.Node()
		n["user"]["a"] = Gaffer.IntPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )

		u = GafferUI.StandardNodeUI( n )
		self.assertEqual( u.plugValueWidget( n["user"]["a"], lazy=False ).getReadOnly(), False )

		u.setReadOnly( True )
		self.assertEqual( u.plugValueWidget( n["user"]["a"] ).getReadOnly(), True )

		# Widgets created before setReadOnly() must also be updated.
		u = GafferUI.StandardNodeUI( n )
		w = u.plugValueWidget( n["user"]["a"], lazy=False )
		u.setReadOnly( True )
		self.assertEqual( w.getReadOnly(), True )
if __name__ == "__main__":
	# Bug fix: ``unittest`` is not imported at module scope in this file,
	# so running it as a script raised NameError. Import locally here.
	import unittest
	unittest.main()
| bsd-3-clause |
egaxegax/django-dbcartajs | djangoappengine/tests/test_not_return_sets.py | 28 | 4299 | import datetime
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.test import TestCase
from .models import FieldsWithOptionsModel, OrderedModel, \
SelfReferenceModel
class NonReturnSetsTest(TestCase):
    """Exercise queryset operations that return single values or objects
    rather than result sets (get, count, in_bulk, latest, exists,
    delete), plus ForeignKey traversal in both directions.
    """

    # Parallel fixture data; one FieldsWithOptionsModel is created per pair.
    floats = [5.3, 2.6, 9.1, 1.58, 2.4]
    emails = ['app-engine@scholardocs.com', 'sharingan@uchias.com',
              'rinnengan@sage.de', 'rasengan@naruto.com', 'itachi@uchia.com']

    def setUp(self):
        """Create one OrderedModel and one FieldsWithOptionsModel per pair."""
        # Loop variable renamed from ``float`` so the builtin is not shadowed.
        for index, (floating_point, email) in enumerate(
                zip(NonReturnSetsTest.floats, NonReturnSetsTest.emails)):
            self.last_save_time = datetime.datetime.now().time()
            ordered_instance = OrderedModel(priority=index, pk=index + 1)
            ordered_instance.save()
            model = FieldsWithOptionsModel(floating_point=floating_point,
                                           integer=int(floating_point),
                                           email=email,
                                           time=self.last_save_time,
                                           foreign_key=ordered_instance)
            model.save()

    def test_get(self):
        """get() returns a unique match and raises on zero or many matches."""
        self.assertEqual(
            FieldsWithOptionsModel.objects.get(
                email='itachi@uchia.com').email,
            'itachi@uchia.com')

        # Test exception when matching multiple entities.
        self.assertRaises(MultipleObjectsReturned,
                          FieldsWithOptionsModel.objects.get,
                          integer=2)

        # Test exception when entity does not exist.
        self.assertRaises(ObjectDoesNotExist,
                          FieldsWithOptionsModel.objects.get,
                          floating_point=5.2)

        # TODO: Test create when djangos model.save_base is refactored.
        # TODO: Test get_or_create when refactored.

    def test_count(self):
        self.assertEqual(
            FieldsWithOptionsModel.objects.filter(integer=2).count(), 2)

    def test_in_bulk(self):
        # in_bulk() keys the result dict by the requested primary keys.
        self.assertEqual(
            [key in ['sharingan@uchias.com', 'itachi@uchia.com']
             for key in FieldsWithOptionsModel.objects.in_bulk(
                 ['sharingan@uchias.com', 'itachi@uchia.com']).keys()],
            [True, ] * 2)

    def test_latest(self):
        # The last instance saved in setUp has the latest time.
        self.assertEqual(
            FieldsWithOptionsModel.objects.latest('time').email,
            'itachi@uchia.com')

    def test_exists(self):
        self.assertEqual(FieldsWithOptionsModel.objects.exists(), True)

    def test_deletion(self):
        # TODO: ForeignKeys will not be deleted! This has to be done
        # via background tasks.
        self.assertEqual(FieldsWithOptionsModel.objects.count(), 5)

        FieldsWithOptionsModel.objects.get(email='itachi@uchia.com').delete()
        self.assertEqual(FieldsWithOptionsModel.objects.count(), 4)

        FieldsWithOptionsModel.objects.filter(email__in=[
            'sharingan@uchias.com', 'itachi@uchia.com',
            'rasengan@naruto.com', ]).delete()
        self.assertEqual(FieldsWithOptionsModel.objects.count(), 2)

    def test_selfref_deletion(self):
        """A self-referencing instance can be saved and deleted."""
        entity = SelfReferenceModel()
        entity.save()
        entity.delete()

    def test_foreign_key_fetch(self):
        # Test fetching the ForeignKey.
        ordered_instance = OrderedModel.objects.get(priority=2)
        self.assertEqual(
            FieldsWithOptionsModel.objects.get(integer=9).foreign_key,
            ordered_instance)

    def test_foreign_key_backward(self):
        """Reverse-manager add/remove/clear/assignment on the related set."""
        entity = OrderedModel.objects.all()[0]
        self.assertEqual(entity.keys.count(), 1)

        # TODO: Add should save the added instance transactional via for
        # example force_insert.
        new_foreign_key = FieldsWithOptionsModel(
            floating_point=5.6, integer=3,
            email='temp@temp.com', time=datetime.datetime.now())
        entity.keys.add(new_foreign_key)
        self.assertEqual(entity.keys.count(), 2)
        # TODO: Add test for create.

        entity.keys.remove(new_foreign_key)
        self.assertEqual(entity.keys.count(), 1)

        entity.keys.clear()
        self.assertFalse(entity.keys.exists())

        # Assignment de-duplicates: the same instance listed twice is
        # stored once.
        entity.keys = [new_foreign_key, new_foreign_key]
        self.assertEqual(entity.keys.count(), 1)
        self.assertEqual(entity.keys.all()[0].integer, 3)
| gpl-2.0 |
gavinfaux/namebench | nb_third_party/dns/rdtypes/IN/IPSECKEY.py | 248 | 5985 | # Copyright (C) 2006, 2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.inet
import dns.name
class IPSECKEY(dns.rdata.Rdata):

    """IPSECKEY record

    @ivar precedence: the precedence for this key data
    @type precedence: int
    @ivar gateway_type: the gateway type
    @type gateway_type: int
    @ivar algorithm: the algorithm to use
    @type algorithm: int
    @ivar gateway: the gateway
    @type gateway: None, IPv4 address, IPV6 address, or domain name
    @ivar key: the public key
    @type key: string
    @see: RFC 4025"""

    __slots__ = ['precedence', 'gateway_type', 'algorithm', 'gateway', 'key']

    def __init__(self, rdclass, rdtype, precedence, gateway_type, algorithm,
                 gateway, key):
        """Validate the gateway against its type and store all fields.

        Gateway types: 0 = no gateway ('.' in text form), 1 = IPv4
        address, 2 = IPv6 address, 3 = domain name.
        """
        super(IPSECKEY, self).__init__(rdclass, rdtype)
        if gateway_type == 0:
            if gateway != '.' and not gateway is None:
                raise SyntaxError('invalid gateway for gateway type 0')
            # Normalise "no gateway" to None internally.
            gateway = None
        elif gateway_type == 1:
            # check that it's OK
            junk = dns.inet.inet_pton(dns.inet.AF_INET, gateway)
        elif gateway_type == 2:
            # check that it's OK
            junk = dns.inet.inet_pton(dns.inet.AF_INET6, gateway)
        elif gateway_type == 3:
            pass
        else:
            raise SyntaxError('invalid IPSECKEY gateway type: %d' % gateway_type)
        self.precedence = precedence
        self.gateway_type = gateway_type
        self.algorithm = algorithm
        self.gateway = gateway
        self.key = key

    def to_text(self, origin=None, relativize=True, **kw):
        # Render the gateway according to its type; type 0 is shown as '.'.
        if self.gateway_type == 0:
            gateway = '.'
        elif self.gateway_type == 1:
            gateway = self.gateway
        elif self.gateway_type == 2:
            gateway = self.gateway
        elif self.gateway_type == 3:
            gateway = str(self.gateway.choose_relativity(origin, relativize))
        else:
            raise ValueError('invalid gateway type')
        return '%d %d %d %s %s' % (self.precedence, self.gateway_type,
                                   self.algorithm, gateway,
                                   dns.rdata._base64ify(self.key))

    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        """Parse text form: precedence, gateway type, algorithm, gateway,
        then the base64 key material (possibly split across tokens)."""
        precedence = tok.get_uint8()
        gateway_type = tok.get_uint8()
        algorithm = tok.get_uint8()
        if gateway_type == 3:
            gateway = tok.get_name().choose_relativity(origin, relativize)
        else:
            gateway = tok.get_string()
        chunks = []
        while 1:
            t = tok.get().unescape()
            if t.is_eol_or_eof():
                break
            if not t.is_identifier():
                raise dns.exception.SyntaxError
            chunks.append(t.value)
        b64 = ''.join(chunks)
        # Python 2 codec-based base64 decode of the key material.
        key = b64.decode('base64_codec')
        return cls(rdclass, rdtype, precedence, gateway_type, algorithm,
                   gateway, key)

    # Pre-decorator-syntax classmethod registration (Python 2.3 style).
    from_text = classmethod(from_text)

    def to_wire(self, file, compress = None, origin = None):
        # Fixed 3-octet header: precedence, gateway type, algorithm.
        header = struct.pack("!BBB", self.precedence, self.gateway_type,
                             self.algorithm)
        file.write(header)
        if self.gateway_type == 0:
            pass
        elif self.gateway_type == 1:
            file.write(dns.inet.inet_pton(dns.inet.AF_INET, self.gateway))
        elif self.gateway_type == 2:
            file.write(dns.inet.inet_pton(dns.inet.AF_INET6, self.gateway))
        elif self.gateway_type == 3:
            # Gateway names are written without compression (compress=None).
            self.gateway.to_wire(file, None, origin)
        else:
            raise ValueError('invalid gateway type')
        # The key occupies the remainder of the rdata.
        file.write(self.key)

    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        """Parse wire form; the gateway length depends on gateway type."""
        if rdlen < 3:
            raise dns.exception.FormError
        header = struct.unpack('!BBB', wire[current : current + 3])
        gateway_type = header[1]
        current += 3
        rdlen -= 3
        if gateway_type == 0:
            gateway = None
        elif gateway_type == 1:
            # 4-octet IPv4 address.
            gateway = dns.inet.inet_ntop(dns.inet.AF_INET,
                                         wire[current : current + 4])
            current += 4
            rdlen -= 4
        elif gateway_type == 2:
            # 16-octet IPv6 address.
            gateway = dns.inet.inet_ntop(dns.inet.AF_INET6,
                                         wire[current : current + 16])
            current += 16
            rdlen -= 16
        elif gateway_type == 3:
            # Variable-length domain name.
            (gateway, cused) = dns.name.from_wire(wire[: current + rdlen],
                                                  current)
            current += cused
            rdlen -= cused
        else:
            raise dns.exception.FormError('invalid IPSECKEY gateway type')
        # Whatever remains after the gateway is the key material.
        key = wire[current : current + rdlen]
        return cls(rdclass, rdtype, header[0], gateway_type, header[2],
                   gateway, key)

    from_wire = classmethod(from_wire)

    def _cmp(self, other):
        # Compare by canonical wire form (Python 2 cmp protocol).
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)
| apache-2.0 |
redhat-cip/tempest | tempest/api/identity/test_extension.py | 24 | 1170 | # Copyright 2014 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.identity import base
from tempest import test
class ExtensionTestJSON(base.BaseIdentityV2AdminTest):

    @test.idempotent_id('85f3f661-f54c-4d48-b563-72ae952b9383')
    def test_list_extensions(self):
        """List the identity extensions and verify each entry's fields."""
        extensions = self.non_admin_client.list_extensions()
        self.assertNotEmpty(extensions)
        expected_keys = ('name', 'updated', 'alias', 'links',
                         'namespace', 'description')
        for extension in extensions:
            for expected_key in expected_keys:
                self.assertIn(expected_key, extension)
| apache-2.0 |
dednal/chromium.src | tools/cr/cr/fixups/arch.py | 103 | 1581 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for architecture output directory fixups."""
import cr
class _ArchInitHookHelper(cr.InitHook):
  """Base class helper for CR_ARCH value fixups.

  Subclasses override _VersionTest to say which old config versions the
  fixup applies to, and _ArchConvert to map a stored architecture name
  to its replacement.
  """

  def _VersionTest(self, old_version):
    # Subclass hook: return True if this fixup applies to configs
    # written by |old_version|. Default: always applies.
    _ = old_version
    return True

  def _ArchConvert(self, old_arch):
    # Subclass hook: map a stored architecture name to its replacement.
    # Default: identity (no change).
    return old_arch

  def Run(self, old_version, config):
    # Apply the fixup: rewrite CR_ARCH in |config| when the version
    # matches and the conversion actually changes the value.
    if old_version is None or not self._VersionTest(old_version):
      return
    old_arch = config.OVERRIDES.Find(cr.Arch.SELECTOR)
    new_arch = self._ArchConvert(old_arch)
    if new_arch != old_arch:
      # Python 2 print statement: this codebase targets Python 2.
      print '** Fixing architecture from {0} to {1}'.format(old_arch, new_arch)
      config.OVERRIDES[cr.Arch.SELECTOR] = new_arch
class WrongArchDefaultInitHook(_ArchInitHookHelper):
  """Fixes bad initial defaults.

  Early cr versions (before output directories were versioned) wrote
  invalid architecture defaults. When such an old config is detected,
  replace whatever was stored with the current default architecture.
  """

  def _ArchConvert(self, _):
    # The stored value is known-bad; ignore it entirely.
    return cr.Arch.default.name

  def _VersionTest(self, old_version):
    # Only pre-versioning (<= 0.0) configs are affected.
    return old_version <= 0.0
class MipsAndArmRenameInitHook(_ArchInitHookHelper):
  """Fixes rename of Mips and Arm to Mips32 and Arm32."""

  def _ArchConvert(self, old_arch):
    # Look up the replacement lazily so GetInstance() only runs for a
    # matching architecture; anything else passes through unchanged.
    renamed = {
        'mips': cr.Mips32Arch.GetInstance,
        'arm': cr.Arm32Arch.GetInstance,
    }.get(old_arch)
    if renamed is None:
      return old_arch
    return renamed().name
| bsd-3-clause |
takeshineshiro/nova | nova/tests/unit/scheduler/test_filter_scheduler.py | 15 | 14306 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Filter Scheduler.
"""
import mock
from nova import exception
from nova.scheduler import filter_scheduler
from nova.scheduler import host_manager
from nova.scheduler import utils as scheduler_utils
from nova.scheduler import weights
from nova import test # noqa
from nova.tests.unit.scheduler import fakes
from nova.tests.unit.scheduler import test_scheduler
def fake_get_filtered_hosts(hosts, filter_properties, index):
    """Filtering stub: pass every host through, in order, as a new list."""
    return [host for host in hosts]
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
"""Test case for Filter Scheduler."""
driver_cls = filter_scheduler.FilterScheduler
    @mock.patch('nova.objects.ServiceList.get_by_binary',
                return_value=fakes.SERVICES)
    @mock.patch('nova.objects.InstanceList.get_by_host')
    @mock.patch('nova.objects.ComputeNodeList.get_all',
                return_value=fakes.COMPUTE_NODES)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
                return_value={'numa_topology': None,
                              'pci_requests': None})
    def test_schedule_happy_day(self, mock_get_extra, mock_get_all,
                                mock_by_host, mock_get_by_binary):
        """Make sure there's nothing glaringly wrong with _schedule()
        by doing a happy day pass through.
        """

        self.next_weight = 1.0

        # Fake weigher: returns a single weighed host whose weight grows
        # on every call.
        def _fake_weigh_objects(_self, functions, hosts, options):
            self.next_weight += 2.0
            host_state = hosts[0]
            return [weights.WeighedHost(host_state, self.next_weight)]

        # Bypass real filtering and weighing.
        self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
                       fake_get_filtered_hosts)
        self.stubs.Set(weights.HostWeightHandler,
                       'get_weighed_objects', _fake_weigh_objects)

        request_spec = {'num_instances': 10,
                        'instance_type': {'memory_mb': 512, 'root_gb': 512,
                                          'ephemeral_gb': 0,
                                          'vcpus': 1},
                        'instance_properties': {'project_id': 1,
                                                'root_gb': 512,
                                                'memory_mb': 512,
                                                'ephemeral_gb': 0,
                                                'vcpus': 1,
                                                'os_type': 'Linux',
                                                'uuid': 'fake-uuid'}}
        self.mox.ReplayAll()
        weighed_hosts = self.driver._schedule(self.context, request_spec, {})
        # One weighed host is expected per requested instance.
        self.assertEqual(len(weighed_hosts), 10)
        for weighed_host in weighed_hosts:
            self.assertIsNotNone(weighed_host.obj)
    def test_max_attempts(self):
        # _max_attempts should reflect the scheduler_max_attempts flag.
        self.flags(scheduler_max_attempts=4)
        self.assertEqual(4, scheduler_utils._max_attempts())
    def test_invalid_max_attempts(self):
        # A non-positive scheduler_max_attempts setting must be rejected.
        self.flags(scheduler_max_attempts=0)
        self.assertRaises(exception.NovaException,
                          scheduler_utils._max_attempts)
def test_add_retry_host(self):
retry = dict(num_attempts=1, hosts=[])
filter_properties = dict(retry=retry)
host = "fakehost"
node = "fakenode"
scheduler_utils._add_retry_host(filter_properties, host, node)
hosts = filter_properties['retry']['hosts']
self.assertEqual(1, len(hosts))
self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
# Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
host_state = host_manager.HostState('host', 'node')
host_state.limits['vcpus'] = 5
scheduler_utils.populate_filter_properties(filter_properties,
host_state)
self.assertEqual(['host', 'node'],
filter_properties['retry']['hosts'][0])
self.assertEqual({'vcpus': 5}, host_state.limits)
    @mock.patch('nova.objects.ServiceList.get_by_binary',
                return_value=fakes.SERVICES)
    @mock.patch('nova.objects.InstanceList.get_by_host')
    @mock.patch('nova.objects.ComputeNodeList.get_all',
                return_value=fakes.COMPUTE_NODES)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
                return_value={'numa_topology': None,
                              'pci_requests': None})
    def test_schedule_host_pool(self, mock_get_extra, mock_get_all,
                                mock_by_host, mock_get_by_binary):
        """Make sure the scheduler_host_subset_size property works properly."""

        # Pick randomly from the best 2 hosts.
        self.flags(scheduler_host_subset_size=2)
        # Bypass real filtering.
        self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
                       fake_get_filtered_hosts)

        instance_properties = {'project_id': 1,
                               'root_gb': 512,
                               'memory_mb': 512,
                               'ephemeral_gb': 0,
                               'vcpus': 1,
                               'os_type': 'Linux',
                               'uuid': 'fake-uuid'}

        request_spec = dict(instance_properties=instance_properties,
                            instance_type={})
        filter_properties = {}
        self.mox.ReplayAll()
        hosts = self.driver._schedule(self.context, request_spec,
                                      filter_properties=filter_properties)

        # one host should be chosen
        self.assertEqual(len(hosts), 1)
    @mock.patch('nova.objects.ServiceList.get_by_binary',
                return_value=fakes.SERVICES)
    @mock.patch('nova.objects.InstanceList.get_by_host')
    @mock.patch('nova.objects.ComputeNodeList.get_all',
                return_value=fakes.COMPUTE_NODES)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
                return_value={'numa_topology': None,
                              'pci_requests': None})
    def test_schedule_large_host_pool(self, mock_get_extra, mock_get_all,
                                      mock_by_host, mock_get_by_binary):
        """Hosts should still be chosen if pool size
        is larger than number of filtered hosts.
        """

        # Subset size deliberately larger than the number of fake hosts.
        self.flags(scheduler_host_subset_size=20)
        self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
                       fake_get_filtered_hosts)

        instance_properties = {'project_id': 1,
                               'root_gb': 512,
                               'memory_mb': 512,
                               'ephemeral_gb': 0,
                               'vcpus': 1,
                               'os_type': 'Linux',
                               'uuid': 'fake-uuid'}
        request_spec = dict(instance_properties=instance_properties,
                            instance_type={})
        filter_properties = {}
        self.mox.ReplayAll()
        hosts = self.driver._schedule(self.context, request_spec,
                                      filter_properties=filter_properties)

        # one host should be chose
        self.assertEqual(len(hosts), 1)
    @mock.patch('nova.scheduler.host_manager.HostManager._add_instance_info')
    @mock.patch('nova.objects.ServiceList.get_by_binary',
                return_value=fakes.SERVICES)
    @mock.patch('nova.objects.ComputeNodeList.get_all',
                return_value=fakes.COMPUTE_NODES)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
                return_value={'numa_topology': None,
                              'pci_requests': None})
    def test_schedule_chooses_best_host(self, mock_get_extra, mock_cn_get_all,
                                        mock_get_by_binary,
                                        mock_add_inst_info):
        """If scheduler_host_subset_size is 1, the largest host with greatest
        weight should be returned.
        """

        self.flags(scheduler_host_subset_size=1)
        self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
                       fake_get_filtered_hosts)

        self.next_weight = 50

        # Fake weigher: the first call returns weight 50, every later
        # call returns 0, so the first host is unambiguously best.
        def _fake_weigh_objects(_self, functions, hosts, options):
            this_weight = self.next_weight
            self.next_weight = 0
            host_state = hosts[0]
            return [weights.WeighedHost(host_state, this_weight)]

        instance_properties = {'project_id': 1,
                               'root_gb': 512,
                               'memory_mb': 512,
                               'ephemeral_gb': 0,
                               'vcpus': 1,
                               'os_type': 'Linux',
                               'uuid': 'fake-uuid'}

        request_spec = dict(instance_properties=instance_properties,
                            instance_type={})

        self.stubs.Set(weights.HostWeightHandler,
                       'get_weighed_objects', _fake_weigh_objects)

        filter_properties = {}
        self.mox.ReplayAll()
        hosts = self.driver._schedule(self.context, request_spec,
                                      filter_properties=filter_properties)

        # one host should be chosen
        self.assertEqual(1, len(hosts))

        self.assertEqual(50, hosts[0].weight)
    @mock.patch('nova.objects.ServiceList.get_by_binary',
                return_value=fakes.SERVICES)
    @mock.patch('nova.objects.InstanceList.get_by_host')
    @mock.patch('nova.objects.ComputeNodeList.get_all',
                return_value=fakes.COMPUTE_NODES)
    @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
                return_value={'numa_topology': None,
                              'pci_requests': None})
    def test_select_destinations(self, mock_get_extra, mock_get_all,
                                 mock_by_host, mock_get_by_binary):
        """select_destinations is basically a wrapper around _schedule().

        Similar to the _schedule tests, this just does a happy path test to
        ensure there is nothing glaringly wrong.
        """
        self.next_weight = 1.0

        # Record which host/node the fake weigher saw so we can check that
        # select_destinations() returns exactly that pair.
        selected_hosts = []
        selected_nodes = []

        def _fake_weigh_objects(_self, functions, hosts, options):
            self.next_weight += 2.0
            host_state = hosts[0]
            selected_hosts.append(host_state.host)
            selected_nodes.append(host_state.nodename)
            return [weights.WeighedHost(host_state, self.next_weight)]

        self.stubs.Set(self.driver.host_manager, 'get_filtered_hosts',
                       fake_get_filtered_hosts)
        self.stubs.Set(weights.HostWeightHandler,
                       'get_weighed_objects', _fake_weigh_objects)

        request_spec = {'instance_type': {'memory_mb': 512, 'root_gb': 512,
                                          'ephemeral_gb': 0,
                                          'vcpus': 1},
                        'instance_properties': {'project_id': 1,
                                                'root_gb': 512,
                                                'memory_mb': 512,
                                                'ephemeral_gb': 0,
                                                'vcpus': 1,
                                                'os_type': 'Linux',
                                                'uuid': 'fake-uuid'},
                        'num_instances': 1}
        self.mox.ReplayAll()
        dests = self.driver.select_destinations(self.context, request_spec, {})
        (host, node) = (dests[0]['host'], dests[0]['nodename'])
        self.assertEqual(host, selected_hosts[0])
        self.assertEqual(node, selected_nodes[0])
    @mock.patch.object(filter_scheduler.FilterScheduler, '_schedule')
    def test_select_destinations_notifications(self, mock_schedule):
        """select_destinations() must emit start/end notifications, in order."""
        mock_schedule.return_value = [mock.Mock()]

        with mock.patch.object(self.driver.notifier, 'info') as mock_info:
            request_spec = {'num_instances': 1}

            self.driver.select_destinations(self.context, request_spec, {})

            expected = [
                mock.call(self.context, 'scheduler.select_destinations.start',
                          dict(request_spec=request_spec)),
                mock.call(self.context, 'scheduler.select_destinations.end',
                          dict(request_spec=request_spec))]
            self.assertEqual(expected, mock_info.call_args_list)
def test_select_destinations_no_valid_host(self):
def _return_no_host(*args, **kwargs):
return []
self.stubs.Set(self.driver, '_schedule', _return_no_host)
self.assertRaises(exception.NoValidHost,
self.driver.select_destinations, self.context,
{'num_instances': 1}, {})
    def test_select_destinations_no_valid_host_not_enough(self):
        """NoValidHost with a reason when fewer hosts than instances exist."""
        # Tests that we have fewer hosts available than number of instances
        # requested to build.
        consumed_hosts = [mock.MagicMock(), mock.MagicMock()]
        with mock.patch.object(self.driver, '_schedule',
                               return_value=consumed_hosts):
            try:
                self.driver.select_destinations(
                    self.context, {'num_instances': 3}, {})
                self.fail('Expected NoValidHost to be raised.')
            except exception.NoValidHost as e:
                # Make sure that we provided a reason why NoValidHost.
                self.assertIn('reason', e.kwargs)
                self.assertTrue(len(e.kwargs['reason']) > 0)
                # Make sure that the consumed hosts have chance to be reverted.
                for host in consumed_hosts:
                    self.assertIsNone(host.obj.updated)
| apache-2.0 |
kernsuite-debian/lofar | SAS/TriggerEmailService/Server/lib/TriggerEmailService.py | 1 | 12860 | #!/usr/bin/env python3
#
# Copyright (C) 2017
# ASTRON (Netherlands Institute for Radio Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This file is part of the LOFAR software suite.
# The LOFAR software suite is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The LOFAR software suite is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
import os
import smtplib
import logging
from lofar.sas.TriggerEmailService.Templates import ABORTED_TEMPLATE_BODY, ABORTED_TEMPLATE_SUBJECT
from lofar.sas.TriggerEmailService.Templates import ACCEPTED_TEMPLATE_BODY, ACCEPTED_TEMPLATE_SUBJECT
from lofar.sas.TriggerEmailService.Templates import FINISHED_TEMPLATE_BODY, FINISHED_TEMPLATE_SUBJECT
from lofar.sas.TriggerEmailService.Templates import REJECTED_TEMPLATE_BODY, REJECTED_TEMPLATE_SUBJECT
from lofar.sas.TriggerEmailService.Templates import RECEIVED_TEMPLATE_BODY, RECEIVED_TEMPLATE_SUBJECT
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from datetime import timedelta, datetime
import time
from lofar.sas.otdb.OTDBBusListener import OTDBBusListener, OTDBEventMessageHandler
from lofar.common.util import waitForInterrupt
from lofar.messaging.messagebus import BusListener, AbstractMessageHandler
from lofar.messaging import DEFAULT_BROKER, DEFAULT_BUSNAME
from lofar.sas.TriggerEmailService.common.config import DEFAULT_TRIGGER_NOTIFICATION_SUBJECT
from lofar.mom.momqueryservice.momqueryrpc import MoMQueryRPC
from lxml import etree
from io import BytesIO
from re import findall
import socket
logger = logging.getLogger(__name__)
def email(recipients, subject, body, attachment, attachment_name):
    """Send a plain-text e-mail through the local SMTP server.

    The message is sent as lofarsys@<this host>, with a fixed Science
    Operations & Support From header.  In a PRODUCTION environment
    (LOFARENV) the SOS and observer mailing lists are added to the
    recipient list as well.

    :param recipients: list of recipient e-mail addresses (not modified)
    :param subject: message subject line
    :param body: plain-text message body
    :param attachment: optional text to attach; skipped when falsy
    :param attachment_name: filename advertised for the attachment
    """
    # Work on a copy: the original code appended the production addresses
    # to the caller's list, so repeated calls with the same list would
    # accumulate duplicate recipients.
    recipients = list(recipients)
    if "LOFARENV" in os.environ:
        lofar_environment = os.environ['LOFARENV']

        if lofar_environment == "PRODUCTION":
            recipients.append("sos@astron.nl")
            recipients.append("observer@astron.nl")

    hostname = socket.gethostname()
    sender = "lofarsys@" + hostname
    commaspace = ', '

    msg = MIMEMultipart()
    msg.attach(MIMEText(body, 'plain'))
    msg["Subject"] = subject
    msg["From"] = "LOFAR Science Operations & Support <sos@astron.nl>"
    msg["To"] = commaspace.join(recipients)

    if attachment:
        txt = MIMEText(attachment)
        txt.add_header('Content-Disposition', "attachment; filename= %s" % attachment_name)
        msg.attach(txt)

    s = smtplib.SMTP('localhost')
    try:
        s.sendmail(sender, recipients, msg.as_string())
    finally:
        # Always close the SMTP session, even when sendmail() raises.
        s.quit()
class MoMIdError(Exception):
    """Raised when no MoM id can be resolved for an OTDB id."""
    pass
class OTDBTriggerListener(OTDBBusListener):
    """Bus listener that reacts to OTDB observation status changes with an
    :class:`OTDBTriggerHandler` (which e-mails trigger owners)."""

    def __init__(self, momquery_rpc=MoMQueryRPC(), exchange=DEFAULT_BUSNAME, broker=DEFAULT_BROKER):
        """
        OTDBTriggerListener listens on the lofar OTDB message bus and emails
        when an observation belonging to a trigger changes state.
        :param momquery_rpc: MoMQueryRPC client handed to the handler
        :param exchange: valid message exchange address
        :param broker: valid broker host (default: None, which means localhost)
        """
        # NOTE(review): the MoMQueryRPC() default is evaluated once at class
        # definition time, so all instances constructed without an explicit
        # momquery_rpc share a single client -- confirm this is intended.
        super(OTDBTriggerListener, self).__init__(handler_type=OTDBTriggerHandler,
                                                  handler_kwargs={'momquery_rpc': momquery_rpc},
                                                  exchange=exchange, broker=broker)
class OTDBTriggerHandler(OTDBEventMessageHandler):
    """Handles OTDB observation state events: if the observation belongs to a
    trigger, e-mail the project's contacts with a state-specific template."""

    def __init__(self, momquery_rpc=MoMQueryRPC()):
        # NOTE(review): mutable/stateful default argument is evaluated once;
        # handlers created without an explicit client share one RPC instance.
        super().__init__()
        self.mom_rpc_client = momquery_rpc

    def start_handling(self):
        """Open the MoM RPC connection before messages are handled."""
        self.mom_rpc_client.open()
        super(OTDBTriggerHandler, self).start_handling()

    def stop_handling(self):
        """Close the MoM RPC connection when handling stops."""
        self.mom_rpc_client.close()
        super(OTDBTriggerHandler, self).stop_handling()

    # One callback per OTDB observation state; each maps the state to the
    # corresponding e-mail template pair (subject, body).
    def onObservationAborted(self, otdb_id, _):
        self.when_trigger_send_email(otdb_id, ABORTED_TEMPLATE_SUBJECT, ABORTED_TEMPLATE_BODY)

    def onObservationScheduled(self, otdb_id, _):
        self.when_trigger_send_email(otdb_id, ACCEPTED_TEMPLATE_SUBJECT, ACCEPTED_TEMPLATE_BODY)

    def onObservationFinished(self, otdb_id, _):
        self.when_trigger_send_email(otdb_id, FINISHED_TEMPLATE_SUBJECT, FINISHED_TEMPLATE_BODY)

    def onObservationConflict(self, otdb_id, _):
        self.when_trigger_send_email(otdb_id, REJECTED_TEMPLATE_SUBJECT, REJECTED_TEMPLATE_BODY)

    def onObservationError(self, otdb_id, _):
        self.when_trigger_send_email(otdb_id, REJECTED_TEMPLATE_SUBJECT, REJECTED_TEMPLATE_BODY)

    def when_trigger_send_email(self, otdb_id, template_subject, template_body):
        """E-mail the project contacts, but only when the observation has a
        trigger id; logs an error when the MoM id cannot be resolved."""
        try:
            mom_id, trigger_id = self._get_mom_and_trigger_id(otdb_id)

            if trigger_id:
                self._send_email(otdb_id, mom_id, trigger_id, template_subject, template_body)
        except MoMIdError:
            logger.error("Could not retrieve a mom_id for otdb_id: %s", otdb_id)

    def _get_mom_and_trigger_id(self, otdb_id):
        """Resolve (mom_id, trigger_id) for an OTDB id.

        :raises MoMIdError: when no MoM id could be resolved
        """
        mom_id = self._try_get_mom_id(otdb_id)

        if not mom_id:
            raise MoMIdError

        trigger_id = self.mom_rpc_client.get_trigger_id(mom_id)['trigger_id']

        return mom_id, trigger_id

    def _try_get_mom_id(self, otdb_id):
        """Poll MoM (up to 10 times, 3 s apart) for the MoM id of otdb_id."""
        # sometimes we are too fast for MoM so we need to retry
        mom_id = None
        for _ in range(10):
            mom_id = self.mom_rpc_client.getMoMIdsForOTDBIds(otdb_id)[otdb_id]

            if mom_id:
                break

            time.sleep(3)
        return mom_id

    def _send_email(self, otdb_id, mom_id, trigger_id, template_subject, template_body):
        """Render the templates and send the notification (no attachment)."""
        logger.info("Emailing otdb_id: %s, mom_id: %s, trigger_id: %s, template_subject: %s, template_body: %s",
                    otdb_id, mom_id, trigger_id, template_subject, template_body)

        subject, body = self._fill_template(otdb_id, mom_id, trigger_id, template_subject, template_body)

        recipients = self._get_recipients(mom_id)

        email(recipients, subject, body, None, "")

    def _fill_template(self, otdb_id, mom_id, trigger_id, template_subject, template_body):
        """Fill the %-style templates with project/observation details."""
        project = self.mom_rpc_client.getObjectDetails(mom_id)[mom_id]

        data = {
            "PROJECTNAME": project["project_name"], "TRIGGERID": trigger_id, "OBSSASID": otdb_id, "OBSMOMID": mom_id,
            "MOMLINK": "https://lofar.astron.nl/mom3/user/project/setUpMom2ObjectDetails.do?view="
                       "generalinfo&mom2ObjectId=%s" % project["object_mom2objectid"]
        }

        subject = template_subject % data
        body = template_body % data

        return subject, body

    def _get_recipients(self, mom_id):
        """Return the e-mail addresses registered on the object's project."""
        recipients = []

        project = self.mom_rpc_client.getObjectDetails(mom_id)[mom_id]
        emails = self.mom_rpc_client.get_project_details(project['project_mom2id'])
        for k, v in list(emails.items()):
            recipients.append(v)

        return recipients
class TriggerNotificationListener(BusListener):
    """Bus listener for newly submitted triggers; delegates to
    :class:`TriggerNotificationHandler` which e-mails the project."""

    def __init__(self, momquery_rpc=MoMQueryRPC(), busname=DEFAULT_BUSNAME, broker=DEFAULT_BROKER):
        """
        TriggerNotificationListener listens on the lofar trigger message bus and emails when trigger
        gets submitted.
        :param momquery_rpc: MoMQueryRPC client handed to the handler
        :param busname: valid message exchange address
        :param broker: valid broker host (default: None, which means localhost)
        """
        super(TriggerNotificationListener, self).__init__(
            handler_type=TriggerNotificationHandler, handler_kwargs={'momquery_rpc': momquery_rpc},
            exchange=busname, routing_key=DEFAULT_TRIGGER_NOTIFICATION_SUBJECT, broker=broker)
class TriggerNotificationHandler(AbstractMessageHandler):
    def __init__(self, momquery_rpc=MoMQueryRPC()):
        """
        TriggerNotificationHandler listens on the lofar trigger message bus and emails when trigger
        gets submitted.
        """
        # NOTE(review): mutable/stateful default argument is evaluated once;
        # handlers created without an explicit client share one RPC instance.
        super(TriggerNotificationHandler, self).__init__()
        self.mom_rpc_client = momquery_rpc

    def handle_message(self, msg):
        """Send a 'trigger received' e-mail (trigger XML attached) to the
        project's contacts; logs an error for unknown project names."""
        trigger_id = msg.content['trigger_id']
        project_name = msg.content['project']
        trigger_xml = msg.content['metadata']

        start_time, stop_time = self._get_observation_start_stop_times(trigger_xml)

        mom_id = self._get_mom_id(project_name)

        if mom_id:
            subject, body = self._fill_template(trigger_id, project_name, start_time, stop_time,
                                                RECEIVED_TEMPLATE_SUBJECT, RECEIVED_TEMPLATE_BODY)

            recipients = self._get_recipients(mom_id)

            email(recipients, subject, body, trigger_xml, "trigger.xml")
        else:
            logger.error("Trigger got entered for a non existing project: %s", project_name)

    def _get_mom_id(self, project_name):
        """Linear search of all MoM projects for the given name; returns the
        project's mom2id or None."""
        # todo add function to momqueryserivce for it (get mom2id for project name)
        mom_id = None

        projects = self.mom_rpc_client.getProjects()
        for project in projects:
            if project["name"] == project_name:
                mom_id = project["mom2id"]

        return mom_id

    def _get_recipients(self, mom_id):
        """Return the e-mail addresses registered on the project."""
        recipients = []

        emails = self.mom_rpc_client.get_project_details(mom_id)
        for k, v in list(emails.items()):
            recipients.append(v)

        return recipients

    def _get_observation_start_stop_times(self, trigger_xml):
        """Extract the (start, stop) datetimes from the trigger XML.

        Fixed observations carry startTime + duration; dwelling observations
        carry minStartTime/maxEndTime instead.
        """
        # for now we work with duration to get stop time
        doc = etree.parse(BytesIO(trigger_xml.encode('UTF-8')))
        start_times = doc.getroot().findall('specification/activity/observation/timeWindowSpecification/startTime')
        if start_times:  # Not dwelling
            start_time = datetime.strptime(start_times[0].text, '%Y-%m-%dT%H:%M:%S')
            durations = doc.getroot().findall(
                'specification/activity/observation/timeWindowSpecification/duration/duration')
            duration = durations[0].text
            duration_seconds = self._iso8601_duration_as_seconds(duration)
            stop_time = start_time + timedelta(seconds=duration_seconds)
            return start_time, stop_time
        else:  # Dwelling
            min_start_times = doc.getroot().findall('specification/activity/observation/timeWindowSpecification/minStartTime')
            min_start_time = datetime.strptime(min_start_times[0].text, '%Y-%m-%dT%H:%M:%S')
            max_end_times = doc.getroot().findall('specification/activity/observation/timeWindowSpecification/maxEndTime')
            max_end_time = datetime.strptime(max_end_times[0].text, '%Y-%m-%dT%H:%M:%S')
            return min_start_time, max_end_time

    def start_handling(self, **kwargs):
        """Open the MoM RPC connection before messages are handled."""
        self.mom_rpc_client.open()
        super(TriggerNotificationHandler, self).start_handling()

    def stop_handling(self):
        """Close the MoM RPC connection when handling stops."""
        self.mom_rpc_client.close()
        super(TriggerNotificationHandler, self).stop_handling()

    def _fill_template(self, trigger_id, project_name, start_time, stop_time, template_subject, template_body):
        """Fill the %-style templates with trigger/project details."""
        data = {
            "PROJECTNAME": project_name, "TRIGGERID": trigger_id, "STARTTIME": start_time, "ENDTIME": stop_time
        }

        subject = template_subject % data
        body = template_body % data

        return subject, body

    def _iso8601_duration_as_seconds(self, duration):
        """Convert an ISO 8601 duration string (e.g. 'PT1H30M') to seconds.

        :raises ValueError: when the string does not start with 'P'
        """
        if duration[0] != 'P':
            raise ValueError('Not an ISO 8601 Duration string')
        seconds = 0
        # NOTE(review): the pattern is not a raw string; '\d' relies on
        # Python passing unknown escapes through unchanged.
        for i, item in enumerate(duration.split('T')):
            for number, unit in findall('(?P<number>\d+)(?P<period>S|M|H|D|W|Y)', item):
                number = int(number)
                this = 0
                if unit == 'Y':
                    this = number * 31557600  # 365.25 days
                elif unit == 'W':
                    this = number * 604800
                elif unit == 'D':
                    this = number * 86400
                elif unit == 'H':
                    this = number * 3600
                elif unit == 'M':
                    # 'M' is ambiguous: months before the 'T' separator
                    # (i == 0), minutes after it.
                    if i == 0:
                        this = number * 2678400  # assume a 31-day month
                    else:
                        this = number * 60
                elif unit == 'S':
                    this = number
                seconds += this
        return seconds
def main():
    """Run both trigger listeners until the process is interrupted."""
    with OTDBTriggerListener(), TriggerNotificationListener():
        waitForInterrupt()


if __name__ == '__main__':
    main()
| gpl-3.0 |
h3llrais3r/SickRage | lib/twilio/rest/resources/sms_messages.py | 51 | 6376 | from .util import normalize_dates, parse_date
from . import InstanceResource, ListResource
class ShortCode(InstanceResource):
    """A single Twilio short code resource."""

    def update(self, **kwargs):
        """Update this short code via its parent :class:`ShortCodes` list."""
        return self.parent.update(self.name, **kwargs)
class ShortCodes(ListResource):
    """List resource for the account's SMS short codes."""

    name = "ShortCodes"
    key = "short_codes"
    instance = ShortCode

    def list(self, **kwargs):
        """
        Returns a page of :class:`ShortCode` resources as a list. For
        paging information see :class:`ListResource`.

        :param short_code: Only show the ShortCode resources that match this
                           pattern. You can specify partial numbers and use '*'
                           as a wildcard for any digit.
        :param friendly_name: Only show the ShortCode resources with friendly
                              names that exactly match this name.
        """
        return self.get_instances(kwargs)

    def update(self, sid, url=None, method=None, fallback_url=None,
               fallback_method=None, **kwargs):
        """
        Update a specific :class:`ShortCode`, by specifying the sid.

        :param friendly_name: Description of the short code, with maximum
                              length 64 characters.
        :param api_version: SMSs to this short code will start a new TwiML
                            session with this API version.
        :param url: The URL that Twilio should request when somebody sends an
                    SMS to the short code.
        :param method: The HTTP method that should be used to request the url.
        :param fallback_url: A URL that Twilio will request if an error occurs
                             requesting or executing the TwiML at the url.
        :param fallback_method: The HTTP method that should be used to request
                                the fallback_url.
        """
        # Map the convenience parameters onto the API's sms_* names, without
        # clobbering explicitly supplied sms_* keyword arguments.
        kwargs["sms_url"] = kwargs.get("sms_url", url)
        kwargs["sms_method"] = kwargs.get("sms_method", method)
        kwargs["sms_fallback_url"] = \
            kwargs.get("sms_fallback_url", fallback_url)
        kwargs["sms_fallback_method"] = \
            kwargs.get("sms_fallback_method", fallback_method)
        return self.update_instance(sid, kwargs)
class Sms(object):
    """
    Holds all the specific SMS list resources
    """

    name = "SMS"
    key = "sms"

    def __init__(self, base_uri, auth, timeout):
        """Create the /SMS sub-resources (messages and short codes)."""
        self.uri = "%s/SMS" % base_uri
        self.messages = SmsMessages(self.uri, auth, timeout)
        self.short_codes = ShortCodes(self.uri, auth, timeout)
class SmsMessage(InstanceResource):
    """ An instance of an SMS Message

    .. attribute:: sid

        A 34 character string that uniquely identifies this resource.

    .. attribute:: date_created

        The date that this resource was created, given in RFC 2822 format.

    .. attribute:: date_updated

        The date that this resource was last updated, given in RFC 2822 format.

    .. attribute:: date_sent

        The date that the SMS was sent, given in RFC 2822 format.

    .. attribute:: account_sid

        The unique id of the Account that sent this SMS message.

    .. attribute:: from

        The phone number that initiated the message in E.164 format.
        For incoming messages, this will be the remote phone.
        For outgoing messages, this will be one of your Twilio phone numbers.

    .. attribute:: to

        The phone number that received the message in E.164 format.
        For incoming messages, this will be one of your Twilio phone numbers.
        For outgoing messages, this will be the remote phone.

    .. attribute:: body

        The text body of the SMS message.

    .. attribute:: status

        The status of this SMS message. Either queued, sending, sent, or failed.

    .. attribute:: direction

        The direction of this SMS message. ``incoming`` for incoming
        messages, ``outbound-api`` for messages initiated via the REST
        API, ``outbound-call`` for messages initiated during a call or
        ``outbound-reply`` for messages initiated in response to an incoming
        SMS.

    .. attribute:: price

        The amount billed for the message.

    .. attribute:: api_version

        The version of the Twilio API used to process the SMS message.

    .. attribute:: uri

        The URI for this resource, relative to https://api.twilio.com
    """
    # No behavior of its own: attributes are populated by InstanceResource.
    pass
class SmsMessages(ListResource):
    """List resource for sending and querying SMS messages."""

    name = "Messages"
    key = "sms_messages"
    instance = SmsMessage

    def create(self, from_=None, **kwargs):
        """
        Create and send a SMS Message.

        :param str to: The destination phone number.
        :param str `from_`: The phone number sending this message
            (must be a verified Twilio number)
        :param str body: The message you want to send,
            limited to 160 characters.
        :param status_callback: A URL that Twilio will POST to when
            your message is processed.
        :param str application_sid: The 34 character sid of the application
            Twilio should use to handle this phone call.

        Usage:

        .. code-block::python

            message = client.sms.messages.create(to="+12316851234",
                                                 from_="+15555555555",
                                                 body="Hello there!")

        """
        # 'from' is a reserved word in Python, hence the from_ parameter.
        kwargs["from"] = from_
        return self.create_instance(kwargs)

    @normalize_dates
    def list(self, from_=None, before=None, after=None, date_sent=None, **kw):
        """
        Returns a page of :class:`~twilio.rest.resources.SmsMessage` resources
        as a list. For paging information see :class:`ListResource`.

        :param to: Only show SMS messages to this phone number.
        :param `from_`: Only show SMS messages from this phone number.
        :param date after: Only list SMS messages sent after this date.
        :param date before: Only list SMS messages sent before this date.
        :param date date_sent: Only list SMS messages sent on this date.
        """
        kw["From"] = from_
        kw["DateSent<"] = before
        kw["DateSent>"] = after
        kw["DateSent"] = parse_date(date_sent)
        return self.get_instances(kw)
| gpl-3.0 |
vvv1559/intellij-community | plugins/hg4idea/testData/bin/hgext/relink.py | 93 | 6018 | # Mercurial extension to provide 'hg relink' command
#
# Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""recreates hardlinks between repository clones"""
from mercurial import hg, util
from mercurial.i18n import _
import os, stat
testedwith = 'internal'
def relink(ui, repo, origin=None, **opts):
    """recreate hardlinks between two repositories

    When repositories are cloned locally, their data files will be
    hardlinked so that they only use the space of a single repository.

    Unfortunately, subsequent pulls into either repository will break
    hardlinks for any files touched by the new changesets, even if
    both repositories end up pulling the same changes.

    Similarly, passing --rev to "hg clone" will fail to use any
    hardlinks, falling back to a complete copy of the source
    repository.

    This command lets you recreate those hardlinks and reclaim that
    wasted space.

    This repository will be relinked to share space with ORIGIN, which
    must be on the same local disk. If ORIGIN is omitted, looks for
    "default-relink", then "default", in [paths].

    Do not attempt any read operations on this repository while the
    command is running. (Both repositories will be locked against
    writes.)
    """
    # util.samefile/samedevice do not exist on platforms without hardlink
    # support (e.g. some Windows setups).
    if (not util.safehasattr(util, 'samefile') or
        not util.safehasattr(util, 'samedevice')):
        raise util.Abort(_('hardlinks are not supported on this system'))
    src = hg.repository(repo.baseui, ui.expandpath(origin or 'default-relink',
                                          origin or 'default'))
    ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
    if repo.root == src.root:
        ui.status(_('there is nothing to relink\n'))
        return

    # Hold both repository locks for the whole collect/prune/relink cycle;
    # nested try/finally guarantees release in reverse acquisition order.
    locallock = repo.lock()
    try:
        remotelock = src.lock()
        try:
            candidates = sorted(collect(src, ui))
            targets = prune(candidates, src.store.path, repo.store.path, ui)
            do_relink(src.store.path, repo.store.path, targets, ui)
        finally:
            remotelock.release()
    finally:
        locallock.release()
def collect(src, ui):
    """Walk the source repository's store and return a list of
    (relative path, stat result) tuples for every revlog data file
    (.d/.i) that is a regular file."""
    seplen = len(os.path.sep)
    candidates = []
    live = len(src['tip'].manifest())
    # Your average repository has some files which were deleted before
    # the tip revision. We account for that by assuming that there are
    # 3 tracked files for every 2 live files as of the tip version of
    # the repository.
    #
    # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
    total = live * 3 // 2
    # Rebind src from the repo object to its store path for the walk below.
    src = src.store.path
    pos = 0
    ui.status(_("tip has %d files, estimated total number of files: %s\n")
              % (live, total))
    for dirpath, dirnames, filenames in os.walk(src):
        dirnames.sort()
        relpath = dirpath[len(src) + seplen:]
        for filename in sorted(filenames):
            # Only revlog data (.d) and index (.i) files are link candidates.
            if filename[-2:] not in ('.d', '.i'):
                continue
            st = os.stat(os.path.join(dirpath, filename))
            if not stat.S_ISREG(st.st_mode):
                continue
            pos += 1
            candidates.append((os.path.join(relpath, filename), st))
            ui.progress(_('collecting'), pos, filename, _('files'), total)
    ui.progress(_('collecting'), None)
    ui.status(_('collected %d candidate storage files\n') % len(candidates))
    return candidates
def prune(candidates, src, dst, ui):
    """Filter the collected candidates down to (filename, size) pairs that
    exist in the destination store with the same size, are on the same
    device, and are not already hardlinked."""
    def linkfilter(src, dst, st):
        # Returns the destination's stat result when the pair is linkable,
        # False otherwise; aborts if the stores live on different devices.
        try:
            ts = os.stat(dst)
        except OSError:
            # Destination doesn't have this file?
            return False
        if util.samefile(src, dst):
            return False
        if not util.samedevice(src, dst):
            # No point in continuing
            raise util.Abort(
                _('source and destination are on different devices'))
        if st.st_size != ts.st_size:
            return False
        return st

    targets = []
    total = len(candidates)
    pos = 0
    for fn, st in candidates:
        pos += 1
        srcpath = os.path.join(src, fn)
        tgt = os.path.join(dst, fn)
        ts = linkfilter(srcpath, tgt, st)
        if not ts:
            ui.debug('not linkable: %s\n' % fn)
            continue
        targets.append((fn, ts.st_size))
        ui.progress(_('pruning'), pos, fn, _('files'), total)
    ui.progress(_('pruning'), None)
    ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
    return targets
def do_relink(src, dst, files, ui):
    """Compare each candidate byte-for-byte and replace identical
    destination files with hardlinks to the source, reporting the total
    space reclaimed.  (Python 2 code: uses file() and comma except.)"""
    def relinkfile(src, dst):
        # Rename-then-link so the original destination can be restored if
        # the link fails; the backup is removed only on success.
        bak = dst + '.bak'
        os.rename(dst, bak)
        try:
            util.oslink(src, dst)
        except OSError:
            os.rename(bak, dst)
            raise
        os.remove(bak)

    CHUNKLEN = 65536
    relinked = 0
    savedbytes = 0

    pos = 0
    total = len(files)
    for f, sz in files:
        pos += 1
        source = os.path.join(src, f)
        tgt = os.path.join(dst, f)
        # Binary mode, so that read() works correctly, especially on Windows
        sfp = file(source, 'rb')
        dfp = file(tgt, 'rb')
        # Chunked comparison: stop at the first differing chunk; sin is
        # falsy (EOF reached with all chunks equal) iff the files match.
        sin = sfp.read(CHUNKLEN)
        while sin:
            din = dfp.read(CHUNKLEN)
            if sin != din:
                break
            sin = sfp.read(CHUNKLEN)
        sfp.close()
        dfp.close()
        if sin:
            ui.debug('not linkable: %s\n' % f)
            continue
        try:
            relinkfile(source, tgt)
            ui.progress(_('relinking'), pos, f, _('files'), total)
            relinked += 1
            savedbytes += sz
        except OSError, inst:
            ui.warn('%s: %s\n' % (tgt, str(inst)))

    ui.progress(_('relinking'), None)

    ui.status(_('relinked %d files (%s reclaimed)\n') %
              (relinked, util.bytecount(savedbytes)))
# Command table consumed by Mercurial's extension loader:
# command name -> (entry function, options list, synopsis string).
cmdtable = {
    'relink': (
        relink,
        [],
        _('[ORIGIN]')
    )
}
| apache-2.0 |
ntt-nflex/nflex_connector_utils | nflex_connector_utils/tests/test_resources.py | 1 | 8781 | import pytest
from nflex_connector_utils import (
Resource, Appliance, Network, Server, ServiceOffering, Volume,
IpAddress, ImageDetail, Region, Locations, Connections, Metadata,
ComputePool, ColoSpace, Circuit)
class TestResources(object):
    """Serialization tests for the nflex_connector_utils resource classes.

    Each test builds a resource, calls serialize() and checks the resulting
    dict, covering both the default (all-None) and fully-populated cases.
    """

    def test_resource(self):
        """Base Resource: defaults, required-arg validation, complex keys."""
        data = Resource(id='id', name='name', type='type').serialize()
        assert data['id'] == 'id'
        assert data['type'] == 'type'
        assert data['connections'] == {}
        assert data['metadata'] == {}
        assert data['base']['name'] == 'name'
        assert data['base']['provider_created_at'] is None
        assert data['base']['last_seen_at'] is not None
        assert data['base']['regions'] == []
        assert 'native_portal_link' not in data['base']

        # Test exception raising: id, name and type are all mandatory
        # strings, so None, a missing key and a non-string must each raise.
        for key in ('id', 'name', 'type'):
            with pytest.raises(ValueError):
                # Check None value
                kwargs = {'id': 'id', 'name': 'name', 'type': 'type'}
                kwargs[key] = None
                data = Resource(**kwargs).serialize()

            with pytest.raises(ValueError):
                kwargs = {'id': 'id', 'name': 'name', 'type': 'type'}
                # Check missing key
                del kwargs[key]
                data = Resource(**kwargs).serialize()

            with pytest.raises(ValueError):
                kwargs = {'id': 'id', 'name': 'name', 'type': 'type'}
                # Check bad type
                kwargs[key] = {}
                data = Resource(**kwargs).serialize()

        kwargs = {'id': 'id', 'name': 'name', 'type': 'type'}

        # Test native_portal_link
        data = Resource(native_portal_link='foo', **kwargs).serialize()
        assert data['base']['native_portal_link'] == 'foo'

        # Ensure complex keys are there. More elaborate tests are elsewhere
        data = Resource(region=Region(id='foo'), **kwargs).serialize()
        assert len(data['base']['regions']) > 0

        # Fixed: the location dict key was the *builtin* id function
        # ({id: 'foo'}) instead of the string 'id'.
        data = Resource(locations=Locations([{'id': 'foo'}]), **kwargs). \
            serialize()
        assert len(data['base']['locations']) > 0

        data = Resource(connections=Connections(servers=['foo']),
                        **kwargs).serialize()
        assert len(data['connections'].keys()) > 0

        data = Resource(metadata=Metadata([('key', 'value')]),
                        **kwargs).serialize()
        assert len(data['metadata'].keys()) > 0

    def test_appliance_details(self):
        """Appliance: type_id defaults to None and is passed through."""
        data = Appliance(id='id', name='name').serialize()
        assert data['details']['appliance']['type_id'] is None

        data = Appliance(id='id', name='name', type_id='foo').serialize()
        assert data['details']['appliance']['type_id'] == 'foo'

    def test_compute_pool_details(self):
        """ComputePool: capacity fields default to None, billing_tag only
        appears when supplied."""
        data = ComputePool(id='id', name='name').serialize()
        assert data['details']['compute_pool'] == {
            'cpu_hz': None,
            'memory_b': None,
            'storage_b': None,
        }

        data = ComputePool(
            id='id',
            name='name',
            cpu_hz=2000000,
            memory_b=1024,
            storage_b=1024,
            billing_tag='something'
        ).serialize()
        assert data['details']['compute_pool'] == {
            'cpu_hz': 2000000,
            'memory_b': 1024,
            'storage_b': 1024,
            'billing_tag': 'something'
        }

    def test_network_details(self):
        """Network: serialization exposes a 'network' details section."""
        # There isn't much to test here
        data = Network(id='id', name='name').serialize()
        assert 'network' in data['details']

    def test_server_details(self):
        """Server: default values and full round-trip of all fields."""
        # Check defaults
        data = Server(id='id', name='name').serialize()
        assert data['details']['server'] == {
            'cpu_cores': None,
            'cpu_hz': None,
            'image_detail': None,
            'instance_type': None,
            'ip_addresses': [],
            'provider_state': 'unknown',
            'ram_b': None,
            'state': 'unknown',
            'volumes_b': None,
        }

        data = Server(
            id='id',
            name='name',
            cpu_cores=2,
            cpu_hz=2000000,
            provider_state='foo',
            ram_b=2 * 1024 * 1024 * 1024,
            state='bar',
            volumes_b=1 * 1024 * 1024 * 1024 * 1024,
            is_virtual=False,
            ip_addresses=[IpAddress(ip_address='127.0.0.1')],
            image_detail=ImageDetail(id='foo')
        ).serialize()

        # Ensure complex keys are there. More elaborate tests are elsewhere
        ip_addresses = data['details']['server'].pop('ip_addresses')
        image_detail = data['details']['server'].pop('image_detail')
        assert len(ip_addresses) > 0
        assert len(image_detail.keys()) > 0

        assert data['details']['server'] == {
            'cpu_cores': 2,
            'cpu_hz': 2000000,
            'instance_type': None,
            'provider_state': 'foo',
            'ram_b': 2 * 1024 * 1024 * 1024,
            'state': 'bar',
            'volumes_b': 1 * 1024 * 1024 * 1024 * 1024,
            'is_virtual': False,
        }

    def test_service_offering_details(self):
        """ServiceOffering: type_id defaults to None and is passed through."""
        data = ServiceOffering(id='id', name='name').serialize()
        assert data['details']['service_offering']['type_id'] is None

        data = ServiceOffering(id='id', name='name', type_id='foo').serialize()
        assert data['details']['service_offering']['type_id'] == 'foo'

    def test_volume_details(self):
        """Volume: defaults, iops coercion to int, and optional fields."""
        data = Volume(id='id', name='name').serialize()
        assert 'volume' in data['details']
        assert data['details']['volume']['iops'] is None
        assert data['details']['volume']['encrypted'] is None
        assert data['details']['volume']['size_b'] is None
        assert 'zone_name' not in data['details']['volume']

        # iops should be None or an int (floats truncate, strings parse)
        data = Volume(id='id', name='name', iops=None).serialize()
        assert data['details']['volume']['iops'] is None
        data = Volume(id='id', name='name', iops=1).serialize()
        assert data['details']['volume']['iops'] == 1
        data = Volume(id='id', name='name', iops=1.5).serialize()
        assert data['details']['volume']['iops'] == 1
        data = Volume(id='id', name='name', iops="5").serialize()
        assert data['details']['volume']['iops'] == 5

        volume = Volume(id='id', name='name', encrypted=True,
                        size_b=10, zone_name='foo')
        data = volume.serialize()
        assert data['details']['volume']['encrypted'] is True
        assert data['details']['volume']['size_b'] == 10
        assert volume.size_b == 10
        assert data['details']['volume']['zone_name'] == 'foo'

    def test_colo_space_details(self):
        """ColoSpace: all-None defaults and full field round-trip."""
        data = ColoSpace(id='id', name='name').serialize()
        assert data['details']['colo_space'] == {
            'power_allocation_w': None,
            'type_id': None,
            'colo_space_location': None,
            'customer_name': None,
            'customer_label': None,
            'customer_description': None,
            'combination': None
        }

        data = ColoSpace(
            id='id',
            name='COLOSPACE',
            power_allocation_w=42,
            type_id='cab',
            colo_space_location='Somewhere over the rainbow',
            customer_name='The Original Colo Space',
            customer_label='Coley McColoface',
            customer_description='Yet another Colo Space',
            combination='Open Sesame'
        ).serialize()
        assert data['details']['colo_space'] == {
            'power_allocation_w': 42,
            'type_id': 'cab',
            'colo_space_location': 'Somewhere over the rainbow',
            'customer_name': 'The Original Colo Space',
            'customer_label': 'Coley McColoface',
            'customer_description': 'Yet another Colo Space',
            'combination': 'Open Sesame'
        }

    def test_circuit_details(self):
        """Circuit: all-None defaults and full field round-trip."""
        data = Circuit(id='id', name='name').serialize()
        assert data['details']['circuit'] == {
            'type_id': None,
            'carrier': None,
            'reference': None,
            'endpoint_a': None,
            'endpoint_b': None,
        }

        data = Circuit(
            id='id',
            name='circuit',
            type_id='pvc',
            carrier='test',
            reference='test',
            endpoint_a='test_endpoint_a',
            endpoint_b='test_endpoint_b'
        ).serialize()
        assert data['details']['circuit'] == {
            'type_id': 'pvc',
            'carrier': 'test',
            'reference': 'test',
            'endpoint_a': 'test_endpoint_a',
            'endpoint_b': 'test_endpoint_b'
        }
| gpl-2.0 |
jserver/mock-s3 | mock_s3/file_store.py | 1 | 10865 | import ConfigParser
import md5
import os
import shutil
from datetime import datetime
from errors import BucketNotEmpty, NoSuchBucket
from models import Bucket, BucketQuery, S3Item
# Name of the file, inside each key directory, that holds the object payload.
CONTENT_FILE = '.mocks3_content'
# Name of the INI file, inside each key directory, that holds object metadata.
METADATA_FILE = '.mocks3_metadata'
class FileStore(object):
    """Filesystem-backed storage engine for the mock S3 server.

    Layout: ``<root>/<bucket>/<key path>/`` where each key directory
    contains the object payload in CONTENT_FILE and an INI-style
    'metadata' section in METADATA_FILE.
    """

    def __init__(self, root):
        """Create *root* if needed and cache the current bucket list."""
        self.root = root
        if not os.path.exists(self.root):
            os.makedirs(self.root)
        self.buckets = self.get_all_buckets()

    # ------------------------------------------------------------------
    # Internal helpers (metadata file handling shared by several methods)
    # ------------------------------------------------------------------

    def _read_metadata(self, metafile):
        """Parse *metafile* and return ``(config, metadata)``.

        ``metadata`` is an empty dict when the file does not exist (or
        cannot be parsed); optional fields are only present when stored.
        """
        config = ConfigParser.RawConfigParser()
        metadata = {}
        if config.read(metafile):
            metadata['size'] = config.getint('metadata', 'size')
            metadata['md5'] = config.get('metadata', 'md5')
            metadata['content_type'] = config.get('metadata', 'content_type')
            metadata['creation_date'] = config.get('metadata', 'creation_date')
            # Optional fields: guard so partially written entries still load.
            if config.has_option('metadata', 'filename'):
                metadata['filename'] = config.get('metadata', 'filename')
            if config.has_option('metadata', 'modified_date'):
                metadata['modified_date'] = config.get('metadata', 'modified_date')
        return config, metadata

    def _write_metadata(self, config, metafile, metadata):
        """Persist *metadata* into *metafile* using *config*.

        BUG FIX: the section is only added when missing; the old
        ``store_data`` always called ``add_section`` which raised
        DuplicateSectionError when overwriting an existing item.
        """
        if not config.has_section('metadata'):
            config.add_section('metadata')
        config.set('metadata', 'size', metadata['size'])
        config.set('metadata', 'md5', metadata['md5'])
        config.set('metadata', 'filename', metadata['filename'])
        config.set('metadata', 'content_type', metadata['content_type'])
        config.set('metadata', 'creation_date', metadata['creation_date'])
        if 'modified_date' in metadata:
            config.set('metadata', 'modified_date', metadata['modified_date'])
        with open(metafile, 'wb') as configfile:
            config.write(configfile)

    @staticmethod
    def _normalize_headers(headers):
        """Return a copy of *headers* with lower-cased names and a
        defaulted content type."""
        lowered = {}
        for key in headers:
            lowered[key.lower()] = headers[key]
        if 'content-type' not in lowered:
            lowered['content-type'] = 'application/octet-stream'
        return lowered

    def _write_item(self, bucket_name, item_name, headers, data):
        """Common write path for store_data/store_item.

        Writes the payload and refreshed metadata to disk and returns
        ``(key_name, filename, metadata)``. *headers* must already be
        normalized (lower-case names, content-type present).
        """
        key_name = os.path.join(bucket_name, item_name)
        dirname = os.path.join(self.root, key_name)
        filename = os.path.join(dirname, CONTENT_FILE)
        metafile = os.path.join(dirname, METADATA_FILE)
        config, metadata = self._read_metadata(metafile)
        digest = md5.new()
        digest.update(data)
        size = int(headers['content-length'])
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        with open(filename, 'wb') as f:
            f.write(data)
        now = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.000Z')
        if metadata:
            # Overwriting an existing key: keep creation_date, bump the rest.
            metadata['md5'] = digest.hexdigest()
            metadata['modified_date'] = now
            metadata['content_type'] = headers['content-type']
            metadata['size'] = size
        else:
            metadata = {
                'content_type': headers['content-type'],
                'creation_date': now,
                'md5': digest.hexdigest(),
                'filename': filename,
                'size': size,
            }
        self._write_metadata(config, metafile, metadata)
        return key_name, filename, metadata

    # ------------------------------------------------------------------
    # Bucket operations
    # ------------------------------------------------------------------

    def get_bucket_folder(self, bucket_name):
        """Return the directory backing *bucket_name*."""
        return os.path.join(self.root, bucket_name)

    def get_all_buckets(self):
        """Scan the root directory and return a name-sorted Bucket list."""
        buckets = []
        bucket_list = os.listdir(self.root)
        bucket_list.sort()
        for bucket in bucket_list:
            # The directory mtime stands in for the bucket creation date.
            mtime = os.stat(os.path.join(self.root, bucket)).st_mtime
            create_date = datetime.fromtimestamp(mtime).strftime('%Y-%m-%dT%H:%M:%S.000Z')
            buckets.append(Bucket(bucket, create_date))
        return buckets

    def get_bucket(self, bucket_name):
        """Return the cached Bucket named *bucket_name*, or None."""
        for bucket in self.buckets:
            if bucket.name == bucket_name:
                return bucket
        return None

    def create_bucket(self, bucket_name):
        """Create the bucket directory (idempotent) and return the Bucket."""
        if bucket_name not in [bucket.name for bucket in self.buckets]:
            try:
                os.makedirs(os.path.join(self.root, bucket_name))
            except OSError:
                # Directory appeared concurrently; treat as already created.
                pass
            self.buckets = self.get_all_buckets()
        return self.get_bucket(bucket_name)

    def delete_bucket(self, bucket_name):
        """Remove an empty bucket.

        Raises NoSuchBucket when unknown, BucketNotEmpty when the
        directory still contains keys (rmdir refuses non-empty dirs).
        """
        bucket = self.get_bucket(bucket_name)
        if not bucket:
            raise NoSuchBucket
        try:
            os.rmdir(os.path.join(self.root, bucket_name))
            self.buckets = self.get_all_buckets()
        except OSError:
            raise BucketNotEmpty

    # ------------------------------------------------------------------
    # Key operations
    # ------------------------------------------------------------------

    def get_all_keys(self, bucket, **kwargs):
        """Return a BucketQuery of up to kwargs['max_keys'] items whose
        key starts with kwargs['prefix']."""
        max_keys = int(kwargs['max_keys'])
        is_truncated = False
        matches = []
        # The prefix filter is loop-invariant; compute it once.
        pattern = os.path.join(self.root, bucket.name, kwargs['prefix'])
        for root, dirs, files in os.walk(os.path.join(self.root, bucket.name)):
            if root.startswith(pattern) and METADATA_FILE in files:
                _config, metadata = self._read_metadata(os.path.join(root, METADATA_FILE))
                actual_key = root.replace(self.root, '', 1)
                actual_key = actual_key.replace('/' + bucket.name + '/', '')
                matches.append(S3Item(actual_key, **metadata))
                if len(matches) >= max_keys:
                    is_truncated = True
                    break
        return BucketQuery(bucket, matches, is_truncated, **kwargs)

    def get_item(self, bucket_name, item_name):
        """Return an S3Item with an open ``io`` handle, or None if the
        key has no metadata on disk."""
        key_name = os.path.join(bucket_name, item_name)
        dirname = os.path.join(self.root, key_name)
        _config, metadata = self._read_metadata(os.path.join(dirname, METADATA_FILE))
        if not metadata:
            return None
        item = S3Item(key_name, **metadata)
        item.io = open(os.path.join(dirname, CONTENT_FILE), 'rb')
        return item

    def copy_item(self, src_bucket_name, src_name, bucket_name, name, handler):
        """Copy payload and metadata from one key to another and return
        the new S3Item. *handler* is unused but kept for API parity."""
        src_dirname = os.path.join(self.root, src_bucket_name, src_name)
        bucket = self.get_bucket(bucket_name)
        key_name = os.path.join(bucket.name, name)
        dirname = os.path.join(self.root, key_name)
        metafile = os.path.join(dirname, METADATA_FILE)
        if not os.path.exists(dirname):
            os.makedirs(dirname)
        shutil.copy(os.path.join(src_dirname, CONTENT_FILE),
                    os.path.join(dirname, CONTENT_FILE))
        shutil.copy(os.path.join(src_dirname, METADATA_FILE), metafile)
        _config, metadata = self._read_metadata(metafile)
        return S3Item(key_name, **metadata)

    def store_data(self, bucket, item_name, headers, data):
        """Store raw *data* under bucket/item_name; return an S3Item with
        an open ``io`` handle."""
        headers = self._normalize_headers(headers)
        key_name, filename, metadata = self._write_item(
            bucket.name, item_name, headers, data)
        # BUG FIX: was S3Item(key, ...) where 'key' was the leftover
        # header-loop variable, not the bucket/item key.
        s3_item = S3Item(key_name, **metadata)
        s3_item.io = open(filename, 'rb')
        return s3_item

    def store_item(self, bucket, item_name, handler):
        """Read the request body from *handler* and store it; return the
        resulting S3Item (no open handle)."""
        headers = self._normalize_headers(handler.headers)
        data = handler.rfile.read(int(headers['content-length']))
        key_name, _filename, metadata = self._write_item(
            bucket.name, item_name, headers, data)
        # BUG FIX: was S3Item(key, ...) with the header-loop variable.
        return S3Item(key_name, **metadata)

    def delete_item(self, bucket_name, item_name):
        """Remove a key directory (payload + metadata), ignoring errors."""
        dirname = os.path.join(self.root, bucket_name, item_name)
        shutil.rmtree(dirname, ignore_errors=True)
| mit |
powerjg/gem5-ci-test | src/mem/CommMonitor.py | 27 | 5187 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Thomas Grass
# Andreas Hansson
from m5.params import *
from m5.proxy import *
from MemObject import MemObject
from System import System
# The communication monitor will most typically be used in combination
# with periodic dumping and resetting of stats using schedStatEvent
class CommMonitor(MemObject):
    """Pass-through probe between a master and a slave port that records
    statistics (histograms/distributions) about the traffic crossing it.
    The C++ implementation lives in mem/comm_monitor.hh.
    """
    type = 'CommMonitor'
    cxx_header = "mem/comm_monitor.hh"

    # Resolved from the enclosing object hierarchy.
    system = Param.System(Parent.any, "System that the monitor belongs to.")

    # one port in each direction: requests flow in via the slave port and
    # out via the master port; responses take the reverse path
    master = MasterPort("Master port")
    slave = SlavePort("Slave port")

    # control the sample period window length of this monitor
    sample_period = Param.Clock("1ms", "Sample period for histograms")

    # for each histogram, set the number of bins and enable the user
    # to disable the measurement, reads and writes use the same
    # parameters

    # histogram of burst length of packets (not using sample period)
    burst_length_bins = Param.Unsigned('20', "# bins in burst length " \
                                           "histograms")
    disable_burst_length_hists = Param.Bool(False, "Disable burst length " \
                                                "histograms")

    # bandwidth per sample period
    bandwidth_bins = Param.Unsigned('20', "# bins in bandwidth histograms")
    disable_bandwidth_hists = Param.Bool(False, "Disable bandwidth histograms")

    # latency from request to response (not using sample period)
    latency_bins = Param.Unsigned('20', "# bins in latency histograms")
    disable_latency_hists = Param.Bool(False, "Disable latency histograms")

    # inter transaction time (ITT) distributions in uniformly sized
    # bins up to the maximum, independently for read-to-read,
    # write-to-write and the combined request-to-request that does not
    # separate read and write requests
    itt_bins = Param.Unsigned('20', "# bins in ITT distributions")
    itt_max_bin = Param.Latency('100ns', "Max bin of ITT distributions")
    disable_itt_dists = Param.Bool(False, "Disable ITT distributions")

    # outstanding requests (that did not yet get a response) per
    # sample period
    outstanding_bins = Param.Unsigned('20', "# bins in outstanding " \
                                          "requests histograms")
    disable_outstanding_hists = Param.Bool(False, "Disable outstanding " \
                                               "requests histograms")

    # transactions (requests) observed per sample period
    transaction_bins = Param.Unsigned('20', "# bins in transaction " \
                                          "count histograms")
    disable_transaction_hists = Param.Bool(False, "Disable transaction count " \
                                               "histograms")

    # address distributions (heatmaps) with associated address masks
    # to selectively only look at certain bits of the address; disabled
    # by default because of their memory footprint
    read_addr_mask = Param.Addr(MaxAddr, "Address mask for read address")
    write_addr_mask = Param.Addr(MaxAddr, "Address mask for write address")
    disable_addr_dists = Param.Bool(True, "Disable address distributions")
| bsd-3-clause |
blazek/QGIS | python/plugins/processing/gui/menus.py | 6 | 13497 | # -*- coding: utf-8 -*-
"""
***************************************************************************
menus.py
---------------------
Date : February 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Victor Olaya'
import os
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtWidgets import QAction, QMenu
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QApplication
from processing.core.ProcessingConfig import ProcessingConfig, Setting
from processing.gui.MessageDialog import MessageDialog
from processing.gui.AlgorithmDialog import AlgorithmDialog
from qgis.utils import iface
from qgis.core import QgsApplication, QgsMessageLog, QgsStringUtils, QgsProcessingAlgorithm
from qgis.gui import QgsGui
from processing.gui.MessageBarProgress import MessageBarProgress
from processing.gui.AlgorithmExecutor import execute
from processing.gui.Postprocessing import handleAlgorithmResults
from processing.core.Processing import Processing
from processing.tools import dataobjects
# Toolbar holding the per-algorithm buttons; created lazily on first use.
algorithmsToolbar = None
# Settings group under which all menu-related settings are registered.
menusSettingsGroup = 'Menus'

# Mapping of algorithm id -> default menu path ("Top menu/Submenu").
defaultMenuEntries = {}


def _registerDefaultEntries(menu_path, algorithm_ids):
    """Assign *menu_path* as the default menu for every given algorithm id."""
    defaultMenuEntries.update(dict.fromkeys(algorithm_ids, menu_path))


vectorMenu = QApplication.translate('MainWindow', 'Vect&or')

analysisToolsMenu = vectorMenu + "/" + Processing.tr('&Analysis Tools')
_registerDefaultEntries(analysisToolsMenu, [
    'qgis:distancematrix',
    'qgis:sumlinelengths',
    'qgis:countpointsinpolygon',
    'qgis:listuniquevalues',
    'qgis:basicstatisticsforfields',
    'qgis:nearestneighbouranalysis',
    'native:meancoordinates',
    'native:lineintersections',
])

researchToolsMenu = vectorMenu + "/" + Processing.tr('&Research Tools')
_registerDefaultEntries(researchToolsMenu, [
    'qgis:creategrid',
    'qgis:randomselection',
    'qgis:randomselectionwithinsubsets',
    'qgis:randompointsinextent',
    'qgis:randompointsinlayerbounds',
    'qgis:randompointsinsidepolygons',
    'qgis:regularpoints',
    'native:selectbylocation',
    'qgis:polygonfromlayerextent',
])

geoprocessingToolsMenu = vectorMenu + "/" + Processing.tr('&Geoprocessing Tools')
_registerDefaultEntries(geoprocessingToolsMenu, [
    'native:buffer',
    'native:convexhull',
    'native:intersection',
    'native:union',
    'native:symmetricaldifference',
    'native:clip',
    'native:difference',
    'native:dissolve',
    'qgis:eliminateselectedpolygons',
])

geometryToolsMenu = vectorMenu + "/" + Processing.tr('G&eometry Tools')
_registerDefaultEntries(geometryToolsMenu, [
    'qgis:checkvalidity',
    'qgis:exportaddgeometrycolumns',
    'native:centroids',
    'qgis:delaunaytriangulation',
    'qgis:voronoipolygons',
    'native:simplifygeometries',
    'qgis:densifygeometries',
    'native:multiparttosingleparts',
    'native:collect',
    'native:polygonstolines',
    'qgis:linestopolygons',
    'native:extractvertices',
])

managementToolsMenu = vectorMenu + "/" + Processing.tr('&Data Management Tools')
_registerDefaultEntries(managementToolsMenu, [
    'native:reprojectlayer',
    'qgis:joinattributesbylocation',
    'qgis:splitvectorlayer',
    'native:mergevectorlayers',
    'qgis:createspatialindex',
])

rasterMenu = QApplication.translate('MainWindow', '&Raster')

projectionsMenu = rasterMenu + "/" + Processing.tr('Projections')
_registerDefaultEntries(projectionsMenu, [
    'gdal:warpreproject',
    'gdal:extractprojection',
    'gdal:assignprojection',
])

conversionMenu = rasterMenu + "/" + Processing.tr('Conversion')
_registerDefaultEntries(conversionMenu, [
    'gdal:rasterize',
    'gdal:polygonize',
    'gdal:translate',
    'gdal:rgbtopct',
    'gdal:pcttorgb',
])

extractionMenu = rasterMenu + "/" + Processing.tr('Extraction')
_registerDefaultEntries(extractionMenu, [
    'gdal:contour',
    'gdal:cliprasterbyextent',
    'gdal:cliprasterbymasklayer',
])

analysisMenu = rasterMenu + "/" + Processing.tr('Analysis')
_registerDefaultEntries(analysisMenu, [
    'gdal:sieve',
    'gdal:nearblack',
    'gdal:fillnodata',
    'gdal:proximity',
    'gdal:griddatametrics',
    'gdal:gridaverage',
    'gdal:gridinversedistance',
    'gdal:gridnearestneighbor',
    'gdal:aspect',
    'gdal:hillshade',
    'gdal:roughness',
    'gdal:slope',
    'gdal:tpitopographicpositionindex',
    'gdal:triterrainruggednessindex',
])

miscMenu = rasterMenu + "/" + Processing.tr('Miscellaneous')
_registerDefaultEntries(miscMenu, [
    'gdal:buildvirtualraster',
    'gdal:merge',
    'gdal:gdalinfo',
    'gdal:overviews',
    'gdal:tileindex',
])
def initializeMenus():
    """Validate the default menu table and register the per-algorithm
    MENU_/BUTTON_/ICON_ settings for every known algorithm."""
    registry = QgsApplication.processingRegistry()
    # Warn about default entries that no longer match a registered algorithm.
    for alg_id in defaultMenuEntries.keys():
        alg = registry.algorithmById(alg_id)
        if alg is None or alg.id() != alg_id:
            QgsMessageLog.logMessage(
                Processing.tr('Invalid algorithm ID for menu: {}').format(alg_id),
                Processing.tr('Processing'))

    for provider in registry.providers():
        for alg in provider.algorithms():
            default_path = defaultMenuEntries.get(alg.id(), "")
            ProcessingConfig.addSetting(
                Setting(menusSettingsGroup, "MENU_" + alg.id(),
                        "Menu path", default_path))
            ProcessingConfig.addSetting(
                Setting(menusSettingsGroup, "BUTTON_" + alg.id(),
                        "Add button", False))
            ProcessingConfig.addSetting(
                Setting(menusSettingsGroup, "ICON_" + alg.id(),
                        "Icon", "", valuetype=Setting.FILE))

    ProcessingConfig.readSettings()
def updateMenus():
    """Tear down and rebuild every algorithm menu entry."""
    removeMenus()
    # Let pending deleteLater() calls from removeMenus() run first.
    QCoreApplication.processEvents()
    createMenus()
def createMenus():
    """Add a menu entry (and optional toolbar button) for every algorithm
    that has a configured menu path."""
    for alg in QgsApplication.processingRegistry().algorithms():
        menu_path = ProcessingConfig.getSetting("MENU_" + alg.id())
        add_button = ProcessingConfig.getSetting("BUTTON_" + alg.id())
        icon_path = ProcessingConfig.getSetting("ICON_" + alg.id())
        if icon_path and os.path.exists(icon_path):
            icon = QIcon(icon_path)
        else:
            icon = None  # fall back to the algorithm's own icon
        if not menu_path:
            continue
        parts = menu_path.split("/")
        addAlgorithmEntry(alg, parts[0], parts[-1],
                          addButton=add_button, icon=icon)
def removeMenus():
    """Remove the menu entry of every algorithm that has one configured."""
    for alg in QgsApplication.processingRegistry().algorithms():
        menu_path = ProcessingConfig.getSetting("MENU_" + alg.id())
        if not menu_path:
            continue
        parts = menu_path.split("/")
        removeAlgorithmEntry(alg, parts[0], parts[-1])
def addAlgorithmEntry(alg, menuName, submenuName, actionText=None, icon=None, addButton=False):
    """Create a QAction for *alg* under menuName/submenuName and, when
    *addButton* is set, also place it on the shared algorithms toolbar."""
    global algorithmsToolbar

    if actionText is None:
        use_title_case = ((QgsGui.higFlags() & QgsGui.HigMenuTextIsTitleCase)
                          and not (alg.flags() & QgsProcessingAlgorithm.FlagDisplayNameIsLiteral))
        if use_title_case:
            alg_title = QgsStringUtils.capitalize(alg.displayName(), QgsStringUtils.TitleCase)
        else:
            alg_title = alg.displayName()
        actionText = alg_title + QCoreApplication.translate('Processing', '…')

    # Bind the id into a local so the lambda does not capture a loop variable.
    alg_id = alg.id()
    action = QAction(icon or alg.icon(), actionText, iface.mainWindow())
    action.setData(alg_id)
    action.triggered.connect(lambda: _executeAlgorithm(alg_id))
    action.setObjectName("mProcessingUserMenu_%s" % alg_id)

    if menuName:
        top_menu = getMenu(menuName, iface.mainWindow().menuBar())
        getMenu(submenuName, top_menu).addAction(action)

    if addButton:
        if algorithmsToolbar is None:
            algorithmsToolbar = iface.addToolBar(
                QCoreApplication.translate('MainWindow', 'Processing Algorithms'))
            algorithmsToolbar.setObjectName("ProcessingAlgorithms")
            algorithmsToolbar.setToolTip(
                QCoreApplication.translate('MainWindow', 'Processing Algorithms Toolbar'))
        algorithmsToolbar.addAction(action)
def removeAlgorithmEntry(alg, menuName, submenuName, delButton=True):
    """Remove the menu action of *alg* (deleting empty submenus) and,
    when *delButton* is set, its toolbar button too."""
    global algorithmsToolbar

    if menuName:
        top_menu = getMenu(menuName, iface.mainWindow().menuBar())
        submenu = getMenu(submenuName, top_menu)
        action = findAction(submenu.actions(), alg)
        if action is not None:
            submenu.removeAction(action)
        if len(submenu.actions()) == 0:
            # Last entry gone: schedule the empty submenu for deletion.
            submenu.deleteLater()

    if delButton and algorithmsToolbar is not None:
        action = findAction(algorithmsToolbar.actions(), alg)
        if action is not None:
            algorithmsToolbar.removeAction(action)
def _executeAlgorithm(alg_id):
    """Slot triggered by a menu/toolbar action: look up the algorithm by
    id and run it — through its parameters dialog when it has visible
    parameters, otherwise immediately with message-bar progress.
    """
    alg = QgsApplication.processingRegistry().createAlgorithmById(alg_id)
    if alg is None:
        # The provider may have been uninstalled since the menu was built.
        dlg = MessageDialog()
        dlg.setTitle(Processing.tr('Missing Algorithm'))
        dlg.setMessage(
            Processing.tr('The algorithm "{}" is no longer available. (Perhaps a plugin was uninstalled?)').format(alg_id))
        dlg.exec_()
        return

    ok, message = alg.canExecute()
    if not ok:
        dlg = MessageDialog()
        dlg.setTitle(Processing.tr('Missing Dependency'))
        dlg.setMessage(
            Processing.tr('<h3>Missing dependency. This algorithm cannot '
                          'be run :-( </h3>\n{0}').format(message))
        dlg.exec_()
        return

    if (alg.countVisibleParameters()) > 0:
        # Let the algorithm supply a custom dialog; fall back to the
        # generic parameters dialog.
        dlg = alg.createCustomParametersWidget(parent=iface.mainWindow())
        if not dlg:
            dlg = AlgorithmDialog(alg, parent=iface.mainWindow())
        canvas = iface.mapCanvas()
        # Remember the active map tool so it can be restored if the
        # dialog switched it (e.g. for extent/point pickers).
        prevMapTool = canvas.mapTool()
        dlg.show()
        dlg.exec_()
        if canvas.mapTool() != prevMapTool:
            try:
                canvas.mapTool().reset()
            except:
                # Not every map tool implements reset(); best effort only.
                pass
            canvas.setMapTool(prevMapTool)
    else:
        # Parameter-less algorithm: execute right away, reporting
        # progress through the message bar.
        feedback = MessageBarProgress()
        context = dataobjects.createContext(feedback)
        parameters = {}
        ret, results = execute(alg, parameters, context, feedback)
        handleAlgorithmResults(alg, context, feedback)
        feedback.close()
def getMenu(name, parent):
    """Return the child QMenu of *parent* titled *name*, creating it on
    demand when no such menu exists yet."""
    for child in parent.children():
        if isinstance(child, QMenu) and child.title() == name:
            return child
    return parent.addMenu(name)
def findAction(actions, alg):
    """Return the action in *actions* whose data() is alg.id(), or None."""
    target_id = alg.id()
    for action in actions:
        if action.data() == target_id:
            return action
    return None
| gpl-2.0 |
villaverde/iredadmin | libs/ldaplib/attrs.py | 2 | 4418 | # Author: Zhang Huangbin <zhb@iredmail.org>
# ---------------------------------------------------------
# Values.
# ---------------------------------------------------------
# Possible values of the LDAP 'accountStatus' attribute.
ACCOUNT_STATUS_ACTIVE = 'active'
ACCOUNT_STATUS_DISABLED = 'disabled'
ACCOUNT_STATUSES = [ACCOUNT_STATUS_ACTIVE, ACCOUNT_STATUS_DISABLED, ]

# All account types which can be converted to ldap dn.
ACCOUNT_TYPES_ALL = ['domain', 'catchall', 'admin', 'user', 'maillist', 'maillistExternal', 'alias', ]
ACCOUNT_TYPES_EMAIL = ['admin', 'user', 'maillist', 'maillistExternal', 'alias', ]
ACCOUNT_TYPES_DOMAIN = ['domain', 'catchall', ]

# Default groups which will be created when creating a new domain.
# WARNING: Don't use unicode string here.
GROUP_USERS = 'Users'
GROUP_GROUPS = 'Groups'
GROUP_ALIASES = 'Aliases'
GROUP_EXTERNALS = 'Externals'
DEFAULT_GROUPS = [GROUP_USERS, GROUP_GROUPS, GROUP_ALIASES, GROUP_EXTERNALS, ]

# DN fragments inserted between an account's RDN and its domain DN,
# one per account type (e.g. 'ou=Users,').
DN_BETWEEN_USER_AND_DOMAIN = DN_BETWEEN_CATCHALL_AND_DOMAIN = 'ou=%s,' % (GROUP_USERS, )
DN_BETWEEN_MAILLIST_AND_DOMAIN = 'ou=%s,' % (GROUP_GROUPS, )
DN_BETWEEN_ALIAS_AND_DOMAIN = 'ou=%s,' % (GROUP_ALIASES, )
DN_BETWEEN_MAILLIST_EXTERNAL_AND_DOMAIN = 'ou=%s,' % (GROUP_EXTERNALS, )

# RDN of accounts. Default is 'mail'.
# Note: Although you can use another attr as RDN, all mail user/list/alias
# entries must still have a 'mail' attribute.
RDN_USER = 'mail'  # Supports: mail, cn, uid.
RDN_MAILLIST = RDN_ALIAS = RDN_ADMIN = RDN_CATCHALL = 'mail'
RDN_MAILLIST_EXTERNAL = 'memberOfGroup'
RDN_DOMAIN = 'domainName'

# ---------------------------------------------------------
# Attributes.
# ---------------------------------------------------------
ATTR_GLOBAL_ADMIN = 'domainGlobalAdmin'
ATTR_DOMAIN_CURRENT_QUOTA_SIZE = 'domainCurrentQuotaSize'

# ---------------------------------------------------------
# Admin related.
# ---------------------------------------------------------
# Attributes fetched for the admin list page.
ADMIN_SEARCH_ATTRS = ['mail', 'accountStatus', 'cn', 'preferredLanguage',
                      'domainGlobalAdmin', 'enabledService',
                      'objectClass',
                      ]
ADMIN_ATTRS_ALL = ADMIN_SEARCH_ATTRS + ['sn', 'givenName']

# ---------------------------------------------------------
# Domain related.
# ---------------------------------------------------------
DOMAIN_FILTER = '(objectClass=mailDomain)'

# All available services.
DOMAIN_ENABLED_SERVICE = ('mail', 'domainalias', 'senderbcc', 'recipientbcc',)

# Services available in 'Service Control' page.
DOMAIN_SERVICE_UNDER_CONTROL = ['mail', 'domainalias', 'senderbcc', 'recipientbcc', ]

DOMAIN_SEARCH_ATTRS = [
    # Attributes used in domain list page.
    'domainName', 'domainAliasName', 'domainAdmin',
    'cn', 'mtaTransport', 'accountStatus',
    'domainCurrentQuotaSize',
    'domainCurrentUserNumber',
    'domainCurrentListNumber',
    'domainCurrentAliasNumber',
    'accountSetting',
]

DOMAIN_ATTRS_ALL = [
    # Normal attributes.
    'domainName', 'domainAliasName', 'cn', 'description', 'accountStatus', 'domainBackupMX',
    'domainAdmin', 'mtaTransport', 'enabledService',
    'domainRecipientBccAddress', 'domainSenderBccAddress',
    'disclaimer',
    'domainCurrentQuotaSize',
    'domainCurrentUserNumber',
    'domainCurrentListNumber',
    'domainCurrentAliasNumber',
    'accountSetting',
]

VALUES_DOMAIN_BACKUPMX = ['yes', 'no']

# ---------------------------------------------------------
# User related.
# ---------------------------------------------------------
USER_FILTER = '(objectClass=mailUser)'

USER_ATTR_PASSWORD = 'userPassword'

USER_SEARCH_ATTRS = [
    # Required attributes.
    'mail', 'cn', 'accountStatus', 'mailQuota',
    'employeeNumber', 'title', 'shadowAddress', 'memberOfGroup',
    'storageBaseDirectory', 'mailMessageStore',
]

USER_ATTRS_ALL = [
    'mail', 'cn', 'accountStatus', 'mailQuota', 'jpegPhoto',
    'sn', 'givenName',
    'enabledService', 'memberOfGroup', 'employeeNumber', 'preferredLanguage',
    'telephoneNumber', 'userRecipientBccAddress', 'userSenderBccAddress',
    'mailForwardingAddress', 'mtaTransport',
    'storageBaseDirectory', 'mailMessageStore',  # Maildir
    'mobile', 'title', 'shadowAddress',
    'shadowLastChange',  # Date of last password change, an integer.
    # Per-user whitelist & blacklist.
    'amavisWhitelistSender', 'amavisBlacklistSender',
    'mailWhitelistRecipient', 'mailBlacklistRecipient',
    'domainGlobalAdmin',  # Global admin
]
| gpl-2.0 |
Velkan/shadowsocks | shadowsocks/local.py | 1015 | 2248 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns
def main():
    """Entry point of the shadowsocks local (SOCKS5) client."""
    shell.check_python()

    # fix py2exe: run relative to the bundled executable's directory
    if hasattr(sys, "frozen") and sys.frozen in \
            ("windows_exe", "console_exe"):
        os.chdir(os.path.dirname(os.path.abspath(sys.executable)))

    config = shell.get_config(True)
    daemon.daemon_exec(config)

    try:
        logging.info("starting local at %s:%d" %
                     (config['local_address'], config['local_port']))

        dns_resolver = asyncdns.DNSResolver()
        tcp_server = tcprelay.TCPRelay(config, dns_resolver, True)
        udp_server = udprelay.UDPRelay(config, dns_resolver, True)
        loop = eventloop.EventLoop()
        for component in (dns_resolver, tcp_server, udp_server):
            component.add_to_loop(loop)

        def quit_handler(signum, _):
            logging.warn('received SIGQUIT, doing graceful shutting down..')
            tcp_server.close(next_tick=True)
            udp_server.close(next_tick=True)

        def int_handler(signum, _):
            sys.exit(1)

        # SIGQUIT does not exist on Windows; fall back to SIGTERM there.
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), quit_handler)
        signal.signal(signal.SIGINT, int_handler)

        daemon.set_user(config.get('user', None))
        loop.run()
    except Exception as e:
        shell.print_exception(e)
        sys.exit(1)


if __name__ == '__main__':
    main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.